Commit daf991b
Parent(s): 6a1b12b
Update parquet files (step 59 of 249)
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- spaces/17TheWord/RealESRGAN/realesrgan/train.py +0 -11
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md +0 -98
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md +0 -38
- spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md +0 -12
- spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md +0 -14
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md +0 -90
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md +0 -110
- spaces/2023Liu2023/bingo/src/components/chat-list.tsx +0 -28
- spaces/360macky/first-space/app.py +0 -5
- spaces/4Taps/SadTalker/src/test_audio2coeff.py +0 -87
- spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py +0 -346
- spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py +0 -13
- spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md +0 -13
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py +0 -18
- spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py +0 -234
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py +0 -55
- spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md +0 -80
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js +0 -45
- spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py +0 -136
- spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py +0 -981
- spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py +0 -645
- spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py +0 -120
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile +0 -47
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +0 -1645
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py +0 -317
- spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py +0 -19
- spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py +0 -11
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py +0 -794
- spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py +0 -82
- spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py +0 -145
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py +0 -151
- spaces/AutoBG/Auto-BoardGame/description_generator.py +0 -119
- spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py +0 -60
- spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py +0 -87
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py +0 -13
- spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md +0 -71
- spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md +0 -125
- spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md +0 -53
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py +0 -27
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py +0 -225
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py +0 -86
- spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md +0 -65
- spaces/CVPR/LIVE/thrust/generate_mk.py +0 -146
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h +0 -23
- spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py +0 -83
- spaces/ChristopherMarais/Andrew_Alpha/README.md +0 -19
- spaces/CofAI/chat/g4f/Provider/Providers/Phind.py +0 -36
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py +0 -89
- spaces/Dagfinn1962/stablediffusion-models/main.css +0 -57
spaces/17TheWord/RealESRGAN/realesrgan/train.py
DELETED
@@ -1,11 +0,0 @@
-# flake8: noqa
-import os.path as osp
-from basicsr.train import train_pipeline
-
-import realesrgan.archs
-import realesrgan.data
-import realesrgan.models
-
-if __name__ == '__main__':
-    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
-    train_pipeline(root_path)
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md
DELETED
@@ -1,98 +0,0 @@
-
-<h1>Introduction</h1>
-<p>Have you ever heard of desarrollo del pensamiento tomo 2 resuelto pdf 27? If you are interested in developing your thinking skills, this book is for you. It is a Spanish book that translates to "Development of Thinking Volume 2 Solved PDF 27". It is a comprehensive guide that covers various aspects of logic, reasoning, critical thinking, problem solving, creativity and innovation. It is written by a team of experts from different fields and disciplines, and it includes exercises, examples, diagrams, tables and charts to help you understand and apply the concepts.</p>
-<p>Why is it important to study this book? Because in today's complex and dynamic world, you need to be able to think clearly, critically and creatively. You need to be able to analyze information, evaluate arguments, solve problems, make decisions, generate ideas and innovate solutions. These skills are essential for your personal and professional growth, as well as for your contribution to society. By studying this book, you will learn how to improve your thinking skills and become a better thinker.</p>
-<h2>desarrollo del pensamiento tomo 2 resuelto pdf 27</h2><br /><p><b><b>Download Zip</b> ✅ <a href="https://byltly.com/2uKvwk">https://byltly.com/2uKvwk</a></b></p><br /><br />
-<h1>Main body</h1>
-<h2>What are the main topics covered in the book?</h2>
-<p>The book is divided into three parts: logic and reasoning, critical thinking and problem solving, and creativity and innovation. Each part contains several chapters that explore different aspects of these topics. Here are some of the main topics covered in the book:</p>
-<h3>Logic and reasoning</h3>
-<p>This part introduces you to the basics of logic and reasoning, such as propositions, arguments, validity, soundness, fallacies, induction, deduction and abduction. You will learn how to identify and construct valid and sound arguments, how to avoid common logical errors and fallacies, how to use different types of reasoning for different purposes and contexts, and how to evaluate the strength of evidence and arguments.</p>
-<h3>Critical thinking and problem solving</h3>
-<p>This part teaches you how to apply logic and reasoning to critical thinking and problem solving. You will learn how to define problems, identify assumptions, generate hypotheses, test solutions, monitor results and revise strategies. You will also learn how to use various tools and techniques for critical thinking and problem solving, such as brainstorming, mind mapping, SWOT analysis, decision matrix, fishbone diagram and Pareto principle.</p>
-<p>desarrollo del pensamiento tomo 2 solucionario pdf gratis<br />
-descargar desarrollo del pensamiento tomo 2 resuelto pdf<br />
-libro desarrollo del pensamiento tomo 2 resuelto pdf completo<br />
-desarrollo del pensamiento tomo 2 resuelto pdf 2021<br />
-desarrollo del pensamiento tomo 2 resuelto pdf online<br />
-desarrollo del pensamiento tomo 2 resuelto pdf descargar gratis<br />
-desarrollo del pensamiento tomo 2 resuelto pdf capitulo 27<br />
-desarrollo del pensamiento tomo 2 resuelto pdf ejercicios<br />
-desarrollo del pensamiento tomo 2 resuelto pdf pagina 27<br />
-desarrollo del pensamiento tomo 2 resuelto pdf gratis<br />
-desarrollo del pensamiento tomo 2 resuelto pdf sway<br />
-desarrollo del pensamiento tomo 2 resuelto pdf soundcloud<br />
-desarrollo del pensamiento tomo 2 resuelto pdf libro<br />
-desarrollo del pensamiento tomo 2 resuelto pdf download<br />
-desarrollo del pensamiento tomo 2 resuelto pdf gratis online<br />
-desarrollo del pensamiento tomo 2 resuelto pdf soluciones<br />
-desarrollo del pensamiento tomo 2 resuelto pdf completo<br />
-desarrollo del pensamiento tomo 2 resuelto pdf gratis descargar<br />
-desarrollo del pensamiento tomo 2 resuelto pdf capitulo 27 solucionario<br />
-desarrollo del pensamiento tomo 2 resuelto pdf ejercicios resueltos<br />
-desarrollo del pensamiento tomo 2 resuelto pdf pagina 27 soluciones<br />
-desarrollo del pensamiento tomo 2 resuelto pdf gratis sway<br />
-desarrollo del pensamiento tomo 2 resuelto pdf gratis soundcloud<br />
-desarrollo del pensamiento tomo 2 resuelto pdf libro gratis<br />
-desarrollo del pensamiento tomo 2 resuelto pdf download gratis<br />
-desarrollo del pensamiento tomo 2 solucionario pdf online<br />
-descargar desarrollo del pensamiento tomo 2 solucionario pdf gratis<br />
-libro desarrollo del pensamiento tomo 2 solucionario pdf completo<br />
-desarrollo del pensamiento tomo 2 solucionario pdf 2021<br />
-desarrollo del pensamiento tomo 2 solucionario pdf descargar gratis<br />
-desarrollo del pensamiento tomo 2 solucionario pdf capitulo 27<br />
-desarrollo del pensamiento tomo 2 solucionario pdf ejercicios<br />
-desarrollo del pensamiento tomo 2 solucionario pdf pagina 27<br />
-desarrollo del pensamiento tomo 2 solucionario pdf sway<br />
-desarrollo del pensamiento tomo 2 solucionario pdf soundcloud<br />
-desarrollo del pensamiento tomo 2 solucionario pdf libro<br />
-desarrollo del pensamiento tomo 2 solucionario pdf download<br />
-descargar desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf gratis <br />
-libro desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf completo <br />
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf online <br />
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf descargar gratis <br />
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 ejercicios <br />
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pagina <br />
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 sway <br />
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 soundcloud <br />
-desarrollo del pensamiento tomo 2 resuelto capitulo</p>
-<h3>Creativity and innovation</h3>
-<p>This part shows you how to use logic and reasoning to enhance your creativity and innovation. You will learn how to develop your creative potential, overcome mental blocks, stimulate your imagination, generate original ideas and implement innovative solutions. You will also learn how to use various methods and models for creativity and innovation, such as lateral thinking, divergent thinking, convergent thinking, TRIZ method, SCAMPER technique and design thinking.</p>
-<h2>How can you access the book online?</h2>
-<p>If you want to read desarrollo del pensamiento tomo 2 resuelto pdf 27 online, you have several options. Here are some of them:</p>
-<h3>Download it from Sway</h3>
-<p>Sway is a Microsoft service that allows you to create and share interactive presentations online. You can find desarrollo del pensamiento tomo 2 resuelto pdf 27 on Sway by following this link: <a href="https://sway.office.com/skrWSVcG4BefKxCb">https://sway.office.com/skrWSVcG4BefKxCb</a>. You can download the PDF file from there by clicking on the download icon at the top right corner of the screen.</p>
-<h3>Read it on Scribd</h3>
-<p>Scribd is a digital library that offers unlimited access to books, audiobooks, magazines and documents online. You can find desarrollo del pensamiento tomo 2 resuelto pdf 27 on Scribd by following this link: <a href="https://www.scribd.com/document/511741583/Desarrollo-Del-Pensamiento-Tomo-2-Resuelto-Pdf-27">https://www.scribd.com/document/511741583/Desarrollo-Del-Pensamiento-Tomo-2-Resuelto-Pdf-27</a>. You can read the book online or download it as a PDF file by clicking on the download icon at the top right corner of the screen.</p>
-<h3>Buy it from Amazon</h3>
-<p>Amazon is an online marketplace that sells books, electronics, clothing and other products. You can buy desarrollo del pensamiento tomo 2 resuelto pdf 27 on Amazon by following this link: <a href="https://www.amazon.com/Desarrollo-Del-Pensamiento-Tomo-Resuelto/dp/B08ZJWZQ8Q">https://www.amazon.com/Desarrollo-Del-Pensamiento-Tomo-Resuelto/dp/B08ZJWZQ8Q</a>. You can order the paperback version or the Kindle version of the book by clicking on the add to cart or buy now buttons.</p>
-<h2>How can you use the book to improve your skills?</h2>
-<p>Reading desarrollo del pensamiento tomo 2 resuelto pdf 27 online is not enough if you want to improve your skills. You need to practice what you learn by doing the exercises and examples in the book. You also need to apply what you learn by using the concepts in real-life situations. Here are some tips on how to use the book effectively:</p>
-<h3>Follow the exercises and examples</h3>
-<p>The book contains many exercises and examples that help you test your understanding and reinforce your learning. You should follow them carefully and try to solve them on your own before checking the answers. You should also compare your answers with those provided in the book and analyze why they are correct or incorrect. This will help you identify your strengths and weaknesses and improve your skills.</p>
-<h3>Apply the concepts to real-life situations</h3>
-<p>The book also contains many case studies and scenarios that illustrate how the concepts can be applied in real-life situations. You should read them attentively and try to relate them to your own experiences or interests. You should also think of other situations where you can use the concepts in your personal or professional life. This will help you transfer your learning from theory to practice and enhance your skills.</p>
-<h3>Join a study group or a forum</h3>
-<p>The book can be more enjoyable and effective if you study it with others who share your interest or goal. You can join a study group or a forum where you can discuss the topics in the book with other learners or experts. You can ask questions, share insights, exchange feedbacks or challenge each other with new problems or ideas. This will help you expand your perspective and deepen your understanding.</p>
-<h1>Conclusion</h1>
-<h2>Summary of the main points</h2>
-<p>In conclusion, desarrollo del pensamiento tomo 2 resuelto pdf 27 is a valuable resource for anyone who wants to develop their thinking skills. It covers various aspects of logic, reasoning, critical thinking, problem solving, creativity and innovation. It provides exercises, examples, diagrams, tables and charts to help you understand and apply the concepts. It also offers several options for accessing the book online, such as downloading it from Sway, reading it on Scribd or buying it from Amazon. Finally, it gives some tips on how to use the book effectively, such as following the exercises and examples, applying the concepts to real-life situations or joining a study group or a forum.</p>
-<h2>Recommendations for further reading</h2>
-<p>If you want to learn more about the topics covered in the book, you can check out these resources:</p>
-<ul>
-<li><a href="https://www.coursera.org/learn/logical-and-critical-thinking">Logical and Critical Thinking</a>: This is a free online course offered by the University of Auckland that teaches you how to identify, analyze and evaluate arguments using logic and critical thinking.</li>
-<li><a href="https://www.edx.org/course/problem-solving-using-computational-thinking">Problem Solving using Computational Thinking</a>: This is a free online course offered by the University of Michigan that teaches you how to use computational thinking to solve complex problems in various domains.</li>
-<li><a href="https://www.udemy.com/course/creativity-innovation-and-change/">Creativity, Innovation and Change</a>: This is a paid online course offered by Udemy that teaches you how to unleash your creativity, generate innovative ideas and implement change in your personal or professional life.</li>
-</ul>
-<h1>FAQs</h1>
-<p>Here are some frequently asked questions about desarrollo del pensamiento tomo 2 resuelto pdf 27:</p>
-<ol>
-<li><b>What is the purpose of the book?</b><br>The purpose of the book is to help you develop your thinking skills in various aspects, such as logic, reasoning, critical thinking, problem solving, creativity and innovation.</li>
-<li><b>Who is the author of the book?</b><br>The book is written by a team of experts from different fields and disciplines, such as mathematics, philosophy, psychology, engineering and education.</li>
-<li><b>How long is the book?</b><br>The book is about 400 pages long. It contains 27 chapters divided into three parts: logic and reasoning, critical thinking and problem solving, and creativity and innovation.</li>
-<li><b>How can I get a copy of the book?</b><br>You can get a copy of the book online by downloading it from Sway, reading it on Scribd or buying it from Amazon. You can also find it in some libraries or bookstores.</li>
-<li><b>How can I use the book effectively?</b><br>You can use the book effectively by following the exercises and examples in the book, applying the concepts to real-life situations and joining a study group or a forum.</li>
-</ol>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md
DELETED
@@ -1,38 +0,0 @@
-<br />
-<h1>How to Download V-Ray SketchUp 2016 64 Bit Full Crack</h1>
-<p>V-Ray is a powerful rendering engine that can enhance the quality and realism of your 3D models and scenes. It is compatible with SketchUp, a popular 3D modeling and design software that can create stunning architectural and interior designs. If you want to download V-Ray SketchUp 2016 64 bit full crack for free, you are in the right place. In this article, we will show you how to download and install V-Ray SketchUp 2016 64 bit full crack on your PC.</p>
-<h2>download v-ray sketchup 2016 64 bit full crack</h2><br /><p><b><b>Download</b> >> <a href="https://byltly.com/2uKzZy">https://byltly.com/2uKzZy</a></b></p><br /><br />
-<h2>What is V-Ray SketchUp 2016 64 Bit Full Crack?</h2>
-<p>V-Ray SketchUp 2016 64 bit full crack is a cracked version of V-Ray SketchUp 2016 64 bit, which is a plugin that adds rendering capabilities to SketchUp. With V-Ray SketchUp 2016 64 bit full crack, you can render photorealistic images and animations with advanced lighting, materials, and camera settings. You can also use V-Ray SketchUp 2016 64 bit full crack to create realistic effects such as depth of field, motion blur, fog, caustics, and more.</p>
-<p>V-Ray SketchUp 2016 64 bit full crack has many features and benefits, such as:</p>
-<ul>
-<li>It has a friendly user interface and supports 4K monitors.</li>
-<li>It can render any type of natural or artificial lighting with a wide range of built-in light types.</li>
-<li>It can render photorealistic rooms and interiors with powerful and fast global illumination.</li>
-<li>It can emit light from any scene object to simulate real-world custom light shapes.</li>
-<li>It can simulate natural looking skies with realistic atmospheric depth.</li>
-<li>It can handle complex geometry and large scenes with ease.</li>
-<li>It can integrate with other SketchUp features and extensions.</li>
-<li>It can export VR-ready content for virtual reality devices.</li>
-</ul>
-<h2>How to Download V-Ray SketchUp 2016 64 Bit Full Crack?</h2>
-<p>To download V-Ray SketchUp 2016 64 bit full crack, you need to follow these steps:</p>
-<ol>
-<li>Click on this link to download V-Ray SketchUp 2016 64 bit full crack from Google Drive: <a href="https://www.ridhopedia.com/2019/11/download-sketchup-2016-vray-20-64-bit.html">Download V-Ray SketchUp 2016 64 bit full crack</a>.</li>
-<li>Extract the downloaded file with WinRAR or any other file compression software.</li>
-<li>Run the installer file "SketchUpPro-en-x64.exe" and follow the instructions to install SketchUp Pro 2016 on your PC.</li>
-<li>After the installation is complete, unzip the file "SketchUp Pro 2016 x64-patch.zip". Inside it, you will find a patcher file named "su2015-64-patch.exe".</li>
-<li>Copy and paste the patcher file to the folder where you installed SketchUp (by default, it is C:\\Program Files\\SketchUp\\SketchUp 2016).</li>
-<li>Run the patcher file as administrator and click on the patch button. You will see a message saying "Can not find the file. Search the file?". Click on "Yes".</li>
-<li>A new window will open. Browse to the folder "LayOut" (by default, it is C:\\Program Files\\SketchUp\\SketchUp 2016\\LayOut) and select the file "LayOut.exe". Click on "Open".</li>
-<li>The patcher will patch the file and show a message saying "The file has been patched!". Click on "OK".</li>
-<li>Repeat steps 6 to 8 for the files "Style Builder.exe" and "SketchUp.exe" in their respective folders.</li>
-<li>You have now successfully installed V-Ray SketchUp 2016 64 bit full crack on your PC.</li>
-</ol>
-<h2>How to Use V-Ray SketchUp 2016 64 Bit Full Crack?</h2>
-<p>To use V-Ray SketchUp 2016 64 bit full crack, you need to follow these steps:</p>
-<p></p>
-<ol>
-<li>Launch Sketch</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md
DELETED
@@ -1,12 +0,0 @@
-<br />
-<p>This text contains a list of file names and links related to Celemony Melodyne Editor, a software for editing audio files. The file names have different extensions, such as .rar, .zip, .html, and indicate the version number (v2.1.1.15), the release group (R2R), and the presence of a crack (a program that bypasses the software's copy protection). The file size is 84.8 MB for most of the files. The links at the end of the text point to websites that offer downloads of other files, such as a summary of biology for high school students in PDF format, a physics textbook for class 9 in PDF format, and a Hindi comedy movie in 720p resolution.</p>
-<h2>Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar</h2><br /><p><b><b>Download File</b> ››› <a href="https://imgfil.com/2uy0QP">https://imgfil.com/2uy0QP</a></b></p><br /><br />
-
-<p>Celemony Melodyne Editor is a software that allows users to manipulate audio files in various ways, such as changing the pitch, tempo, timing, and tone of individual notes or entire tracks. It can also correct intonation and timing errors, create harmonies and melodies, and transcribe audio into musical notation. Celemony Melodyne Editor is compatible with Windows and Mac operating systems, and can be used as a standalone application or as a plug-in for other audio editing software.</p>
-
-<p>The files listed in the text are compressed archives that contain the installation files and the crack for Celemony Melodyne Editor. A crack is a program that modifies the software's code to bypass its copy protection and allow users to use it without a license or activation key. However, using a crack is illegal and risky, as it may contain malware or viruses that can harm the user's computer or data. Moreover, using a cracked software may result in poor performance, errors, or compatibility issues with other software or hardware.</p>
-
-<p>The links at the end of the text are unrelated to Celemony Melodyne Editor and seem to be spam or phishing attempts. They direct the user to websites that offer downloads of other files that may be of interest to some users, such as educational materials or entertainment content. However, these websites may also contain malware or viruses that can harm the user's computer or data. Furthermore, downloading these files may infringe the intellectual property rights of the original authors or creators. Therefore, it is advisable to avoid clicking on these links and to delete the text.</p>
-<p></p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md
DELETED
@@ -1,14 +0,0 @@
-<h2>cubase 6 full version free download torrent</h2><br /><p><b><b>DOWNLOAD</b> ····· <a href="https://imgfil.com/2uxZsc">https://imgfil.com/2uxZsc</a></b></p><br /><br />
-<br />
-Current sounds can only be downloaded using the Steinberg Download Assistant. ... 1, MAC WINDOWS, Groove Agent ONE/SE/4 VST Toolkit, 800MB.... 3, MAC WINDOWS, Groove Agent SE/5 VST Toolkit, 2GB. ...
-4, MAC WINDOWS, Groove Agent SE/5 VST Toolkit, 2 GB
-Jul 12 2019 Download.
-Groove Agent SE 5.0 VST, AAX, AU WIN.OSX x86 x64 Release Year/Date: 05.2019 Version: 5.0 Developer: Steinberg Website
-Feb 7
-2014 · Groove Agent SE 5.0.
-Description: Steinberg Groove Agent puts at your disposal a set of tools and ... VST, AAX, AU
-Mar 9 2015 Download torrent for free.
-distribution statistics. ... 8a78ff9644<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md
DELETED
@@ -1,90 +0,0 @@
-
-<h1>CarX Drift Racing Lite Mod APK OBB: A Guide for Drift Racing Fans</h1>
-<p>Do you love drifting and racing games? Do you want to experience the thrill of driving realistic cars on challenging tracks? If yes, then you should try CarX Drift Racing Lite, a popular game that lets you enjoy the best of both worlds. And if you want to make the game even more fun and exciting, you should download CarX Drift Racing Lite Mod APK OBB, a modified version that gives you unlimited money, coins, cars, tracks, and more. In this article, we will tell you everything you need to know about CarX Drift Racing Lite and its mod apk obb version.</p>
-<h2>What is CarX Drift Racing Lite?</h2>
-<p>CarX Drift Racing Lite is a racing game that focuses on drifting, a driving technique where the driver intentionally oversteers the car to make it slide sideways. The game is developed by CarX Technologies, a company that specializes in creating realistic car physics and graphics for games. CarX Drift Racing Lite is a lite version of CarX Drift Racing, which means it has fewer cars, tracks, and features than the original game. However, it still offers a lot of fun and entertainment for drift racing fans.</p>
-<h2>carx drift racing lite mod apk obb</h2><br /><p><b><b>Download</b> ►►►►► <a href="https://urlin.us/2uT1lA">https://urlin.us/2uT1lA</a></b></p><br /><br />
-<h3>Features of CarX Drift Racing Lite</h3>
-<h4>Realistic physics and graphics</h4>
-<p>One of the main attractions of CarX Drift Racing Lite is its realistic physics and graphics. The game uses a sophisticated car physics engine that simulates the behavior of real cars on different surfaces and conditions. The game also has stunning graphics that create a immersive environment for the players. You can see the smoke, dust, sparks, and tire marks as you drift your car on the track. You can also feel the vibration and sound effects as you accelerate, brake, and steer your car.</p>
-<h4>Customizable cars and tracks</h4>
-<p>Another feature of CarX Drift Racing Lite is its customizable cars and tracks. The game allows you to choose from a variety of cars, each with its own characteristics and performance. You can also customize your car's appearance, color, wheels, engine, suspension, and more. You can also choose from different tracks, each with its own layout, difficulty, and scenery. You can also adjust the weather, time of day, and camera angle to suit your preference.</p>
-<h4>Online and offline modes</h4>
-<p>A third feature of CarX Drift Racing Lite is its online and offline modes. The game lets you play either online or offline, depending on your internet connection and mood. If you play online, you can compete with other players from around the world in various modes such as time attack, ghost mode, or multiplayer mode. You can also chat with other players and share your replays and screenshots. If you play offline, you can practice your skills in single-player mode or challenge yourself in career mode.</p>
-<h2>Why download CarX Drift Racing Lite Mod APK OBB?</h2>
-<p>If you are already enjoying CarX Drift Racing Lite, you might wonder why you should download CarX Drift Racing Lite Mod APK OBB. Well, the answer is simple: because it makes the game even better. CarX Drift Racing Lite Mod APK OBB is a modified version of the game that gives you access to unlimited money <h4>No ads and no root required</h4>
-<p>With CarX Drift Racing Lite Mod APK OBB, you don't have to deal with annoying ads and pop-ups that interrupt your gameplay. You can enjoy the game without any distractions or interruptions. You also don't need to root your device to install the mod apk obb files. You can simply follow the instructions below and enjoy the game safely and smoothly.</p>
-<h3>How to download and install CarX Drift Racing Lite Mod APK OBB?</h3>
-<h4>Step 1: Download the mod apk and obb files from a trusted source</h4>
-<p>The first step is to download the mod apk and obb files from a trusted source. You can use the link provided at the end of this article to download the files. Make sure you have enough storage space on your device before downloading the files.</p>
-<h4>Step 2: Enable unknown sources on your device settings</h4>
-<p>The second step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may also need to disable any antivirus or security apps that may interfere with the installation process.</p>
-<p>carx drift racing lite mod apk obb download<br />
-carx drift racing lite mod apk obb unlimited money<br />
-carx drift racing lite mod apk obb latest version<br />
-carx drift racing lite mod apk obb android 1<br />
-carx drift racing lite mod apk obb revdl<br />
-carx drift racing lite mod apk obb rexdl<br />
-carx drift racing lite mod apk obb offline<br />
-carx drift racing lite mod apk obb hack<br />
-carx drift racing lite mod apk obb free<br />
-carx drift racing lite mod apk obb data<br />
-carx drift racing lite mod apk obb file<br />
-carx drift racing lite mod apk obb full<br />
-carx drift racing lite mod apk obb mega<br />
-carx drift racing lite mod apk obb mediafire<br />
-carx drift racing lite mod apk obb google drive<br />
-carx drift racing lite mod apk obb 2023<br />
-carx drift racing lite mod apk obb update<br />
-carx drift racing lite mod apk obb new<br />
-carx drift racing lite mod apk obb best<br />
-carx drift racing lite mod apk obb premium<br />
-carx drift racing lite mod apk obb pro<br />
-carx drift racing lite mod apk obb vip<br />
-carx drift racing lite mod apk obb unlocked<br />
-carx drift racing lite mod apk obb all cars<br />
-carx drift racing lite mod apk obb no ads<br />
-carx drift racing lite mod apk obb no root<br />
-carx drift racing lite mod apk obb no verification<br />
-carx drift racing lite mod apk obb no survey<br />
-carx drift racing lite mod apk obb easy install<br />
-carx drift racing lite mod apk obb direct link<br />
-carx drift racing lite mod apk obb high quality<br />
-carx drift racing lite mod apk obb realistic graphics<br />
-carx drift racing lite mod apk obb smooth gameplay<br />
-carx drift racing lite mod apk obb awesome features<br />
-carx drift racing lite mod apk obb fun modes<br />
-carx drift racing lite mod apk obb online multiplayer<br />
-carx drift racing lite mod apk obb custom cars<br />
-carx drift racing lite mod apk obb tuning options<br />
-carx drift racing lite mod apk obb drifting physics<br />
-carx drift racing lite mod apk obb sound effects<br />
-carx drift racing lite mod apk obb music tracks<br />
-carx drift racing lite mod apk obb leaderboards<br />
-carx drift racing lite mod apk obb achievements<br />
-carx drift racing lite mod apk obb rewards<br />
-carx drift racing lite mod apk obb cheats<br />
-carx drift racing lite mod apk obb tips tricks<br />
-carx drift racing lite mod apk obb guide tutorial<br />
-carx drift racing lite mod apk obb review rating<br />
-carx drift racing lite mod apk obb gameplay video</p>
-<h4>Step 3: Install the mod apk file and extract the obb file to the Android/obb folder</h4>
-<p>The third step is to install the mod apk file and extract the obb file to the Android/obb folder. To do this, locate the downloaded files on your device, then tap on the mod apk file and follow the instructions to install it. Then, use a file manager app to extract the obb file to the Android/obb folder. If you don't have a file manager app, you can download one from the Google Play Store. Make sure you create a folder named com.CarXTech.CarXDriftRacingLite inside the Android/obb folder and place the extracted obb file there.</p>
-<h2>Conclusion</h2>
-<p>CarX Drift Racing Lite is a great game for drift racing fans who want to experience realistic physics and graphics, customizable cars and tracks, and online and offline modes. However, if you want to make the game even more enjoyable and exciting, you should download CarX Drift Racing Lite Mod APK OBB, which gives you unlimited money, coins, cars, tracks, and more. You can download CarX Drift Racing Lite Mod APK OBB from the link below and follow the steps above to install it on your device. Have fun drifting and racing!</p>
-<h3>FAQs</h3>
-<p>Here are some of the frequently asked questions about CarX Drift Racing Lite Mod APK OBB:</p>
-<ul>
-<li><b>Is CarX Drift Racing Lite Mod APK OBB safe to use?</b></li>
-<p>Yes, CarX Drift Racing Lite Mod APK OBB is safe to use as long as you download it from a trusted source and follow the installation instructions carefully. However, you should always be careful when downloading and installing any mod apk obb files from unknown sources as they may contain viruses or malware that can harm your device.</p>
-<li><b>Is CarX Drift Racing Lite Mod APK OBB compatible with my device?</b></li>
-<p>CarX Drift Racing Lite Mod APK OBB is compatible with most Android devices that have Android 4.1 or higher versions. However, some devices may not support some of the features or functions of the game due to hardware or software limitations.</p>
-<li><b>Can I play CarX Drift Racing Lite Mod APK OBB with my friends?</b></li>
-<p>Yes, you can play CarX Drift Racing Lite Mod APK OBB with your friends online in multiplayer mode. You can also chat with them and share your replays and screenshots.</p>
-<li><b>Can I update CarX Drift Racing Lite Mod APK OBB?</b></li>
-<p>No, you cannot update CarX Drift Racing Lite Mod APK OBB as it is a modified version of the game that may not be compatible with the latest updates from the official developers. If you want to update the game, you will have to uninstall the mod apk obb files and install the original version from the Google Play Store.</p>
-<li><b>Where can I download CarX Drift Racing Lite Mod APK OBB?</b></li>
-<p>You can download CarX Drift Racing Lite Mod APK OBB from this link: <a href="">CarX Drift Racing Lite Mod APK OBB Download</a>.</p>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md
DELETED
@@ -1,110 +0,0 @@
-<br />
-<h1>Euro Truck Simulator 2 For Mobile - Everything You Need to Know</h1>
-<p>Do you love driving trucks and exploring new places? Do you want to experience the thrill of being a truck driver from the comfort of your home? If you answered yes to any of these questions, then you should definitely check out Euro Truck Simulator 2, one of the most popular and realistic truck driving simulator games ever made. And the best part is, you can now play it on your mobile device thanks to ets2.mobi, a website that offers ETS2 for Android and iOS. In this article, we will tell you everything you need to know about Euro Truck Simulator 2 for mobile, including what it is, how to download and install it, how to play it, and why you should try it today.</p>
-<h2>ets2 mobi</h2><br /><p><b><b>DOWNLOAD</b> ✸ <a href="https://urlin.us/2uSUVd">https://urlin.us/2uSUVd</a></b></p><br /><br />
-<h2>What is Euro Truck Simulator 2?</h2>
-<p>Euro Truck Simulator 2, or ETS2 for short, is a game that simulates the life of a truck driver in Europe. It was developed and published by SCS Software, a Czech company that specializes in creating simulation games. ETS2 was released in 2012 for Windows, Linux, and Mac OS, and has since received many updates and expansions that added new features, content, and improvements. ETS2 has three main aspects that make it so appealing and realistic: a truck driving simulator, a huge map of Europe, and a variety of trucks and customization options.</p>
-<h3>A realistic truck driving simulator game</h3>
-<p>ETS2 is not just a game where you drive a truck from point A to point B. It is a game where you have to follow the rules of the road, deal with traffic, weather, fuel consumption, fatigue, cargo delivery, fines, repairs, and more. You have to plan your routes carefully, choose the best contracts, manage your finances, hire drivers, buy garages, and grow your own trucking company. You also have to take care of your truck, which can get damaged or break down if you drive recklessly or neglect maintenance. You can also customize your truck with different parts, accessories, paint jobs, decals, and more.</p>
-<h3>A huge map of Europe to explore</h3>
-<p>ETS2 features a massive map of Europe that covers over 70 cities in 13 countries. You can drive across different landscapes, such as mountains, forests, fields, deserts, coasts, and urban areas. You can also visit famous landmarks, such as the Eiffel Tower in Paris, the Brandenburg Gate in Berlin, the Colosseum in Rome, and more. The map is constantly updated with new regions and roads that add more diversity and realism to the game. You can also download mods that add even more countries and locations to the game.</p>
-<h3>A variety of trucks and customization options</h3>
-<p>ETS2 offers a wide range of trucks from different manufacturers, such as Mercedes-Benz, Volvo, Scania, MAN, DAF, Renault, Iveco, and more. Each truck has its own specifications, performance, handling, sound effects, and interior design. You can also customize your truck with different parts, accessories, paint jobs, decals, and more. You can also download mods that add new trucks or modify existing ones.</p>
-<h2>What is ets2.mobi?</h2>
-<p>ets2.mobi is a website that offers Euro Truck Simulator 2 for mobile devices. It allows you to download and install ETS2 on your Android or iOS phone or tablet without any hassle. You don't need to root or jailbreak your device or use any complicated software or hardware. You just need to follow a few simple steps and you will be able to enjoy ETS2 on your mobile device in no time.</p>
-<h3>How to download and install ETS2 on your phone or tablet</h3>
-<p>Downloading and installing ETS2 on your mobile device is very easy and fast. Here are the steps you need to follow:</p>
-<p>ets2 mobile apk download<br />
-ets2 android gameplay<br />
-ets2 ios app<br />
-ets2 licensed trucks<br />
-ets2 customization options<br />
-ets2 advanced driving physics<br />
-ets2 truck driving simulator<br />
-ets2 official website<br />
-ets2 modhub<br />
-ets2 mods download<br />
-ets2 best mods<br />
-ets2 realistic mods<br />
-ets2 map mods<br />
-ets2 traffic mods<br />
-ets2 sound mods<br />
-ets2 graphics mods<br />
-ets2 tuning mods<br />
-ets2 trailer mods<br />
-ets2 skin mods<br />
-ets2 truck mods<br />
-ets2 multiplayer mod<br />
-ets2 online mod<br />
-ets2 promods<br />
-ets2 rusmap<br />
-ets2 balkans map<br />
-ets2 scandinavia dlc<br />
-ets2 going east dlc<br />
-ets2 vive la france dlc<br />
-ets2 italia dlc<br />
-ets2 beyond the baltic sea dlc<br />
-ets2 road to the black sea dlc<br />
-ets2 iberia dlc<br />
-ets2 heart of russia dlc<br />
-ets2 cabin accessories dlc<br />
-ets2 wheel tuning pack dlc<br />
-ets2 mighty griffin tuning pack dlc<br />
-ets2 heavy cargo pack dlc<br />
-ets2 special transport dlc<br />
-ets2 high power cargo pack dlc<br />
-ets2 krone trailer pack dlc<br />
-ets2 schwarzmuller trailer pack dlc<br />
-ets2 michelin fan pack dlc<br />
-ets2 goodyear tyres pack dlc<br />
-ets2 actros tuning pack dlc<br />
-ets2 fh tuning pack dlc</p>
-<ol>
-<li>Go to ets2.mobi on your mobile browser and click on the download button.</li>
-<li>Choose your device type (Android or iOS) and wait for the download to finish.</li>
-<li>Open the downloaded file and follow the instructions to install ETS2 on your device.</li>
-<li>Launch the game and enjoy playing ETS2 on your mobile device.</li>
-</ol>
-<p>Note: You may need to enable unknown sources or trust the app in your device settings before installing ETS2. This is a normal procedure for installing apps from outside the official app stores and it does not harm your device or data in any way.</p>
-<h3>The features and benefits of playing ETS2 on mobile</h3>
-<p>Playing ETS2 on your mobile device has many advantages over playing it on a PC or console. Here are some of them:</p>
-<ul>
-<li>You can play ETS2 anytime and anywhere you want, as long as you have your mobile device with you.</li>
-<li>You can save space and money, as you don't need to buy or maintain a PC or console to play ETS2.</li>
-<li>You can enjoy the same graphics, gameplay, and content as the PC version of ETS2, as ets2.mobi uses a special technology that optimizes the game for mobile devices without compromising quality or performance.</li>
-<li>You can connect with other players online and join multiplayer sessions, chat with them, share your progress, and more.</li>
-<li>You can access exclusive features and bonuses that are only available for mobile users, such as special trucks, skins, events, rewards, and more.</li>
-</ul>
-<h2>How to play ETS2 on mobile?</h2>
-<p>Playing ETS2 on your mobile device is very similar to playing it on a PC or console. You just need to learn the controls and interface of the game and you will be ready to hit the road. Here are some tips and tricks to help you get started:</p>
-<h3>The controls and interface of ETS2 on mobile</h3>
-<p>The controls and interface of ETS2 on mobile are designed to be intuitive and user-friendly. You can choose between different control modes, such as tilt, touch, or steering wheel. You can also customize the buttons, sensitivity, and layout of the controls according to your preference. You can also use voice commands to control some functions of the game, such as navigation, radio, or horn.</p>
-<p>The interface of ETS2 on mobile consists of various elements that display important information and options for the game. You can see your speedometer, fuel gauge, damage indicator, map, GPS, mirrors, dashboard, and more. You can also access the menu, settings, profile, achievements, statistics, leaderboards, and more. You can also interact with various objects and characters in the game, such as toll booths, gas stations, rest areas, traffic lights, pedestrians, police officers, and more.</p>
-<h3>The game modes and challenges of ETS2 on mobile</h3>
-<p>ETS2 on mobile offers various game modes and challenges that suit different play styles and preferences. You can choose between different difficulty levels, such as easy, normal, or hard, depending on how realistic and challenging you want the game to be. You can also choose between different game modes, such as: - Career mode: This is the main mode of the game, where you start as a rookie driver and work your way up to become a successful trucker. You have to complete various contracts, deliver cargo, earn money, buy and upgrade trucks, hire drivers, and expand your business. You can also customize your profile, choose your preferred truck brand, and join a company of your choice. - Free mode: This is a mode where you can drive freely across the map without any time or money constraints. You can explore different regions, visit landmarks, test different trucks, and enjoy the scenery. You can also switch between day and night, change the weather, and adjust the traffic density. - Challenge mode: This is a mode where you can test your skills and compete with other players in various challenges, such as parking, racing, cargo delivery, fuel economy, and more. You can also create your own challenges and share them with other players online. <h3>The tips and tricks to enjoy ETS2 on mobile</h3>
-<p>ETS2 on mobile is a fun and immersive game that can keep you entertained for hours. However, it can also be challenging and frustrating at times, especially if you are new to the game or not familiar with the controls. Here are some tips and tricks that can help you enjoy ETS2 on mobile more: - Follow the tutorial: The game offers a tutorial that teaches you the basics of the game, such as how to drive, park, deliver cargo, use the GPS, and more. It is highly recommended that you follow the tutorial before you start playing the game, as it will help you avoid many mistakes and problems later on. - Adjust the settings: The game allows you to adjust various settings that can affect your gameplay experience, such as graphics quality, sound volume, control mode, sensitivity, language, units, and more. You should experiment with different settings and find the ones that suit your device and preference best. - Save frequently: The game has an autosave feature that saves your progress every time you complete a contract or enter a new city. However, it is also advisable that you manually save your game often, especially before you start a long or difficult journey. This way, you can avoid losing your progress or money if something goes wrong or if the game crashes. - Drive carefully: The game simulates realistic driving physics and mechanics, which means that you have to drive carefully and follow the rules of the road. You have to pay attention to your speed limit, traffic signs, signals, lanes, pedestrians, and other vehicles. You also have to watch out for your fuel level, damage, fatigue, and cargo weight. If you drive recklessly or break the law, you can get fined, lose your cargo, damage your truck, or even cause accidents. You can also use the cruise control, speed limiter, and brake assist features to help you drive more smoothly and safely. - Use the GPS: The game provides you with a GPS system that shows you the best route to your destination, the distance and time remaining, the speed limit, and the traffic conditions. You can also use the map view to see the whole map of Europe and plan your routes ahead. You can also set waypoints, zoom in and out, and switch between 2D and 3D modes. The GPS is a very useful tool that can help you navigate the roads and avoid getting lost or stuck. - Enjoy the scenery: The game features stunning graphics and realistic sound effects that create a immersive atmosphere for the game. You can see the changing landscapes, weather, seasons, day and night cycles, and more. You can also listen to the radio, which offers various stations that play different genres of music and news. You can also use the photo mode to take pictures of your truck or the scenery and share them with other players online. <h2>Conclusion</h2>
-<p>Euro Truck Simulator 2 is a game that lets you experience the life of a truck driver in Europe. You can drive across different countries, deliver cargo, earn money, buy and upgrade trucks, hire drivers, and grow your own trucking company. You can also customize your truck with different parts, accessories, paint jobs, decals, and more. You can also download mods that add new trucks or modify existing ones.</p>
-<p>ETS2 is now available for mobile devices thanks to ets2.mobi, a website that offers ETS2 for Android and iOS. You can download and install ETS2 on your phone or tablet without any hassle. You can enjoy the same graphics, gameplay, and content as the PC version of ETS2, as well as exclusive features and bonuses for mobile users. You can also connect with other players online and join multiplayer sessions, chat with them, share your progress, and more.</p>
-<p>ETS2 is a fun and immersive game that can keep you entertained for hours. However, it can also be challenging and frustrating at times, especially if you are new to the game or not familiar with the controls. That's why we have provided you with some tips and tricks that can help you enjoy ETS2 on mobile more.</p>
-<p>If you love driving trucks and exploring new places, then you should definitely try ETS2 on mobile today. It is a game that will make you feel like a real truck driver in Europe.</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about ETS2 on mobile:</p>
-<ol>
-<li>Is ETS2 on mobile free?</li>
-<p>Yes, ETS2 on mobile is free to download and play. However, it may contain some in-app purchases or ads that can enhance your gameplay experience or support the developers.</p>
-<li>Is ETS2 on mobile safe?</li>
-<p>Yes, ETS2 on mobile is safe to download and install on your device. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from ets2.mobi or other trusted sources to avoid any risks.</p>
-<li>Is ETS2 on mobile compatible with my device?</li>
-<p>ETS2 on mobile is compatible with most Android and iOS devices that have at least 4 GB of RAM and 3 GB of free storage space. However, some devices may have different performance or compatibility issues depending on their specifications or settings.</p>
-<li>Can I play ETS2 on mobile offline?</li>
-<p>Yes, you can play ETS2 on mobile offline without an internet connection. However, some features or functions may not work properly or be available offline, such as multiplayer mode, online leaderboards, updates, or downloads. You also need an internet connection to verify your game license and activate it on your device.</p>
-<li>How can I contact the developers or report a bug?</li>
-<p>If you have any questions, feedback, suggestions, or issues regarding ETS2 on mobile, you can contact the developers or report a bug through the following channels:</p>
-<ul>
-<li>Email: [email protected]</li>
-<li>Facebook: https://www.facebook.com/ets2mobi</li>
-<li>Twitter: https://twitter.com/ets2mobi</li>
-<li>Instagram: https://www.instagram.com/ets2mobi</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/2023Liu2023/bingo/src/components/chat-list.tsx
DELETED
@@ -1,28 +0,0 @@
-import React from 'react'
-
-import { Separator } from '@/components/ui/separator'
-import { ChatMessage } from '@/components/chat-message'
-import { ChatMessageModel } from '@/lib/bots/bing/types'
-
-export interface ChatList {
-  messages: ChatMessageModel[]
-}
-
-export function ChatList({ messages }: ChatList) {
-  if (!messages.length) {
-    return null
-  }
-
-  return (
-    <div className="chat-container relative flex flex-col">
-      {messages.map((message, index) => (
-        <React.Fragment key={index}>
-          <ChatMessage message={message} />
-          {index < messages.length - 1 && (
-            <Separator className="my-2" />
-          )}
-        </React.Fragment>
-      ))}
-    </div>
-  )
-}
spaces/360macky/first-space/app.py
DELETED
@@ -1,5 +0,0 @@
-import streamlit as st
-
-x = st.slider('Select a value')
-st.write(x, 'squared is', x * x)
-
|
|
|
|
|
|
|
|
|
|
|
|
spaces/4Taps/SadTalker/src/test_audio2coeff.py
DELETED
@@ -1,87 +0,0 @@
-import os
-import torch
-import numpy as np
-from scipy.io import savemat
-from yacs.config import CfgNode as CN
-from scipy.signal import savgol_filter
-
-from src.audio2pose_models.audio2pose import Audio2Pose
-from src.audio2exp_models.networks import SimpleWrapperV2
-from src.audio2exp_models.audio2exp import Audio2Exp
-
-def load_cpk(checkpoint_path, model=None, optimizer=None, device="cpu"):
-    checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
-    if model is not None:
-        model.load_state_dict(checkpoint['model'])
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint['optimizer'])
-
-    return checkpoint['epoch']
-
-class Audio2Coeff():
-
-    def __init__(self, audio2pose_checkpoint, audio2pose_yaml_path,
-                 audio2exp_checkpoint, audio2exp_yaml_path,
-                 wav2lip_checkpoint, device):
-        #load config
-        fcfg_pose = open(audio2pose_yaml_path)
-        cfg_pose = CN.load_cfg(fcfg_pose)
-        cfg_pose.freeze()
-        fcfg_exp = open(audio2exp_yaml_path)
-        cfg_exp = CN.load_cfg(fcfg_exp)
-        cfg_exp.freeze()
-
-        # load audio2pose_model
-        self.audio2pose_model = Audio2Pose(cfg_pose, wav2lip_checkpoint, device=device)
-        self.audio2pose_model = self.audio2pose_model.to(device)
-        self.audio2pose_model.eval()
-        for param in self.audio2pose_model.parameters():
-            param.requires_grad = False
-        try:
-            load_cpk(audio2pose_checkpoint, model=self.audio2pose_model, device=device)
-        except:
-            raise Exception("Failed in loading audio2pose_checkpoint")
-
-        # load audio2exp_model
-        netG = SimpleWrapperV2()
-        netG = netG.to(device)
-        for param in netG.parameters():
-            netG.requires_grad = False
-        netG.eval()
-        try:
-            load_cpk(audio2exp_checkpoint, model=netG, device=device)
-        except:
-            raise Exception("Failed in loading audio2exp_checkpoint")
-        self.audio2exp_model = Audio2Exp(netG, cfg_exp, device=device, prepare_training_loss=False)
-        self.audio2exp_model = self.audio2exp_model.to(device)
-        for param in self.audio2exp_model.parameters():
-            param.requires_grad = False
-        self.audio2exp_model.eval()
-
-        self.device = device
-
-    def generate(self, batch, coeff_save_dir, pose_style):
-
-        with torch.no_grad():
-            #test
-            results_dict_exp = self.audio2exp_model.test(batch)
-            exp_pred = results_dict_exp['exp_coeff_pred']  #bs T 64
-
-            #for class_id in range(1):
-            #class_id = 0#(i+10)%45
-            #class_id = random.randint(0,46)  #46 styles can be selected
-            batch['class'] = torch.LongTensor([pose_style]).to(self.device)
-            results_dict_pose = self.audio2pose_model.test(batch)
-            pose_pred = results_dict_pose['pose_pred']  #bs T 6
-
-            pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), 13, 2, axis=1)).to(self.device)
-            coeffs_pred = torch.cat((exp_pred, pose_pred), dim=-1)  #bs T 70
-
-            coeffs_pred_numpy = coeffs_pred[0].clone().detach().cpu().numpy()
-
-            savemat(os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])),
-                    {'coeff_3dmm': coeffs_pred_numpy})
-
-            return os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name']))
-
-
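The deleted `test_audio2coeff.py` above smooths the predicted head-pose sequence with a Savitzky–Golay filter before concatenating it with the expression coefficients. Below is a minimal sketch of that one smoothing step on synthetic data rather than real model output; the window length 13 and polynomial order 2 come from the file, while the array itself and its shape are illustrative only.

```python
import numpy as np
from scipy.signal import savgol_filter

# Synthetic stand-in for a predicted pose sequence: T frames x 6 pose values.
T = 200
rng = np.random.default_rng(0)
pose_pred = np.cumsum(rng.normal(scale=0.05, size=(T, 6)), axis=0)  # jittery trajectory

# Same filter settings as the deleted file (window 13, polyorder 2), applied along
# the time axis; the original filters a (batch, T, 6) array on axis=1.
pose_smooth = savgol_filter(pose_pred, window_length=13, polyorder=2, axis=0)

print(pose_pred.shape, pose_smooth.shape)      # (200, 6) (200, 6)
print(np.abs(pose_pred - pose_smooth).mean())  # high-frequency jitter is reduced
```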
spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py
DELETED
@@ -1,346 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import logging
|
3 |
-
|
4 |
-
logger = logging.getLogger(__name__)
|
5 |
-
|
6 |
-
import librosa
|
7 |
-
import numpy as np
|
8 |
-
import soundfile as sf
|
9 |
-
import torch
|
10 |
-
|
11 |
-
from infer.lib.uvr5_pack.lib_v5 import nets_61968KB as Nets
|
12 |
-
from infer.lib.uvr5_pack.lib_v5 import spec_utils
|
13 |
-
from infer.lib.uvr5_pack.lib_v5.model_param_init import ModelParameters
|
14 |
-
from infer.lib.uvr5_pack.lib_v5.nets_new import CascadedNet
|
15 |
-
from infer.lib.uvr5_pack.utils import inference
|
16 |
-
|
17 |
-
|
18 |
-
class AudioPre:
|
19 |
-
def __init__(self, agg, model_path, device, is_half):
|
20 |
-
self.model_path = model_path
|
21 |
-
self.device = device
|
22 |
-
self.data = {
|
23 |
-
# Processing Options
|
24 |
-
"postprocess": False,
|
25 |
-
"tta": False,
|
26 |
-
# Constants
|
27 |
-
"window_size": 512,
|
28 |
-
"agg": agg,
|
29 |
-
"high_end_process": "mirroring",
|
30 |
-
}
|
31 |
-
mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json")
|
32 |
-
model = Nets.CascadedASPPNet(mp.param["bins"] * 2)
|
33 |
-
cpk = torch.load(model_path, map_location="cpu")
|
34 |
-
model.load_state_dict(cpk)
|
35 |
-
model.eval()
|
36 |
-
if is_half:
|
37 |
-
model = model.half().to(device)
|
38 |
-
else:
|
39 |
-
model = model.to(device)
|
40 |
-
|
41 |
-
self.mp = mp
|
42 |
-
self.model = model
|
43 |
-
|
44 |
-
def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
|
45 |
-
if ins_root is None and vocal_root is None:
|
46 |
-
return "No save root."
|
47 |
-
name = os.path.basename(music_file)
|
48 |
-
if ins_root is not None:
|
49 |
-
os.makedirs(ins_root, exist_ok=True)
|
50 |
-
if vocal_root is not None:
|
51 |
-
os.makedirs(vocal_root, exist_ok=True)
|
52 |
-
X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
|
53 |
-
bands_n = len(self.mp.param["band"])
|
54 |
-
# print(bands_n)
|
55 |
-
for d in range(bands_n, 0, -1):
|
56 |
-
bp = self.mp.param["band"][d]
|
57 |
-
if d == bands_n: # high-end band
|
58 |
-
(
|
59 |
-
X_wave[d],
|
60 |
-
_,
|
61 |
-
) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑
|
62 |
-
music_file,
|
63 |
-
bp["sr"],
|
64 |
-
False,
|
65 |
-
dtype=np.float32,
|
66 |
-
res_type=bp["res_type"],
|
67 |
-
)
|
68 |
-
if X_wave[d].ndim == 1:
|
69 |
-
X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
|
70 |
-
else: # lower bands
|
71 |
-
X_wave[d] = librosa.core.resample(
|
72 |
-
X_wave[d + 1],
|
73 |
-
self.mp.param["band"][d + 1]["sr"],
|
74 |
-
bp["sr"],
|
75 |
-
res_type=bp["res_type"],
|
76 |
-
)
|
77 |
-
# Stft of wave source
|
78 |
-
X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
|
79 |
-
X_wave[d],
|
80 |
-
bp["hl"],
|
81 |
-
bp["n_fft"],
|
82 |
-
self.mp.param["mid_side"],
|
83 |
-
self.mp.param["mid_side_b2"],
|
84 |
-
self.mp.param["reverse"],
|
85 |
-
)
|
86 |
-
# pdb.set_trace()
|
87 |
-
if d == bands_n and self.data["high_end_process"] != "none":
|
88 |
-
input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
|
89 |
-
self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
|
90 |
-
)
|
91 |
-
input_high_end = X_spec_s[d][
|
92 |
-
:, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
|
93 |
-
]
|
94 |
-
|
95 |
-
X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
|
96 |
-
aggresive_set = float(self.data["agg"] / 100)
|
97 |
-
aggressiveness = {
|
98 |
-
"value": aggresive_set,
|
99 |
-
"split_bin": self.mp.param["band"][1]["crop_stop"],
|
100 |
-
}
|
101 |
-
with torch.no_grad():
|
102 |
-
pred, X_mag, X_phase = inference(
|
103 |
-
X_spec_m, self.device, self.model, aggressiveness, self.data
|
104 |
-
)
|
105 |
-
# Postprocess
|
106 |
-
if self.data["postprocess"]:
|
107 |
-
pred_inv = np.clip(X_mag - pred, 0, np.inf)
|
108 |
-
pred = spec_utils.mask_silence(pred, pred_inv)
|
109 |
-
y_spec_m = pred * X_phase
|
110 |
-
v_spec_m = X_spec_m - y_spec_m
|
111 |
-
|
112 |
-
if ins_root is not None:
|
113 |
-
if self.data["high_end_process"].startswith("mirroring"):
|
114 |
-
input_high_end_ = spec_utils.mirroring(
|
115 |
-
self.data["high_end_process"], y_spec_m, input_high_end, self.mp
|
116 |
-
)
|
117 |
-
wav_instrument = spec_utils.cmb_spectrogram_to_wave(
|
118 |
-
y_spec_m, self.mp, input_high_end_h, input_high_end_
|
119 |
-
)
|
120 |
-
else:
|
121 |
-
wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
|
122 |
-
logger.info("%s instruments done" % name)
|
123 |
-
if format in ["wav", "flac"]:
|
124 |
-
sf.write(
|
125 |
-
os.path.join(
|
126 |
-
ins_root,
|
127 |
-
"instrument_{}_{}.{}".format(name, self.data["agg"], format),
|
128 |
-
),
|
129 |
-
(np.array(wav_instrument) * 32768).astype("int16"),
|
130 |
-
self.mp.param["sr"],
|
131 |
-
) #
|
132 |
-
else:
|
133 |
-
path = os.path.join(
|
134 |
-
ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
|
135 |
-
)
|
136 |
-
sf.write(
|
137 |
-
path,
|
138 |
-
(np.array(wav_instrument) * 32768).astype("int16"),
|
139 |
-
self.mp.param["sr"],
|
140 |
-
)
|
141 |
-
if os.path.exists(path):
|
142 |
-
os.system(
|
143 |
-
"ffmpeg -i %s -vn %s -q:a 2 -y"
|
144 |
-
% (path, path[:-4] + ".%s" % format)
|
145 |
-
)
|
146 |
-
if vocal_root is not None:
|
147 |
-
if self.data["high_end_process"].startswith("mirroring"):
|
148 |
-
input_high_end_ = spec_utils.mirroring(
|
149 |
-
self.data["high_end_process"], v_spec_m, input_high_end, self.mp
|
150 |
-
)
|
151 |
-
wav_vocals = spec_utils.cmb_spectrogram_to_wave(
|
152 |
-
v_spec_m, self.mp, input_high_end_h, input_high_end_
|
153 |
-
)
|
154 |
-
else:
|
155 |
-
wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
|
156 |
-
logger.info("%s vocals done" % name)
|
157 |
-
if format in ["wav", "flac"]:
|
158 |
-
sf.write(
|
159 |
-
os.path.join(
|
160 |
-
vocal_root,
|
161 |
-
"vocal_{}_{}.{}".format(name, self.data["agg"], format),
|
162 |
-
),
|
163 |
-
(np.array(wav_vocals) * 32768).astype("int16"),
|
164 |
-
self.mp.param["sr"],
|
165 |
-
)
|
166 |
-
else:
|
167 |
-
path = os.path.join(
|
168 |
-
vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
|
169 |
-
)
|
170 |
-
sf.write(
|
171 |
-
path,
|
172 |
-
(np.array(wav_vocals) * 32768).astype("int16"),
|
173 |
-
self.mp.param["sr"],
|
174 |
-
)
|
175 |
-
if os.path.exists(path):
|
176 |
-
os.system(
|
177 |
-
"ffmpeg -i %s -vn %s -q:a 2 -y"
|
178 |
-
% (path, path[:-4] + ".%s" % format)
|
179 |
-
)
|
180 |
-
|
181 |
-
|
182 |
-
class AudioPreDeEcho:
|
183 |
-
def __init__(self, agg, model_path, device, is_half):
|
184 |
-
self.model_path = model_path
|
185 |
-
self.device = device
|
186 |
-
self.data = {
|
187 |
-
# Processing Options
|
188 |
-
"postprocess": False,
|
189 |
-
"tta": False,
|
190 |
-
# Constants
|
191 |
-
"window_size": 512,
|
192 |
-
"agg": agg,
|
193 |
-
"high_end_process": "mirroring",
|
194 |
-
}
|
195 |
-
mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json")
|
196 |
-
nout = 64 if "DeReverb" in model_path else 48
|
197 |
-
model = CascadedNet(mp.param["bins"] * 2, nout)
|
198 |
-
cpk = torch.load(model_path, map_location="cpu")
|
199 |
-
model.load_state_dict(cpk)
|
200 |
-
model.eval()
|
201 |
-
if is_half:
|
202 |
-
model = model.half().to(device)
|
203 |
-
else:
|
204 |
-
model = model.to(device)
|
205 |
-
|
206 |
-
self.mp = mp
|
207 |
-
self.model = model
|
208 |
-
|
209 |
-
def _path_audio_(
|
210 |
-
self, music_file, vocal_root=None, ins_root=None, format="flac"
|
211 |
-
): # 3个VR模型vocal和ins是反的
|
212 |
-
if ins_root is None and vocal_root is None:
|
213 |
-
return "No save root."
|
214 |
-
name = os.path.basename(music_file)
|
215 |
-
if ins_root is not None:
|
216 |
-
os.makedirs(ins_root, exist_ok=True)
|
217 |
-
if vocal_root is not None:
|
218 |
-
os.makedirs(vocal_root, exist_ok=True)
|
219 |
-
X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
|
220 |
-
bands_n = len(self.mp.param["band"])
|
221 |
-
# print(bands_n)
|
222 |
-
for d in range(bands_n, 0, -1):
|
223 |
-
bp = self.mp.param["band"][d]
|
224 |
-
if d == bands_n: # high-end band
|
225 |
-
(
|
226 |
-
X_wave[d],
|
227 |
-
_,
|
228 |
-
) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑
|
229 |
-
music_file,
|
230 |
-
bp["sr"],
|
231 |
-
False,
|
232 |
-
dtype=np.float32,
|
233 |
-
res_type=bp["res_type"],
|
234 |
-
)
|
235 |
-
if X_wave[d].ndim == 1:
|
236 |
-
X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
|
237 |
-
else: # lower bands
|
238 |
-
X_wave[d] = librosa.core.resample(
|
239 |
-
X_wave[d + 1],
|
240 |
-
self.mp.param["band"][d + 1]["sr"],
|
241 |
-
bp["sr"],
|
242 |
-
res_type=bp["res_type"],
|
243 |
-
)
|
244 |
-
# Stft of wave source
|
245 |
-
X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
|
246 |
-
X_wave[d],
|
247 |
-
bp["hl"],
|
248 |
-
bp["n_fft"],
|
249 |
-
self.mp.param["mid_side"],
|
250 |
-
self.mp.param["mid_side_b2"],
|
251 |
-
self.mp.param["reverse"],
|
252 |
-
)
|
253 |
-
# pdb.set_trace()
|
254 |
-
if d == bands_n and self.data["high_end_process"] != "none":
|
255 |
-
input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
|
256 |
-
self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
|
257 |
-
)
|
258 |
-
input_high_end = X_spec_s[d][
|
259 |
-
:, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
|
260 |
-
]
|
261 |
-
|
262 |
-
X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
|
263 |
-
aggresive_set = float(self.data["agg"] / 100)
|
264 |
-
aggressiveness = {
|
265 |
-
"value": aggresive_set,
|
266 |
-
"split_bin": self.mp.param["band"][1]["crop_stop"],
|
267 |
-
}
|
268 |
-
with torch.no_grad():
|
269 |
-
pred, X_mag, X_phase = inference(
|
270 |
-
X_spec_m, self.device, self.model, aggressiveness, self.data
|
271 |
-
)
|
272 |
-
# Postprocess
|
273 |
-
if self.data["postprocess"]:
|
274 |
-
pred_inv = np.clip(X_mag - pred, 0, np.inf)
|
275 |
-
pred = spec_utils.mask_silence(pred, pred_inv)
|
276 |
-
y_spec_m = pred * X_phase
|
277 |
-
v_spec_m = X_spec_m - y_spec_m
|
278 |
-
|
279 |
-
if ins_root is not None:
|
280 |
-
if self.data["high_end_process"].startswith("mirroring"):
|
281 |
-
input_high_end_ = spec_utils.mirroring(
|
282 |
-
self.data["high_end_process"], y_spec_m, input_high_end, self.mp
|
283 |
-
)
|
284 |
-
wav_instrument = spec_utils.cmb_spectrogram_to_wave(
|
285 |
-
y_spec_m, self.mp, input_high_end_h, input_high_end_
|
286 |
-
)
|
287 |
-
else:
|
288 |
-
wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
|
289 |
-
logger.info("%s instruments done" % name)
|
290 |
-
if format in ["wav", "flac"]:
|
291 |
-
sf.write(
|
292 |
-
os.path.join(
|
293 |
-
ins_root,
|
294 |
-
"instrument_{}_{}.{}".format(name, self.data["agg"], format),
|
295 |
-
),
|
296 |
-
(np.array(wav_instrument) * 32768).astype("int16"),
|
297 |
-
self.mp.param["sr"],
|
298 |
-
) #
|
299 |
-
else:
|
300 |
-
path = os.path.join(
|
301 |
-
ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
|
302 |
-
)
|
303 |
-
sf.write(
|
304 |
-
path,
|
305 |
-
(np.array(wav_instrument) * 32768).astype("int16"),
|
306 |
-
self.mp.param["sr"],
|
307 |
-
)
|
308 |
-
if os.path.exists(path):
|
309 |
-
os.system(
|
310 |
-
"ffmpeg -i %s -vn %s -q:a 2 -y"
|
311 |
-
% (path, path[:-4] + ".%s" % format)
|
312 |
-
)
|
313 |
-
if vocal_root is not None:
|
314 |
-
if self.data["high_end_process"].startswith("mirroring"):
|
315 |
-
input_high_end_ = spec_utils.mirroring(
|
316 |
-
self.data["high_end_process"], v_spec_m, input_high_end, self.mp
|
317 |
-
)
|
318 |
-
wav_vocals = spec_utils.cmb_spectrogram_to_wave(
|
319 |
-
v_spec_m, self.mp, input_high_end_h, input_high_end_
|
320 |
-
)
|
321 |
-
else:
|
322 |
-
wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
|
323 |
-
logger.info("%s vocals done" % name)
|
324 |
-
if format in ["wav", "flac"]:
|
325 |
-
sf.write(
|
326 |
-
os.path.join(
|
327 |
-
vocal_root,
|
328 |
-
"vocal_{}_{}.{}".format(name, self.data["agg"], format),
|
329 |
-
),
|
330 |
-
(np.array(wav_vocals) * 32768).astype("int16"),
|
331 |
-
self.mp.param["sr"],
|
332 |
-
)
|
333 |
-
else:
|
334 |
-
path = os.path.join(
|
335 |
-
vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
|
336 |
-
)
|
337 |
-
sf.write(
|
338 |
-
path,
|
339 |
-
(np.array(wav_vocals) * 32768).astype("int16"),
|
340 |
-
self.mp.param["sr"],
|
341 |
-
)
|
342 |
-
if os.path.exists(path):
|
343 |
-
os.system(
|
344 |
-
"ffmpeg -i %s -vn %s -q:a 2 -y"
|
345 |
-
% (path, path[:-4] + ".%s" % format)
|
346 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
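The deleted `preprocess.py` above separates vocals from instrumentals by predicting a magnitude mask on the mixture spectrogram and subtracting the masked result from the original complex spectrogram (`v_spec_m = X_spec_m - y_spec_m`). The following is a much-reduced sketch of that idea using SciPy's STFT on a synthetic mixture, with a hand-made mask standing in for the neural network; the signal, mask, and all parameters here are illustrative, not the model's.

```python
import numpy as np
from scipy.signal import stft, istft

sr = 16000
t = np.arange(sr * 2) / sr
tone = 0.5 * np.sin(2 * np.pi * 220 * t)        # stand-in "instrument"
rng = np.random.default_rng(0)
hiss = 0.1 * rng.standard_normal(t.shape)       # stand-in residual / "vocal"
mix = tone + hiss

f, frames, X = stft(mix, fs=sr, nperseg=1024)   # complex mixture spectrogram

# Hand-made soft mask: keep bins below ~400 Hz (the network would predict this).
mask = (f < 400).astype(float)[:, None]

Y = X * mask                                    # "instrument" estimate
V = X - Y                                       # residual, as in v_spec_m = X_spec_m - y_spec_m

_, inst = istft(Y, fs=sr, nperseg=1024)
_, rest = istft(V, fs=sr, nperseg=1024)
print(inst.shape, rest.shape)
```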
spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py
DELETED
@@ -1,13 +0,0 @@
-from pyrender import Mesh, Scene, Viewer
-from io import BytesIO
-import numpy as np
-import trimesh
-import requests
-
-duck_source = "https://github.com/KhronosGroup/glTF-Sample-Models/raw/master/2.0/Duck/glTF-Binary/Duck.glb"
-
-duck = trimesh.load(BytesIO(requests.get(duck_source).content), file_type='glb')
-duckmesh = Mesh.from_trimesh(list(duck.geometry.values())[0])
-scene = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]))
-scene.add(duckmesh)
-Viewer(scene)
spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: 01🗣️ Gradio NLP Speech 2 Text 2 Speech Generator AI Pipeline 🙉
-emoji: 🗣️🎤🙉
-colorFrom: blue
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.9.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py
DELETED
@@ -1,18 +0,0 @@
-# model settings
-model = dict(
-    type='ImageClassifier',
-    backbone=dict(
-        type='ResNet',
-        depth=50,
-        num_stages=4,
-        out_indices=(3, ),
-        style='pytorch'),
-    neck=dict(type='GlobalAveragePooling'),
-    head=dict(
-        type='LinearClsHead',
-        num_classes=1000,
-        in_channels=2048,
-        loss=dict(
-            type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0),
-        topk=(1, 5),
-    ))
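The deleted mmpretrain config above trains the ResNet-50 head with `LabelSmoothLoss(label_smooth_val=0.1)`. As a rough equivalent outside the mmpretrain config system, recent PyTorch exposes the same idea directly through `CrossEntropyLoss(label_smoothing=...)`; a minimal sketch, with arbitrary tensor sizes:

```python
import torch
import torch.nn as nn

logits = torch.randn(8, 1000)            # batch of 8, 1000 classes as in the config head
targets = torch.randint(0, 1000, (8,))

hard_ce = nn.CrossEntropyLoss()
smooth_ce = nn.CrossEntropyLoss(label_smoothing=0.1)  # counterpart of label_smooth_val=0.1

# Smoothing spreads 10% of the target mass over the other classes,
# which typically raises the loss slightly but regularizes the logits.
print(hard_ce(logits, targets).item(), smooth_ce(logits, targets).item())
```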
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py
DELETED
@@ -1,234 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
from concurrent.futures import ProcessPoolExecutor
|
8 |
-
from functools import wraps
|
9 |
-
import hashlib
|
10 |
-
import logging
|
11 |
-
import typing as tp
|
12 |
-
|
13 |
-
import flashy
|
14 |
-
import flashy.distrib
|
15 |
-
import omegaconf
|
16 |
-
import torch
|
17 |
-
from torch.nn.utils.rnn import pad_sequence
|
18 |
-
|
19 |
-
|
20 |
-
logger = logging.getLogger(__name__)
|
21 |
-
|
22 |
-
|
23 |
-
def dict_from_config(cfg: omegaconf.DictConfig) -> dict:
|
24 |
-
"""Convenience function to map an omegaconf configuration to a dictionary.
|
25 |
-
|
26 |
-
Args:
|
27 |
-
cfg (omegaconf.DictConfig): Original configuration to map to dict.
|
28 |
-
Returns:
|
29 |
-
dict: Config as dictionary object.
|
30 |
-
"""
|
31 |
-
dct = omegaconf.OmegaConf.to_container(cfg, resolve=True)
|
32 |
-
assert isinstance(dct, dict)
|
33 |
-
return dct
|
34 |
-
|
35 |
-
|
36 |
-
def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset:
|
37 |
-
if max_samples >= len(dataset):
|
38 |
-
return dataset
|
39 |
-
|
40 |
-
generator = torch.Generator().manual_seed(seed)
|
41 |
-
perm = torch.randperm(len(dataset), generator=generator)
|
42 |
-
return torch.utils.data.Subset(dataset, perm[:max_samples].tolist())
|
43 |
-
|
44 |
-
|
45 |
-
def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int,
|
46 |
-
num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader:
|
47 |
-
"""Convenience function to load dataset into a dataloader with optional subset sampling.
|
48 |
-
|
49 |
-
Args:
|
50 |
-
dataset: Dataset to load.
|
51 |
-
num_samples (Optional[int]): Number of samples to limit subset size.
|
52 |
-
batch_size (int): Batch size.
|
53 |
-
num_workers (int): Number of workers for data loading.
|
54 |
-
seed (int): Random seed.
|
55 |
-
"""
|
56 |
-
if num_samples is not None:
|
57 |
-
dataset = random_subset(dataset, num_samples, seed)
|
58 |
-
|
59 |
-
dataloader = flashy.distrib.loader(
|
60 |
-
dataset,
|
61 |
-
batch_size=batch_size,
|
62 |
-
num_workers=num_workers,
|
63 |
-
**kwargs
|
64 |
-
)
|
65 |
-
return dataloader
|
66 |
-
|
67 |
-
|
68 |
-
def get_dataset_from_loader(dataloader):
|
69 |
-
dataset = dataloader.dataset
|
70 |
-
if isinstance(dataset, torch.utils.data.Subset):
|
71 |
-
return dataset.dataset
|
72 |
-
else:
|
73 |
-
return dataset
|
74 |
-
|
75 |
-
|
76 |
-
def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
|
77 |
-
"""torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension.
|
78 |
-
|
79 |
-
Args:
|
80 |
-
input (torch.Tensor): The input tensor containing probabilities.
|
81 |
-
num_samples (int): Number of samples to draw.
|
82 |
-
replacement (bool): Whether to draw with replacement or not.
|
83 |
-
Keywords args:
|
84 |
-
generator (torch.Generator): A pseudorandom number generator for sampling.
|
85 |
-
Returns:
|
86 |
-
torch.Tensor: Last dimension contains num_samples indices
|
87 |
-
sampled from the multinomial probability distribution
|
88 |
-
located in the last dimension of tensor input.
|
89 |
-
"""
|
90 |
-
input_ = input.reshape(-1, input.shape[-1])
|
91 |
-
output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator)
|
92 |
-
output = output_.reshape(*list(input.shape[:-1]), -1)
|
93 |
-
return output
|
94 |
-
|
95 |
-
|
96 |
-
def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
|
97 |
-
"""Sample next token from top K values along the last dimension of the input probs tensor.
|
98 |
-
|
99 |
-
Args:
|
100 |
-
probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
|
101 |
-
k (int): The k in “top-k”.
|
102 |
-
Returns:
|
103 |
-
torch.Tensor: Sampled tokens.
|
104 |
-
"""
|
105 |
-
top_k_value, _ = torch.topk(probs, k, dim=-1)
|
106 |
-
min_value_top_k = top_k_value[..., [-1]]
|
107 |
-
probs *= (probs >= min_value_top_k).float()
|
108 |
-
probs.div_(probs.sum(dim=-1, keepdim=True))
|
109 |
-
next_token = multinomial(probs, num_samples=1)
|
110 |
-
return next_token
|
111 |
-
|
112 |
-
|
113 |
-
def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
|
114 |
-
"""Sample next token from top P probabilities along the last dimension of the input probs tensor.
|
115 |
-
|
116 |
-
Args:
|
117 |
-
probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
|
118 |
-
p (int): The p in “top-p”.
|
119 |
-
Returns:
|
120 |
-
torch.Tensor: Sampled tokens.
|
121 |
-
"""
|
122 |
-
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
|
123 |
-
probs_sum = torch.cumsum(probs_sort, dim=-1)
|
124 |
-
mask = probs_sum - probs_sort > p
|
125 |
-
probs_sort *= (~mask).float()
|
126 |
-
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
|
127 |
-
next_token = multinomial(probs_sort, num_samples=1)
|
128 |
-
next_token = torch.gather(probs_idx, -1, next_token)
|
129 |
-
return next_token
|
130 |
-
|
131 |
-
|
132 |
-
class DummyPoolExecutor:
|
133 |
-
"""Dummy pool executor to use when we actually have only 1 worker.
|
134 |
-
(e.g. instead of ProcessPoolExecutor).
|
135 |
-
"""
|
136 |
-
class DummyResult:
|
137 |
-
def __init__(self, func, *args, **kwargs):
|
138 |
-
self.func = func
|
139 |
-
self.args = args
|
140 |
-
self.kwargs = kwargs
|
141 |
-
|
142 |
-
def result(self):
|
143 |
-
return self.func(*self.args, **self.kwargs)
|
144 |
-
|
145 |
-
def __init__(self, workers, mp_context=None):
|
146 |
-
pass
|
147 |
-
|
148 |
-
def submit(self, func, *args, **kwargs):
|
149 |
-
return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
|
150 |
-
|
151 |
-
def __enter__(self):
|
152 |
-
return self
|
153 |
-
|
154 |
-
def __exit__(self, exc_type, exc_value, exc_tb):
|
155 |
-
return
|
156 |
-
|
157 |
-
|
158 |
-
def get_pool_executor(num_workers: int, mp_context=None):
|
159 |
-
return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
|
160 |
-
|
161 |
-
|
162 |
-
def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
|
163 |
-
"""Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
|
164 |
-
For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
|
165 |
-
|
166 |
-
Args:
|
167 |
-
lengths (torch.Tensor): tensor with lengths
|
168 |
-
max_len (int): can set the max length manually. Defaults to None.
|
169 |
-
Returns:
|
170 |
-
torch.Tensor: mask with 0s where there is pad tokens else 1s
|
171 |
-
"""
|
172 |
-
assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
|
173 |
-
final_length = lengths.max().item() if not max_len else max_len
|
174 |
-
final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor
|
175 |
-
return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
|
176 |
-
|
177 |
-
|
178 |
-
def hash_trick(word: str, vocab_size: int) -> int:
|
179 |
-
"""Hash trick to pair each word with an index
|
180 |
-
|
181 |
-
Args:
|
182 |
-
word (str): word we wish to convert to an index
|
183 |
-
vocab_size (int): size of the vocabulary
|
184 |
-
Returns:
|
185 |
-
int: index of the word in the embedding LUT
|
186 |
-
"""
|
187 |
-
hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
|
188 |
-
return hash % vocab_size
|
189 |
-
|
190 |
-
|
191 |
-
def with_rank_rng(base_seed: int = 1234):
|
192 |
-
"""Decorator for a function so that the function will use a Random Number Generator
|
193 |
-
whose state depend on the GPU rank. The original RNG state is restored upon returning.
|
194 |
-
|
195 |
-
Args:
|
196 |
-
base_seed (int): Random seed.
|
197 |
-
"""
|
198 |
-
def _decorator(fun: tp.Callable):
|
199 |
-
@wraps(fun)
|
200 |
-
def _decorated(*args, **kwargs):
|
201 |
-
state = torch.get_rng_state()
|
202 |
-
seed = base_seed ^ flashy.distrib.rank()
|
203 |
-
torch.manual_seed(seed)
|
204 |
-
logger.debug('Rank dependent seed set to %d', seed)
|
205 |
-
try:
|
206 |
-
return fun(*args, **kwargs)
|
207 |
-
finally:
|
208 |
-
torch.set_rng_state(state)
|
209 |
-
logger.debug('RNG state restored.')
|
210 |
-
return _decorated
|
211 |
-
return _decorator
|
212 |
-
|
213 |
-
|
214 |
-
def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
|
215 |
-
"""Get a list of tensors and collate them to a single tensor. according to the following logic:
|
216 |
-
- `dim` specifies the time dimension which will be stacked and padded.
|
217 |
-
- The output will contain 1 new dimension (dimension index 0) which will be the size of
|
218 |
-
of the original list.
|
219 |
-
|
220 |
-
Args:
|
221 |
-
tensors (tp.List[torch.Tensor]): List of tensors to collate.
|
222 |
-
dim (int): Dimension which will be stacked and padded.
|
223 |
-
Returns:
|
224 |
-
tp.Tuple[torch.Tensor, torch.Tensor]:
|
225 |
-
torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
|
226 |
-
(dimension index 0) which will be the size of the original list.
|
227 |
-
torch.Tensor: Tensor containing length of original tensor sizes (without padding).
|
228 |
-
"""
|
229 |
-
tensors = [x.transpose(0, dim) for x in tensors]
|
230 |
-
lens = torch.LongTensor([len(x) for x in tensors])
|
231 |
-
padded_tensors = pad_sequence(tensors)
|
232 |
-
padded_tensors = padded_tensors.transpose(0, 1)
|
233 |
-
padded_tensors = padded_tensors.transpose(1, dim + 1)
|
234 |
-
return padded_tensors, lens
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
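The deleted `audiocraft/utils/utils.py` above implements top-k and top-p (nucleus) token sampling over the last dimension of a probability tensor. Here is a stripped-down sketch of the top-p branch, following the same sort / cumulative-sum / renormalize / gather steps; the example distributions and shapes are illustrative only.

```python
import torch

def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
    # Sort descending, keep the smallest prefix whose mass reaches p, renormalize, sample.
    probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
    probs_sum = torch.cumsum(probs_sort, dim=-1)
    mask = probs_sum - probs_sort > p            # tokens outside the nucleus
    probs_sort = probs_sort * (~mask).float()
    probs_sort = probs_sort / probs_sort.sum(dim=-1, keepdim=True)
    next_token = torch.multinomial(probs_sort, num_samples=1)
    return torch.gather(probs_idx, -1, next_token)  # map back to original token ids

torch.manual_seed(0)
probs = torch.softmax(torch.randn(2, 10), dim=-1)   # two example distributions over 10 tokens
print(sample_top_p(probs, p=0.9))
```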
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py
DELETED
@@ -1,55 +0,0 @@
-from __future__ import annotations
-
-import time, hashlib, random
-
-from ..typing import AsyncGenerator
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-
-domains = [
-    'https://k.aifree.site',
-    'https://p.aifree.site'
-]
-
-class FreeGpt(AsyncGeneratorProvider):
-    url = "https://freegpts1.aifree.site/"
-    supports_gpt_35_turbo = True
-    working = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
-        **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
-            prompt = messages[-1]["content"]
-            timestamp = int(time.time())
-            data = {
-                "messages": messages,
-                "time": timestamp,
-                "pass": None,
-                "sign": generate_signature(timestamp, prompt)
-            }
-            url = random.choice(domains)
-            async with session.post(f"{url}/api/generate", json=data) as response:
-                response.raise_for_status()
-                async for chunk in response.iter_content():
-                    yield chunk.decode()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-def generate_signature(timestamp: int, message: str, secret: str = ""):
-    data = f"{timestamp}:{message}:{secret}"
-    return hashlib.sha256(data.encode()).hexdigest()
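The deleted `FreeGpt.py` provider above signs each request with a SHA-256 digest of `timestamp:prompt:secret`. The signing step in isolation looks like the sketch below; the timestamp and message are made up, and the endpoint and payload format the provider expects are not reproduced here.

```python
import hashlib
import time

def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
    # Hash "timestamp:message:secret" and return the hex digest.
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()

ts = int(time.time())
print(generate_signature(ts, "hello"))  # hex digest sent as the "sign" field
```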
spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md
DELETED
@@ -1,80 +0,0 @@
-# How to contribute
-
-It's important to us that you feel you can contribute towards the evolution of Phaser. This can take many forms: from helping to fix bugs or improve the docs, to adding in new features to the source. This guide should help you in making that process as smooth as possible.
-
-Before contributing, please read the [code of conduct](https://github.com/photonstorm/phaser/blob/master/.github/CODE_OF_CONDUCT.md).
-
-## Reporting issues
-
-[GitHub Issues][0] is the place to report bugs you may have found. When submitting a bug please do the following:
-
-**1. Search for existing issues.** Your bug may have already been fixed, or cannot, or will not, be fixed. So be sure to search the issues first before putting in a duplicate issue.
-
-**2. Not sure if it's a bug?.** Please ask on the [forum][4]. If something is blatantly wrong then post it to GitHub. But if you feel it might just be because you're not sure of expected behavior, then it might save us time, and get you a response faster, if you post it to the Phaser forum instead.
-
-**3. Create an isolated and reproducible test case.** If you are reporting a bug, make sure you also have a minimal, runnable, code example that reproduces the problem you have.
-
-**4. Include a live example.** After narrowing your code down to only the problem areas, make use of [jsFiddle][1], [jsBin][2], [CodePen][5], or a link to your live site so that we can view a live example of the problem.
-
-**5. Share as much information as possible.** Include browser version affected, your OS, version of the library, steps to reproduce, etc. "X isn't working!!!1!" will probably just be closed.
-
-## Support Forum
-
-We have a very active [Phaser Support Forum][4]. If you need general support, or are struggling to understand how to do something or need your code checked over, then we would urge you to post it to our forum. There are a lot of friendly devs in there who can help, as well as the core Phaser team, so it's a great place to get support. You're welcome to report bugs directly on GitHub, but for general support we'd always recommend using the forum first.
-
-## Making Changes
-
-I'm assuming you already have a recent version of [Node](https://nodejs.org) installed locally and can run `npm`. This guide is tested and works on both Windows 10 and OS X.
-
-### 1. Checkout the repos
-
-Check-out both the [Phaser repo](https://github.com/photonstorm/phaser) and the [Phaser 3 Examples Repo](https://github.com/photonstorm/phaser3-examples). Make sure the Phaser 3 Examples repo is saved locally in a folder called `phaser3-examples`, which will be the default for most Git clients.
-
-### 2. Matching Directory Levels
-
-Ensure that both repos live at the same depth in your directory structure. For example: `/usr/home/web/phaser` and `/usr/home/web/phaser3-examples`. This is so the dev build scripts in the Phaser repo can safely copy files to `../phaser3-examples` and have them end up in the correct place.
-
-### 3. Install dependencies
-
-Using your console, run `npm install` or `yarn install` as we've configs for both. This process will install a local copy of webpack and a handful of small support scripts. Note that Yarn on Windows seems to have issues making some packages global, so stick with npm if this is the case.
-
-### 4. Webpack
-
-Making sure you've got both repos checked out, and at the same directory level in your filesystem, issue the command `webpack`. If you can't issue the command then webpack may need [installing globally](https://webpack.js.org/guides/installation/). Webpack will build Phaser and if there are any path errors in the code they'll be flagged during the build process.
-
-What you need is the ability to issue the command `webpack` within the v3 folder and have it work.
-
-### 5. ESLint
-
-There is an ESLint configuration and an Editor Configuration in the v3 folder. **Please adhere to them!** Although not enforced in the build process yet, I will be adding that at a later point. There are lots of tools you can install so your editor of choice will check the ESLint config during development.
-
-To test if your code passes our lint config issue the command `npm run lint`.
-
-## Coding style preferences are not contributions
-
-If your PR is doing little more than changing the Phaser source code into a format / coding style that you prefer then we will automatically close it. All PRs must adhere to the coding style already set-out across the thousands of lines of code in Phaser. Your personal preferences for how things should "look" or be structured do not apply here, sorry. PRs should fix bugs, fix documentation or add features. No changes for the sake of change.
-
-## I don't really like git / node.js, but I can fix this bug
-
-That is fine too. While Pull Requests are the best thing in the world for us, they are not the only way to help. You're welcome to post fixes to our forum or even just email them to us. All we ask is that you still adhere to the guidelines presented here re: ESLint, etc.
-
-## Code Style Guide
-
-We provide an .editorconfig and eslint config for you to use, but generally:
-
-- Use 4 spaces for tabs, never tab characters.
-
-- No trailing whitespace, blank lines should have no whitespace.
-
-- Always favor strict equals `===` unless you *need* to use type coercion.
-
-- Follow conventions already in the code, and listen to eslint. Our config is set-up for a reason.
-
-Thanks to Chad for creating the original Pixi.js Contributing file which we adapted for Phaser.
-
-[0]: https://github.com/photonstorm/phaser/issues
-[1]: http://jsfiddle.net
-[2]: http://jsbin.com/
-[3]: http://nodejs.org
-[4]: https://phaser.discourse.group/
-[5]: https://codepen.io/pen?template=YeEWom "Phaser 3 game template"
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js
DELETED
@@ -1,45 +0,0 @@
-import Base from '../base/Base.js';
-import { Circle } from '../utils/Geoms.js'
-import Yoyo from '../utils/Yoyo.js';
-
-const Linear = Phaser.Math.Linear;
-
-class Ball extends Base {
-    constructor(scene, config) {
-        super(scene, config);
-        this.type = 'rexSpinnerBall';
-    }
-
-    buildShapes() {
-        for (var i = 0; i < 3; i++) {
-            this.addShape(new Circle());
-        }
-    }
-
-    updateShapes() {
-        var centerX = this.centerX;
-        var centerY = this.centerY;
-        var radius = this.radius;
-        var ballRadius = radius * 0.1;
-        var lineWidth = Math.ceil(ballRadius * 0.25);
-
-        var t = 1 - Yoyo(this.value);
-        var trackRadius = Linear(0.3, 0.9, t) * radius;
-
-        var shapes = this.getShapes();
-        for (var i = 0, cnt = shapes.length; i < cnt; i++) {
-            var ball = shapes[i];
-            var t = (this.value + (i / cnt)) % 1;
-            var angle = Math.PI * 2 * t;
-            ball
-                .lineStyle(lineWidth, this.color)
-                .setRadius(ballRadius)
-                .setCenterPosition(
-                    centerX + Math.cos(angle) * trackRadius,
-                    centerY + Math.sin(angle) * trackRadius
-                );
-        }
-    }
-}
-
-export default Ball;
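The deleted `Ball.js` spinner above spaces its three balls evenly around a circle by offsetting a shared phase: each ball's angle is `2π * ((value + i/n) mod 1)`. A small sketch of the same placement follows, written in Python rather than the plugin's JavaScript, with arbitrary radius and phase values.

```python
import math

def ball_positions(value, n=3, cx=0.0, cy=0.0, track_radius=1.0):
    """Centers of n balls evenly spaced on a circle, advanced by a shared phase `value` in [0, 1)."""
    positions = []
    for i in range(n):
        t = (value + i / n) % 1.0          # per-ball phase offset
        angle = 2 * math.pi * t
        positions.append((cx + math.cos(angle) * track_radius,
                          cy + math.sin(angle) * track_radius))
    return positions

print(ball_positions(0.25))  # three points 120 degrees apart, rotated by the phase
```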
spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py
DELETED
@@ -1,136 +0,0 @@
-import os
-import numpy as np
-from PIL import Image
-from skimage import io, img_as_float32, transform
-import torch
-import scipy.io as scio
-
-def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path,
-                        batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None,
-                        expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):
-
-    semantic_radius = 13
-    video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]
-    txt_path = os.path.splitext(coeff_path)[0]
-
-    data={}
-
-    img1 = Image.open(pic_path)
-    source_image = np.array(img1)
-    source_image = img_as_float32(source_image)
-    source_image = transform.resize(source_image, (size, size, 3))
-    source_image = source_image.transpose((2, 0, 1))
-    source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)
-    source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)
-    data['source_image'] = source_image_ts
-
-    source_semantics_dict = scio.loadmat(first_coeff_path)
-    generated_dict = scio.loadmat(coeff_path)
-
-    if 'full' not in preprocess.lower():
-        source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70]  #1 70
-        generated_3dmm = generated_dict['coeff_3dmm'][:,:70]
-
-    else:
-        source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73]  #1 70
-        generated_3dmm = generated_dict['coeff_3dmm'][:,:70]
-
-    source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)
-    source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)
-    source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)
-    data['source_semantics'] = source_semantics_ts
-
-    # target
-    generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale
-
-    if 'full' in preprocess.lower():
-        generated_3dmm = np.concatenate([generated_3dmm, np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)
-
-    if still_mode:
-        generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)
-
-    with open(txt_path+'.txt', 'w') as f:
-        for coeff in generated_3dmm:
-            for i in coeff:
-                f.write(str(i)[:7] + ' '+'\t')
-            f.write('\n')
-
-    target_semantics_list = []
-    frame_num = generated_3dmm.shape[0]
-    data['frame_num'] = frame_num
-    for frame_idx in range(frame_num):
-        target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)
-        target_semantics_list.append(target_semantics)
-
-    remainder = frame_num%batch_size
-    if remainder!=0:
-        for _ in range(batch_size-remainder):
-            target_semantics_list.append(target_semantics)
-
-    target_semantics_np = np.array(target_semantics_list)  #frame_num 70 semantic_radius*2+1
-    target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])
-    data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)
-    data['video_name'] = video_name
-    data['audio_path'] = audio_path
-
-    if input_yaw_list is not None:
-        yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)
-        data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)
-    if input_pitch_list is not None:
-        pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)
-        data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)
-    if input_roll_list is not None:
-        roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size)
-        data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)
-
-    return data
-
-def transform_semantic_1(semantic, semantic_radius):
-    semantic_list = [semantic for i in range(0, semantic_radius*2+1)]
-    coeff_3dmm = np.concatenate(semantic_list, 0)
-    return coeff_3dmm.transpose(1,0)
-
-def transform_semantic_target(coeff_3dmm, frame_index, semantic_radius):
-    num_frames = coeff_3dmm.shape[0]
-    seq = list(range(frame_index- semantic_radius, frame_index + semantic_radius+1))
-    index = [ min(max(item, 0), num_frames-1) for item in seq ]
-    coeff_3dmm_g = coeff_3dmm[index, :]
-    return coeff_3dmm_g.transpose(1,0)
-
-def gen_camera_pose(camera_degree_list, frame_num, batch_size):
-
-    new_degree_list = []
-    if len(camera_degree_list) == 1:
-        for _ in range(frame_num):
-            new_degree_list.append(camera_degree_list[0])
-        remainder = frame_num%batch_size
-        if remainder!=0:
-            for _ in range(batch_size-remainder):
-                new_degree_list.append(new_degree_list[-1])
-        new_degree_np = np.array(new_degree_list).reshape(batch_size, -1)
-        return new_degree_np
-
-    degree_sum = 0.
-    for i, degree in enumerate(camera_degree_list[1:]):
-        degree_sum += abs(degree-camera_degree_list[i])
-
-    degree_per_frame = degree_sum/(frame_num-1)
-    for i, degree in enumerate(camera_degree_list[1:]):
-        degree_last = camera_degree_list[i]
-        degree_step = degree_per_frame * abs(degree-degree_last)/(degree-degree_last)
-        new_degree_list = new_degree_list + list(np.arange(degree_last, degree, degree_step))
-    if len(new_degree_list) > frame_num:
-        new_degree_list = new_degree_list[:frame_num]
-    elif len(new_degree_list) < frame_num:
-        for _ in range(frame_num-len(new_degree_list)):
-            new_degree_list.append(new_degree_list[-1])
-    print(len(new_degree_list))
-    print(frame_num)
-
-    remainder = frame_num%batch_size
-    if remainder!=0:
-        for _ in range(batch_size-remainder):
-            new_degree_list.append(new_degree_list[-1])
-    new_degree_np = np.array(new_degree_list).reshape(batch_size, -1)
-    return new_degree_np
-
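`transform_semantic_target` in the deleted file above gathers, for every frame, a window of coefficients from `frame - radius` to `frame + radius`, clamping the indices to the valid range so edge frames are repeated. The indexing trick on its own, with a toy array and an illustrative radius:

```python
import numpy as np

def clamped_window(coeffs: np.ndarray, frame_index: int, radius: int) -> np.ndarray:
    """Return coeffs for frames [frame_index - radius, frame_index + radius], clamped at the edges."""
    num_frames = coeffs.shape[0]
    seq = range(frame_index - radius, frame_index + radius + 1)
    index = [min(max(i, 0), num_frames - 1) for i in seq]
    return coeffs[index, :].T              # (coeff_dim, 2 * radius + 1), as in the original

coeffs = np.arange(20).reshape(10, 2)      # 10 frames, 2 coefficients each
print(clamped_window(coeffs, frame_index=0, radius=3).shape)  # (2, 7); the first frame is repeated
```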
spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py
DELETED
@@ -1,981 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Network architectures from the paper
|
10 |
-
"Analyzing and Improving the Image Quality of StyleGAN".
|
11 |
-
Matches the original implementation of configs E-F by Karras et al. at
|
12 |
-
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
|
13 |
-
|
14 |
-
import numpy as np
|
15 |
-
import torch
|
16 |
-
import torch.nn.functional as F
|
17 |
-
from torch_utils import misc
|
18 |
-
from torch_utils import persistence
|
19 |
-
from torch_utils.ops import conv2d_resample
|
20 |
-
from torch_utils.ops import upfirdn2d
|
21 |
-
from torch_utils.ops import bias_act
|
22 |
-
from torch_utils.ops import fma
|
23 |
-
|
24 |
-
# ----------------------------------------------------------------------------
|
25 |
-
|
26 |
-
|
27 |
-
@misc.profiled_function
|
28 |
-
def normalize_2nd_moment(x, dim=1, eps=1e-8):
|
29 |
-
return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
|
30 |
-
|
31 |
-
# ----------------------------------------------------------------------------
|
32 |
-
|
33 |
-
|
34 |
-
@misc.profiled_function
|
35 |
-
def modulated_conv2d(
|
36 |
-
# Input tensor of shape [batch_size, in_channels, in_height, in_width].
|
37 |
-
x,
|
38 |
-
# Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
|
39 |
-
weight,
|
40 |
-
# Modulation coefficients of shape [batch_size, in_channels].
|
41 |
-
styles,
|
42 |
-
noise=None, # Optional noise tensor to add to the output activations.
|
43 |
-
up=1, # Integer upsampling factor.
|
44 |
-
down=1, # Integer downsampling factor.
|
45 |
-
padding=0, # Padding with respect to the upsampled image.
|
46 |
-
# Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
|
47 |
-
resample_filter=None,
|
48 |
-
demodulate=True, # Apply weight demodulation?
|
49 |
-
# False = convolution, True = correlation (matches torch.nn.functional.conv2d).
|
50 |
-
flip_weight=True,
|
51 |
-
# Perform modulation, convolution, and demodulation as a single fused operation?
|
52 |
-
fused_modconv=True,
|
53 |
-
):
|
54 |
-
batch_size = x.shape[0]
|
55 |
-
out_channels, in_channels, kh, kw = weight.shape
|
56 |
-
misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
|
57 |
-
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
|
58 |
-
misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
|
59 |
-
|
60 |
-
# Pre-normalize inputs to avoid FP16 overflow.
|
61 |
-
if x.dtype == torch.float16 and demodulate:
|
62 |
-
weight = weight * (1 / np.sqrt(in_channels * kh * kw) /
|
63 |
-
weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True)) # max_Ikk
|
64 |
-
styles = styles / \
|
65 |
-
styles.norm(float('inf'), dim=1, keepdim=True) # max_I
|
66 |
-
|
67 |
-
# Calculate per-sample weights and demodulation coefficients.
|
68 |
-
w = None
|
69 |
-
dcoefs = None
|
70 |
-
if demodulate or fused_modconv:
|
71 |
-
w = weight.unsqueeze(0) # [NOIkk]
|
72 |
-
w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
|
73 |
-
if demodulate:
|
74 |
-
dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
|
75 |
-
if demodulate and fused_modconv:
|
76 |
-
w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
|
77 |
-
|
78 |
-
# Execute by scaling the activations before and after the convolution.
|
79 |
-
if not fused_modconv:
|
80 |
-
x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
|
81 |
-
x = conv2d_resample.conv2d_resample(x=x, w=weight.to(
|
82 |
-
x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
|
83 |
-
if demodulate and noise is not None:
|
84 |
-
x = fma.fma(x, dcoefs.to(x.dtype).reshape(
|
85 |
-
batch_size, -1, 1, 1), noise.to(x.dtype))
|
86 |
-
elif demodulate:
|
87 |
-
x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
|
88 |
-
elif noise is not None:
|
89 |
-
x = x.add_(noise.to(x.dtype))
|
90 |
-
return x
|
91 |
-
|
92 |
-
# Execute as one fused op using grouped convolution.
|
93 |
-
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
|
94 |
-
batch_size = int(batch_size)
|
95 |
-
misc.assert_shape(x, [batch_size, in_channels, None, None])
|
96 |
-
x = x.reshape(1, -1, *x.shape[2:])
|
97 |
-
w = w.reshape(-1, in_channels, kh, kw)
|
98 |
-
x = conv2d_resample.conv2d_resample(x=x, w=w.to(
|
99 |
-
x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
|
100 |
-
x = x.reshape(batch_size, -1, *x.shape[2:])
|
101 |
-
if noise is not None:
|
102 |
-
x = x.add_(noise)
|
103 |
-
return x
|
104 |
-
|
105 |
-
# ----------------------------------------------------------------------------
|
106 |
-
|
107 |
-
|
108 |
-
@persistence.persistent_class
|
109 |
-
class FullyConnectedLayer(torch.nn.Module):
|
110 |
-
def __init__(self,
|
111 |
-
in_features, # Number of input features.
|
112 |
-
out_features, # Number of output features.
|
113 |
-
bias=True, # Apply additive bias before the activation function?
|
114 |
-
# Activation function: 'relu', 'lrelu', etc.
|
115 |
-
activation='linear',
|
116 |
-
lr_multiplier=1, # Learning rate multiplier.
|
117 |
-
bias_init=0, # Initial value for the additive bias.
|
118 |
-
):
|
119 |
-
super().__init__()
|
120 |
-
self.in_features = in_features
|
121 |
-
self.out_features = out_features
|
122 |
-
self.activation = activation
|
123 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
124 |
-
[out_features, in_features]) / lr_multiplier)
|
125 |
-
self.bias = torch.nn.Parameter(torch.full(
|
126 |
-
[out_features], np.float32(bias_init))) if bias else None
|
127 |
-
self.weight_gain = lr_multiplier / np.sqrt(in_features)
|
128 |
-
self.bias_gain = lr_multiplier
|
129 |
-
|
130 |
-
def forward(self, x):
|
131 |
-
w = self.weight.to(x.dtype) * self.weight_gain
|
132 |
-
b = self.bias
|
133 |
-
if b is not None:
|
134 |
-
b = b.to(x.dtype)
|
135 |
-
if self.bias_gain != 1:
|
136 |
-
b = b * self.bias_gain
|
137 |
-
|
138 |
-
if self.activation == 'linear' and b is not None:
|
139 |
-
x = torch.addmm(b.unsqueeze(0), x, w.t())
|
140 |
-
else:
|
141 |
-
x = x.matmul(w.t())
|
142 |
-
x = bias_act.bias_act(x, b, act=self.activation)
|
143 |
-
return x
|
144 |
-
|
145 |
-
def extra_repr(self):
|
146 |
-
return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
|
147 |
-
|
148 |
-
# ----------------------------------------------------------------------------


@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    def __init__(self,
                 in_channels,                    # Number of input channels.
                 out_channels,                   # Number of output channels.
                 kernel_size,                    # Width and height of the convolution kernel.
                 bias=True,                      # Apply additive bias before the activation function?
                 activation='linear',            # Activation function: 'relu', 'lrelu', etc.
                 up=1,                           # Integer upsampling factor.
                 down=1,                         # Integer downsampling factor.
                 resample_filter=[1, 3, 3, 1],   # Low-pass filter to apply when resampling activations.
                 conv_clamp=None,                # Clamp the output to +-X, None = disable clamping.
                 channels_last=False,            # Expect the input to have memory_format=channels_last?
                 trainable=True,                 # Update the weights of this layer during training?
                 ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)
        bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = torch.nn.Parameter(bias) if bias is not None else None
        else:
            self.register_buffer('weight', weight)
            if bias is not None:
                self.register_buffer('bias', bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        b = self.bias.to(x.dtype) if self.bias is not None else None
        flip_weight = (self.up == 1)  # slightly faster
        x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp)
        return x

    def extra_repr(self):
        return ' '.join([
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
            f'up={self.up}, down={self.down}'])

# ----------------------------------------------------------------------------
|
216 |
-
|
217 |
-
|
218 |
-
@persistence.persistent_class
|
219 |
-
class MappingNetwork(torch.nn.Module):
|
220 |
-
def __init__(self,
|
221 |
-
# Input latent (Z) dimensionality, 0 = no latent.
|
222 |
-
z_dim,
|
223 |
-
# Conditioning label (C) dimensionality, 0 = no label.
|
224 |
-
c_dim,
|
225 |
-
# Intermediate latent (W) dimensionality.
|
226 |
-
w_dim,
|
227 |
-
# Number of intermediate latents to output, None = do not broadcast.
|
228 |
-
num_ws,
|
229 |
-
num_layers=8, # Number of mapping layers.
|
230 |
-
# Label embedding dimensionality, None = same as w_dim.
|
231 |
-
embed_features=None,
|
232 |
-
# Number of intermediate features in the mapping layers, None = same as w_dim.
|
233 |
-
layer_features=None,
|
234 |
-
# Activation function: 'relu', 'lrelu', etc.
|
235 |
-
activation='lrelu',
|
236 |
-
# Learning rate multiplier for the mapping layers.
|
237 |
-
lr_multiplier=0.01,
|
238 |
-
# Decay for tracking the moving average of W during training, None = do not track.
|
239 |
-
w_avg_beta=0.998,
|
240 |
-
):
|
241 |
-
super().__init__()
|
242 |
-
self.z_dim = z_dim
|
243 |
-
self.c_dim = c_dim
|
244 |
-
self.w_dim = w_dim
|
245 |
-
self.num_ws = num_ws
|
246 |
-
self.num_layers = num_layers
|
247 |
-
self.w_avg_beta = w_avg_beta
|
248 |
-
|
249 |
-
if embed_features is None:
|
250 |
-
embed_features = w_dim
|
251 |
-
if c_dim == 0:
|
252 |
-
embed_features = 0
|
253 |
-
if layer_features is None:
|
254 |
-
layer_features = w_dim
|
255 |
-
features_list = [z_dim + embed_features] + \
|
256 |
-
[layer_features] * (num_layers - 1) + [w_dim]
|
257 |
-
|
258 |
-
if c_dim > 0:
|
259 |
-
self.embed = FullyConnectedLayer(c_dim, embed_features)
|
260 |
-
for idx in range(num_layers):
|
261 |
-
in_features = features_list[idx]
|
262 |
-
out_features = features_list[idx + 1]
|
263 |
-
layer = FullyConnectedLayer(
|
264 |
-
in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
|
265 |
-
setattr(self, f'fc{idx}', layer)
|
266 |
-
|
267 |
-
if num_ws is not None and w_avg_beta is not None:
|
268 |
-
self.register_buffer('w_avg', torch.zeros([w_dim]))
|
269 |
-
|
270 |
-
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
|
271 |
-
# Embed, normalize, and concat inputs.
|
272 |
-
x = None
|
273 |
-
with torch.autograd.profiler.record_function('input'):
|
274 |
-
if self.z_dim > 0:
|
275 |
-
misc.assert_shape(z, [None, self.z_dim])
|
276 |
-
x = normalize_2nd_moment(z.to(torch.float32))
|
277 |
-
if self.c_dim > 0:
|
278 |
-
misc.assert_shape(c, [None, self.c_dim])
|
279 |
-
y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
|
280 |
-
x = torch.cat([x, y], dim=1) if x is not None else y
|
281 |
-
|
282 |
-
# Main layers.
|
283 |
-
for idx in range(self.num_layers):
|
284 |
-
layer = getattr(self, f'fc{idx}')
|
285 |
-
x = layer(x)
|
286 |
-
|
287 |
-
# Update moving average of W.
|
288 |
-
if update_emas and self.w_avg_beta is not None:
|
289 |
-
with torch.autograd.profiler.record_function('update_w_avg'):
|
290 |
-
self.w_avg.copy_(x.detach().mean(
|
291 |
-
dim=0).lerp(self.w_avg, self.w_avg_beta))
|
292 |
-
|
293 |
-
# Broadcast.
|
294 |
-
if self.num_ws is not None:
|
295 |
-
with torch.autograd.profiler.record_function('broadcast'):
|
296 |
-
x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
|
297 |
-
|
298 |
-
# Apply truncation.
|
299 |
-
if truncation_psi != 1:
|
300 |
-
with torch.autograd.profiler.record_function('truncate'):
|
301 |
-
assert self.w_avg_beta is not None
|
302 |
-
if self.num_ws is None or truncation_cutoff is None:
|
303 |
-
x = self.w_avg.lerp(x, truncation_psi)
|
304 |
-
else:
|
305 |
-
x[:, :truncation_cutoff] = self.w_avg.lerp(
|
306 |
-
x[:, :truncation_cutoff], truncation_psi)
|
307 |
-
return x
|
308 |
-
|
309 |
-
def extra_repr(self):
|
310 |
-
return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
|
311 |
-
|
312 |
-
# ----------------------------------------------------------------------------
|
313 |
-
|
314 |
-
|
315 |
-
@persistence.persistent_class
|
316 |
-
class SynthesisLayer(torch.nn.Module):
|
317 |
-
def __init__(self,
|
318 |
-
in_channels, # Number of input channels.
|
319 |
-
out_channels, # Number of output channels.
|
320 |
-
# Intermediate latent (W) dimensionality.
|
321 |
-
w_dim,
|
322 |
-
resolution, # Resolution of this layer.
|
323 |
-
kernel_size=3, # Convolution kernel size.
|
324 |
-
up=1, # Integer upsampling factor.
|
325 |
-
use_noise=True, # Enable noise input?
|
326 |
-
# Activation function: 'relu', 'lrelu', etc.
|
327 |
-
activation='lrelu',
|
328 |
-
# Low-pass filter to apply when resampling activations.
|
329 |
-
resample_filter=[1, 3, 3, 1],
|
330 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
331 |
-
conv_clamp=None,
|
332 |
-
channels_last=False, # Use channels_last format for the weights?
|
333 |
-
):
|
334 |
-
super().__init__()
|
335 |
-
self.in_channels = in_channels
|
336 |
-
self.out_channels = out_channels
|
337 |
-
self.w_dim = w_dim
|
338 |
-
self.resolution = resolution
|
339 |
-
self.up = up
|
340 |
-
self.use_noise = use_noise
|
341 |
-
self.activation = activation
|
342 |
-
self.conv_clamp = conv_clamp
|
343 |
-
self.register_buffer(
|
344 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
345 |
-
self.padding = kernel_size // 2
|
346 |
-
self.act_gain = bias_act.activation_funcs[activation].def_gain
|
347 |
-
|
348 |
-
self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
|
349 |
-
memory_format = torch.channels_last if channels_last else torch.contiguous_format
|
350 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
351 |
-
[out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
|
352 |
-
if use_noise:
|
353 |
-
self.register_buffer(
|
354 |
-
'noise_const', torch.randn([resolution, resolution]))
|
355 |
-
self.noise_strength = torch.nn.Parameter(torch.zeros([]))
|
356 |
-
self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
|
357 |
-
|
358 |
-
def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
|
359 |
-
assert noise_mode in ['random', 'const', 'none']
|
360 |
-
in_resolution = self.resolution // self.up
|
361 |
-
misc.assert_shape(x, [None, self.in_channels,
|
362 |
-
in_resolution, in_resolution])
|
363 |
-
styles = self.affine(w)
|
364 |
-
|
365 |
-
noise = None
|
366 |
-
if self.use_noise and noise_mode == 'random':
|
367 |
-
noise = torch.randn([x.shape[0], 1, self.resolution,
|
368 |
-
self.resolution], device=x.device) * self.noise_strength
|
369 |
-
if self.use_noise and noise_mode == 'const':
|
370 |
-
noise = self.noise_const * self.noise_strength
|
371 |
-
|
372 |
-
flip_weight = (self.up == 1) # slightly faster
|
373 |
-
x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
|
374 |
-
padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
|
375 |
-
|
376 |
-
act_gain = self.act_gain * gain
|
377 |
-
act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
|
378 |
-
x = bias_act.bias_act(x, self.bias.to(
|
379 |
-
x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
|
380 |
-
return x
|
381 |
-
|
382 |
-
def extra_repr(self):
|
383 |
-
return ' '.join([
|
384 |
-
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
|
385 |
-
f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
|
386 |
-
|
387 |
-
# ----------------------------------------------------------------------------


@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.w_dim = w_dim
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))

    def forward(self, x, w, fused_modconv=True):
        styles = self.affine(w) * self.weight_gain
        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
        x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
        return x

    def extra_repr(self):
        return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'

# ----------------------------------------------------------------------------
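ToRGBLayer maps features to image channels with style modulation but no demodulation; in the skip architecture the per-resolution RGB outputs are upsampled and summed. Because demodulation is off and convolution is linear, scaling the input channels by the styles gives the same result as scaling the weights, which the following hedged, torch-only sketch uses (all tensor names and sizes here are illustrative, not from this file):

import torch
import torch.nn.functional as F

def to_rgb(x, w, affine_weight, affine_bias, conv_weight, conv_bias):
    # x: [N, C, H, W]; w: [N, w_dim]; the affine maps w to one style per input channel.
    styles = w @ affine_weight.t() + affine_bias          # [N, C]
    x = x * styles.reshape(x.shape[0], -1, 1, 1)          # modulate only, no demodulation
    x = F.conv2d(x, conv_weight, conv_bias)               # 1x1 conv to RGB
    return x

N, C, w_dim = 2, 16, 32
img = to_rgb(torch.randn(N, C, 8, 8), torch.randn(N, w_dim),
             torch.randn(C, w_dim), torch.ones(C),
             torch.randn(3, C, 1, 1), torch.zeros(3))
print(img.shape)  # torch.Size([2, 3, 8, 8])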
|
416 |
-
|
417 |
-
|
418 |
-
@persistence.persistent_class
|
419 |
-
class SynthesisBlock(torch.nn.Module):
|
420 |
-
def __init__(self,
|
421 |
-
# Number of input channels, 0 = first block.
|
422 |
-
in_channels,
|
423 |
-
# Number of output channels.
|
424 |
-
out_channels,
|
425 |
-
# Intermediate latent (W) dimensionality.
|
426 |
-
w_dim,
|
427 |
-
# Resolution of this block.
|
428 |
-
resolution,
|
429 |
-
# Number of output color channels.
|
430 |
-
img_channels,
|
431 |
-
is_last, # Is this the last block?
|
432 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
433 |
-
architecture='skip',
|
434 |
-
# Low-pass filter to apply when resampling activations.
|
435 |
-
resample_filter=[1, 3, 3, 1],
|
436 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
437 |
-
conv_clamp=256,
|
438 |
-
use_fp16=False, # Use FP16 for this block?
|
439 |
-
fp16_channels_last=False, # Use channels-last memory format with FP16?
|
440 |
-
# Default value of fused_modconv. 'inference_only' = True for inference, False for training.
|
441 |
-
fused_modconv_default=True,
|
442 |
-
# Arguments for SynthesisLayer.
|
443 |
-
**layer_kwargs,
|
444 |
-
):
|
445 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
446 |
-
super().__init__()
|
447 |
-
self.in_channels = in_channels
|
448 |
-
self.w_dim = w_dim
|
449 |
-
self.resolution = resolution
|
450 |
-
self.img_channels = img_channels
|
451 |
-
self.is_last = is_last
|
452 |
-
self.architecture = architecture
|
453 |
-
self.use_fp16 = use_fp16
|
454 |
-
self.channels_last = (use_fp16 and fp16_channels_last)
|
455 |
-
self.fused_modconv_default = fused_modconv_default
|
456 |
-
self.register_buffer(
|
457 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
458 |
-
self.num_conv = 0
|
459 |
-
self.num_torgb = 0
|
460 |
-
|
461 |
-
if in_channels == 0:
|
462 |
-
self.const = torch.nn.Parameter(torch.randn(
|
463 |
-
[out_channels, resolution, resolution]))
|
464 |
-
|
465 |
-
if in_channels != 0:
|
466 |
-
self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
|
467 |
-
resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
|
468 |
-
self.num_conv += 1
|
469 |
-
|
470 |
-
self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
|
471 |
-
conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
|
472 |
-
self.num_conv += 1
|
473 |
-
|
474 |
-
if is_last or architecture == 'skip':
|
475 |
-
self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
|
476 |
-
conv_clamp=conv_clamp, channels_last=self.channels_last)
|
477 |
-
self.num_torgb += 1
|
478 |
-
|
479 |
-
if in_channels != 0 and architecture == 'resnet':
|
480 |
-
self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
|
481 |
-
resample_filter=resample_filter, channels_last=self.channels_last)
|
482 |
-
|
483 |
-
def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
|
484 |
-
_ = update_emas # unused
|
485 |
-
misc.assert_shape(
|
486 |
-
ws, [None, self.num_conv + self.num_torgb, self.w_dim])
|
487 |
-
w_iter = iter(ws.unbind(dim=1))
|
488 |
-
if ws.device.type != 'cuda':
|
489 |
-
force_fp32 = True
|
490 |
-
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
|
491 |
-
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
|
492 |
-
if fused_modconv is None:
|
493 |
-
fused_modconv = self.fused_modconv_default
|
494 |
-
if fused_modconv == 'inference_only':
|
495 |
-
fused_modconv = (not self.training)
|
496 |
-
|
497 |
-
# Input.
|
498 |
-
if self.in_channels == 0:
|
499 |
-
x = self.const.to(dtype=dtype, memory_format=memory_format)
|
500 |
-
x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
|
501 |
-
else:
|
502 |
-
misc.assert_shape(x, [None, self.in_channels,
|
503 |
-
self.resolution // 2, self.resolution // 2])
|
504 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
505 |
-
|
506 |
-
# Main layers.
|
507 |
-
if self.in_channels == 0:
|
508 |
-
x = self.conv1(x, next(w_iter),
|
509 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
510 |
-
elif self.architecture == 'resnet':
|
511 |
-
y = self.skip(x, gain=np.sqrt(0.5))
|
512 |
-
x = self.conv0(x, next(w_iter),
|
513 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
514 |
-
x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv,
|
515 |
-
gain=np.sqrt(0.5), **layer_kwargs)
|
516 |
-
x = y.add_(x)
|
517 |
-
else:
|
518 |
-
x = self.conv0(x, next(w_iter),
|
519 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
520 |
-
x = self.conv1(x, next(w_iter),
|
521 |
-
fused_modconv=fused_modconv, **layer_kwargs)
|
522 |
-
|
523 |
-
# ToRGB.
|
524 |
-
if img is not None:
|
525 |
-
misc.assert_shape(
|
526 |
-
img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
|
527 |
-
img = upfirdn2d.upsample2d(img, self.resample_filter)
|
528 |
-
if self.is_last or self.architecture == 'skip':
|
529 |
-
y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
|
530 |
-
y = y.to(dtype=torch.float32,
|
531 |
-
memory_format=torch.contiguous_format)
|
532 |
-
img = img.add_(y) if img is not None else y
|
533 |
-
|
534 |
-
assert x.dtype == dtype
|
535 |
-
assert img is None or img.dtype == torch.float32
|
536 |
-
return x, img
|
537 |
-
|
538 |
-
def extra_repr(self):
|
539 |
-
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
|
540 |
-
|
541 |
-
# ----------------------------------------------------------------------------
|
542 |
-
|
543 |
-
|
544 |
-
@persistence.persistent_class
|
545 |
-
class SynthesisNetwork(torch.nn.Module):
|
546 |
-
def __init__(self,
|
547 |
-
# Intermediate latent (W) dimensionality.
|
548 |
-
w_dim,
|
549 |
-
img_resolution, # Output image resolution.
|
550 |
-
img_channels, # Number of color channels.
|
551 |
-
# Overall multiplier for the number of channels.
|
552 |
-
channel_base=32768,
|
553 |
-
# Maximum number of channels in any layer.
|
554 |
-
channel_max=512,
|
555 |
-
# Use FP16 for the N highest resolutions.
|
556 |
-
num_fp16_res=4,
|
557 |
-
**block_kwargs, # Arguments for SynthesisBlock.
|
558 |
-
):
|
559 |
-
assert img_resolution >= 4 and img_resolution & (
|
560 |
-
img_resolution - 1) == 0
|
561 |
-
super().__init__()
|
562 |
-
self.w_dim = w_dim
|
563 |
-
self.img_resolution = img_resolution
|
564 |
-
self.img_resolution_log2 = int(np.log2(img_resolution))
|
565 |
-
self.img_channels = img_channels
|
566 |
-
self.num_fp16_res = num_fp16_res
|
567 |
-
self.block_resolutions = [
|
568 |
-
2 ** i for i in range(2, self.img_resolution_log2 + 1)]
|
569 |
-
channels_dict = {res: min(channel_base // res, channel_max)
|
570 |
-
for res in self.block_resolutions}
|
571 |
-
fp16_resolution = max(
|
572 |
-
2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
|
573 |
-
|
574 |
-
self.num_ws = 0
|
575 |
-
for res in self.block_resolutions:
|
576 |
-
in_channels = channels_dict[res // 2] if res > 4 else 0
|
577 |
-
out_channels = channels_dict[res]
|
578 |
-
use_fp16 = (res >= fp16_resolution)
|
579 |
-
is_last = (res == self.img_resolution)
|
580 |
-
block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
|
581 |
-
img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
|
582 |
-
self.num_ws += block.num_conv
|
583 |
-
if is_last:
|
584 |
-
self.num_ws += block.num_torgb
|
585 |
-
setattr(self, f'b{res}', block)
|
586 |
-
|
587 |
-
def forward(self, ws, return_feature=False, **block_kwargs):
|
588 |
-
block_ws = []
|
589 |
-
features = []
|
590 |
-
with torch.autograd.profiler.record_function('split_ws'):
|
591 |
-
misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
|
592 |
-
ws = ws.to(torch.float32)
|
593 |
-
w_idx = 0
|
594 |
-
for res in self.block_resolutions:
|
595 |
-
block = getattr(self, f'b{res}')
|
596 |
-
block_ws.append(
|
597 |
-
ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
|
598 |
-
w_idx += block.num_conv
|
599 |
-
|
600 |
-
x = img = None
|
601 |
-
for res, cur_ws in zip(self.block_resolutions, block_ws):
|
602 |
-
block = getattr(self, f'b{res}')
|
603 |
-
x, img = block(x, img, cur_ws, **block_kwargs)
|
604 |
-
features.append(x)
|
605 |
-
if return_feature:
|
606 |
-
return img, features
|
607 |
-
else:
|
608 |
-
return img
|
609 |
-
|
610 |
-
def extra_repr(self):
|
611 |
-
return ' '.join([
|
612 |
-
f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
|
613 |
-
f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
|
614 |
-
f'num_fp16_res={self.num_fp16_res:d}'])
|
615 |
-
|
616 |
-
# ----------------------------------------------------------------------------


@persistence.persistent_class
class Generator(torch.nn.Module):
    def __init__(self,
                 z_dim,                 # Input latent (Z) dimensionality.
                 c_dim,                 # Conditioning label (C) dimensionality.
                 w_dim,                 # Intermediate latent (W) dimensionality.
                 img_resolution,        # Output resolution.
                 img_channels,          # Number of output color channels.
                 mapping_kwargs={},     # Arguments for MappingNetwork.
                 synthesis_kwargs={},   # Arguments for SynthesisNetwork.
                 resize=None,
                 **synthesis_kwargs2,   # Arguments for SynthesisNetwork.
                 ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        if len(synthesis_kwargs) == 0:
            synthesis_kwargs = synthesis_kwargs2
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
        self.resize = resize

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs):
        if input_is_w:
            ws = z
            if ws.dim() == 2:
                ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
        else:
            ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        img = self.synthesis(ws, update_emas=update_emas, return_feature=return_feature, **synthesis_kwargs)
        if return_feature:
            img, feature = img
        if self.resize is not None:
            img = imresize(img, [self.resize, self.resize])
        if return_feature:
            return img, feature
        else:
            return img


def imresize(image, size):
    dim = image.dim()
    if dim == 3:
        image = image.unsqueeze(1)
    b, _, h, w = image.shape
    if size[0] > h:
        image = F.interpolate(image, size, mode='bilinear')
    elif size[0] < h:
        image = F.interpolate(image, size, mode='area')
    if dim == 3:
        image = image.squeeze(1)
    return image

# ----------------------------------------------------------------------------
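The truncation arguments threaded through Generator.forward and MappingNetwork implement the truncation trick: broadcast latents are pulled toward the tracked average w_avg, optionally only for the first truncation_cutoff layers. A small self-contained sketch of that step (function and variable names are illustrative):

import torch

def truncate(ws, w_avg, psi=0.7, cutoff=None):
    # ws: [N, num_ws, w_dim]; trade diversity for fidelity by lerping toward w_avg.
    out = ws.clone()
    cut = out.shape[1] if cutoff is None else cutoff
    out[:, :cut] = w_avg.lerp(out[:, :cut], psi)   # w_avg + psi * (w - w_avg)
    return out

ws = torch.randn(2, 14, 512)
print(truncate(ws, torch.zeros(512), psi=0.5, cutoff=8).shape)  # torch.Size([2, 14, 512])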
|
683 |
-
|
684 |
-
|
685 |
-
@persistence.persistent_class
|
686 |
-
class DiscriminatorBlock(torch.nn.Module):
|
687 |
-
def __init__(self,
|
688 |
-
# Number of input channels, 0 = first block.
|
689 |
-
in_channels,
|
690 |
-
# Number of intermediate channels.
|
691 |
-
tmp_channels,
|
692 |
-
# Number of output channels.
|
693 |
-
out_channels,
|
694 |
-
# Resolution of this block.
|
695 |
-
resolution,
|
696 |
-
# Number of input color channels.
|
697 |
-
img_channels,
|
698 |
-
# Index of the first layer.
|
699 |
-
first_layer_idx,
|
700 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
701 |
-
architecture='resnet',
|
702 |
-
# Activation function: 'relu', 'lrelu', etc.
|
703 |
-
activation='lrelu',
|
704 |
-
# Low-pass filter to apply when resampling activations.
|
705 |
-
resample_filter=[1, 3, 3, 1],
|
706 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
707 |
-
conv_clamp=None,
|
708 |
-
use_fp16=False, # Use FP16 for this block?
|
709 |
-
fp16_channels_last=False, # Use channels-last memory format with FP16?
|
710 |
-
# Freeze-D: Number of layers to freeze.
|
711 |
-
freeze_layers=0,
|
712 |
-
):
|
713 |
-
assert in_channels in [0, tmp_channels]
|
714 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
715 |
-
super().__init__()
|
716 |
-
self.in_channels = in_channels
|
717 |
-
self.resolution = resolution
|
718 |
-
self.img_channels = img_channels
|
719 |
-
self.first_layer_idx = first_layer_idx
|
720 |
-
self.architecture = architecture
|
721 |
-
self.use_fp16 = use_fp16
|
722 |
-
self.channels_last = (use_fp16 and fp16_channels_last)
|
723 |
-
self.register_buffer(
|
724 |
-
'resample_filter', upfirdn2d.setup_filter(resample_filter))
|
725 |
-
|
726 |
-
self.num_layers = 0
|
727 |
-
|
728 |
-
def trainable_gen():
|
729 |
-
while True:
|
730 |
-
layer_idx = self.first_layer_idx + self.num_layers
|
731 |
-
trainable = (layer_idx >= freeze_layers)
|
732 |
-
self.num_layers += 1
|
733 |
-
yield trainable
|
734 |
-
trainable_iter = trainable_gen()
|
735 |
-
|
736 |
-
if in_channels == 0 or architecture == 'skip':
|
737 |
-
self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
|
738 |
-
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
|
739 |
-
|
740 |
-
self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
|
741 |
-
trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
|
742 |
-
|
743 |
-
self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
|
744 |
-
trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
|
745 |
-
|
746 |
-
if architecture == 'resnet':
|
747 |
-
self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
|
748 |
-
trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
|
749 |
-
|
750 |
-
def forward(self, x, img, force_fp32=False):
|
751 |
-
if (x if x is not None else img).device.type != 'cuda':
|
752 |
-
force_fp32 = True
|
753 |
-
dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
|
754 |
-
memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
|
755 |
-
|
756 |
-
# Input.
|
757 |
-
if x is not None:
|
758 |
-
misc.assert_shape(x, [None, self.in_channels,
|
759 |
-
self.resolution, self.resolution])
|
760 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
761 |
-
|
762 |
-
# FromRGB.
|
763 |
-
if self.in_channels == 0 or self.architecture == 'skip':
|
764 |
-
misc.assert_shape(
|
765 |
-
img, [None, self.img_channels, self.resolution, self.resolution])
|
766 |
-
img = img.to(dtype=dtype, memory_format=memory_format)
|
767 |
-
y = self.fromrgb(img)
|
768 |
-
x = x + y if x is not None else y
|
769 |
-
img = upfirdn2d.downsample2d(
|
770 |
-
img, self.resample_filter) if self.architecture == 'skip' else None
|
771 |
-
|
772 |
-
# Main layers.
|
773 |
-
if self.architecture == 'resnet':
|
774 |
-
y = self.skip(x, gain=np.sqrt(0.5))
|
775 |
-
x = self.conv0(x)
|
776 |
-
x = self.conv1(x, gain=np.sqrt(0.5))
|
777 |
-
x = y.add_(x)
|
778 |
-
else:
|
779 |
-
x = self.conv0(x)
|
780 |
-
x = self.conv1(x)
|
781 |
-
|
782 |
-
assert x.dtype == dtype
|
783 |
-
return x, img
|
784 |
-
|
785 |
-
def extra_repr(self):
|
786 |
-
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
|
787 |
-
|
788 |
-
# ----------------------------------------------------------------------------


@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size
        self.num_channels = num_channels

    def forward(self, x):
        N, C, H, W = x.shape
        with misc.suppress_tracer_warnings():  # as_tensor results are registered as constants
            G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N
        F = self.num_channels
        c = C // F

        # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
        y = x.reshape(G, -1, F, c, H, W)
        # [GnFcHW] Subtract mean over group.
        y = y - y.mean(dim=0)
        # [nFcHW] Calc variance over group.
        y = y.square().mean(dim=0)
        y = (y + 1e-8).sqrt()  # [nFcHW] Calc stddev over group.
        # [nF] Take average over channels and pixels.
        y = y.mean(dim=[2, 3, 4])
        y = y.reshape(-1, F, 1, 1)  # [nF11] Add missing dimensions.
        # [NFHW] Replicate over group and pixels.
        y = y.repeat(G, 1, H, W)
        # [NCHW] Append to input as new channels.
        x = torch.cat([x, y], dim=1)
        return x

    def extra_repr(self):
        return f'group_size={self.group_size}, num_channels={self.num_channels:d}'

# ----------------------------------------------------------------------------
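MinibatchStdLayer appends a statistic of cross-sample variation so the discriminator can detect mode collapse. A simplified, torch-only sketch of the same computation for the special case group_size = N and num_channels = 1 (the full layer above also splits the batch and channels into groups):

import torch

def minibatch_stddev(x, eps=1e-8):
    # x: [N, C, H, W]; append one scalar feature map measuring batch-wide stddev.
    std = (x.var(dim=0, unbiased=False) + eps).sqrt()   # per-feature stddev over the batch
    feat = std.mean().expand(x.shape[0], 1, x.shape[2], x.shape[3])
    return torch.cat([x, feat], dim=1)

print(minibatch_stddev(torch.randn(8, 4, 16, 16)).shape)  # torch.Size([8, 5, 16, 16])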
|
826 |
-
|
827 |
-
|
828 |
-
@persistence.persistent_class
|
829 |
-
class DiscriminatorEpilogue(torch.nn.Module):
|
830 |
-
def __init__(self,
|
831 |
-
in_channels, # Number of input channels.
|
832 |
-
# Dimensionality of mapped conditioning label, 0 = no label.
|
833 |
-
cmap_dim,
|
834 |
-
resolution, # Resolution of this block.
|
835 |
-
# Number of input color channels.
|
836 |
-
img_channels,
|
837 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
838 |
-
architecture='resnet',
|
839 |
-
# Group size for the minibatch standard deviation layer, None = entire minibatch.
|
840 |
-
mbstd_group_size=4,
|
841 |
-
# Number of features for the minibatch standard deviation layer, 0 = disable.
|
842 |
-
mbstd_num_channels=1,
|
843 |
-
# Activation function: 'relu', 'lrelu', etc.
|
844 |
-
activation='lrelu',
|
845 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
846 |
-
conv_clamp=None,
|
847 |
-
):
|
848 |
-
assert architecture in ['orig', 'skip', 'resnet']
|
849 |
-
super().__init__()
|
850 |
-
self.in_channels = in_channels
|
851 |
-
self.cmap_dim = cmap_dim
|
852 |
-
self.resolution = resolution
|
853 |
-
self.img_channels = img_channels
|
854 |
-
self.architecture = architecture
|
855 |
-
|
856 |
-
if architecture == 'skip':
|
857 |
-
self.fromrgb = Conv2dLayer(
|
858 |
-
img_channels, in_channels, kernel_size=1, activation=activation)
|
859 |
-
self.mbstd = MinibatchStdLayer(
|
860 |
-
group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
|
861 |
-
self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels,
|
862 |
-
kernel_size=3, activation=activation, conv_clamp=conv_clamp)
|
863 |
-
self.fc = FullyConnectedLayer(
|
864 |
-
in_channels * (resolution ** 2), in_channels, activation=activation)
|
865 |
-
self.out = FullyConnectedLayer(
|
866 |
-
in_channels, 1 if cmap_dim == 0 else cmap_dim)
|
867 |
-
|
868 |
-
def forward(self, x, img, cmap, force_fp32=False):
|
869 |
-
misc.assert_shape(x, [None, self.in_channels,
|
870 |
-
self.resolution, self.resolution]) # [NCHW]
|
871 |
-
_ = force_fp32 # unused
|
872 |
-
dtype = torch.float32
|
873 |
-
memory_format = torch.contiguous_format
|
874 |
-
|
875 |
-
# FromRGB.
|
876 |
-
x = x.to(dtype=dtype, memory_format=memory_format)
|
877 |
-
if self.architecture == 'skip':
|
878 |
-
misc.assert_shape(
|
879 |
-
img, [None, self.img_channels, self.resolution, self.resolution])
|
880 |
-
img = img.to(dtype=dtype, memory_format=memory_format)
|
881 |
-
x = x + self.fromrgb(img)
|
882 |
-
|
883 |
-
# Main layers.
|
884 |
-
if self.mbstd is not None:
|
885 |
-
x = self.mbstd(x)
|
886 |
-
x = self.conv(x)
|
887 |
-
x = self.fc(x.flatten(1))
|
888 |
-
x = self.out(x)
|
889 |
-
|
890 |
-
# Conditioning.
|
891 |
-
if self.cmap_dim > 0:
|
892 |
-
misc.assert_shape(cmap, [None, self.cmap_dim])
|
893 |
-
x = (x * cmap).sum(dim=1, keepdim=True) * \
|
894 |
-
(1 / np.sqrt(self.cmap_dim))
|
895 |
-
|
896 |
-
assert x.dtype == dtype
|
897 |
-
return x
|
898 |
-
|
899 |
-
def extra_repr(self):
|
900 |
-
return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
|
901 |
-
|
902 |
-
# ----------------------------------------------------------------------------
|
903 |
-
|
904 |
-
|
905 |
-
@persistence.persistent_class
|
906 |
-
class Discriminator(torch.nn.Module):
|
907 |
-
def __init__(self,
|
908 |
-
# Conditioning label (C) dimensionality.
|
909 |
-
c_dim,
|
910 |
-
img_resolution, # Input resolution.
|
911 |
-
# Number of input color channels.
|
912 |
-
img_channels,
|
913 |
-
# Architecture: 'orig', 'skip', 'resnet'.
|
914 |
-
architecture='resnet',
|
915 |
-
# Overall multiplier for the number of channels.
|
916 |
-
channel_base=32768,
|
917 |
-
# Maximum number of channels in any layer.
|
918 |
-
channel_max=512,
|
919 |
-
# Use FP16 for the N highest resolutions.
|
920 |
-
num_fp16_res=4,
|
921 |
-
# Clamp the output of convolution layers to +-X, None = disable clamping.
|
922 |
-
conv_clamp=256,
|
923 |
-
# Dimensionality of mapped conditioning label, None = default.
|
924 |
-
cmap_dim=None,
|
925 |
-
block_kwargs={}, # Arguments for DiscriminatorBlock.
|
926 |
-
mapping_kwargs={}, # Arguments for MappingNetwork.
|
927 |
-
# Arguments for DiscriminatorEpilogue.
|
928 |
-
epilogue_kwargs={},
|
929 |
-
):
|
930 |
-
super().__init__()
|
931 |
-
self.c_dim = c_dim
|
932 |
-
self.img_resolution = img_resolution
|
933 |
-
self.img_resolution_log2 = int(np.log2(img_resolution))
|
934 |
-
self.img_channels = img_channels
|
935 |
-
self.block_resolutions = [
|
936 |
-
2 ** i for i in range(self.img_resolution_log2, 2, -1)]
|
937 |
-
channels_dict = {res: min(channel_base // res, channel_max)
|
938 |
-
for res in self.block_resolutions + [4]}
|
939 |
-
fp16_resolution = max(
|
940 |
-
2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
|
941 |
-
|
942 |
-
if cmap_dim is None:
|
943 |
-
cmap_dim = channels_dict[4]
|
944 |
-
if c_dim == 0:
|
945 |
-
cmap_dim = 0
|
946 |
-
|
947 |
-
common_kwargs = dict(img_channels=img_channels,
|
948 |
-
architecture=architecture, conv_clamp=conv_clamp)
|
949 |
-
cur_layer_idx = 0
|
950 |
-
for res in self.block_resolutions:
|
951 |
-
in_channels = channels_dict[res] if res < img_resolution else 0
|
952 |
-
tmp_channels = channels_dict[res]
|
953 |
-
out_channels = channels_dict[res // 2]
|
954 |
-
use_fp16 = (res >= fp16_resolution)
|
955 |
-
block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
|
956 |
-
first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
|
957 |
-
setattr(self, f'b{res}', block)
|
958 |
-
cur_layer_idx += block.num_layers
|
959 |
-
if c_dim > 0:
|
960 |
-
self.mapping = MappingNetwork(
|
961 |
-
z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
|
962 |
-
self.b4 = DiscriminatorEpilogue(
|
963 |
-
channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
|
964 |
-
|
965 |
-
def forward(self, img, c, update_emas=False, **block_kwargs):
|
966 |
-
_ = update_emas # unused
|
967 |
-
x = None
|
968 |
-
for res in self.block_resolutions:
|
969 |
-
block = getattr(self, f'b{res}')
|
970 |
-
x, img = block(x, img, **block_kwargs)
|
971 |
-
|
972 |
-
cmap = None
|
973 |
-
if self.c_dim > 0:
|
974 |
-
cmap = self.mapping(None, c)
|
975 |
-
x = self.b4(x, img, cmap)
|
976 |
-
return x
|
977 |
-
|
978 |
-
def extra_repr(self):
|
979 |
-
return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
|
980 |
-
|
981 |
-
# ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py
DELETED
@@ -1,645 +0,0 @@

# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Generator architecture from the paper
"Alias-Free Generative Adversarial Networks"."""

import numpy as np
import scipy.signal
import scipy.optimize
import torch
import torch.nn.functional as F
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import filtered_lrelu
from torch_utils.ops import bias_act

# ----------------------------------------------------------------------------
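The alias-free layers defined further below resample activations through low-pass filters designed with scipy. A hypothetical illustration of building such a separable Kaiser-window kernel (the parameter values here are made-up examples, not the ones this file computes):

import scipy.signal
import torch

def lowpass_1d(numtaps=12, cutoff=6.0, width=4.0, fs=32.0):
    # 1D FIR low-pass prototype; the 2D filter is applied separably.
    f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)
    return torch.as_tensor(f, dtype=torch.float32)

print(lowpass_1d().shape)  # torch.Size([12])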
|
24 |
-
|
25 |
-
|
26 |
-
@misc.profiled_function
|
27 |
-
def modulated_conv2d(
|
28 |
-
# Input tensor: [batch_size, in_channels, in_height, in_width]
|
29 |
-
x,
|
30 |
-
# Weight tensor: [out_channels, in_channels, kernel_height, kernel_width]
|
31 |
-
w,
|
32 |
-
s, # Style tensor: [batch_size, in_channels]
|
33 |
-
demodulate=True, # Apply weight demodulation?
|
34 |
-
padding=0, # Padding: int or [padH, padW]
|
35 |
-
input_gain=None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels]
|
36 |
-
):
|
37 |
-
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
|
38 |
-
batch_size = int(x.shape[0])
|
39 |
-
out_channels, in_channels, kh, kw = w.shape
|
40 |
-
misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk]
|
41 |
-
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
|
42 |
-
misc.assert_shape(s, [batch_size, in_channels]) # [NI]
|
43 |
-
|
44 |
-
# Pre-normalize inputs.
|
45 |
-
if demodulate:
|
46 |
-
w = w * w.square().mean([1, 2, 3], keepdim=True).rsqrt()
|
47 |
-
s = s * s.square().mean().rsqrt()
|
48 |
-
|
49 |
-
# Modulate weights.
|
50 |
-
w = w.unsqueeze(0) # [NOIkk]
|
51 |
-
w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
|
52 |
-
|
53 |
-
# Demodulate weights.
|
54 |
-
if demodulate:
|
55 |
-
dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
|
56 |
-
w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk]
|
57 |
-
|
58 |
-
# Apply input scaling.
|
59 |
-
if input_gain is not None:
|
60 |
-
input_gain = input_gain.expand(batch_size, in_channels) # [NI]
|
61 |
-
w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
|
62 |
-
|
63 |
-
# Execute as one fused op using grouped convolution.
|
64 |
-
x = x.reshape(1, -1, *x.shape[2:])
|
65 |
-
w = w.reshape(-1, in_channels, kh, kw)
|
66 |
-
x = conv2d_gradfix.conv2d(input=x, weight=w.to(
|
67 |
-
x.dtype), padding=padding, groups=batch_size)
|
68 |
-
x = x.reshape(batch_size, -1, *x.shape[2:])
|
69 |
-
return x
|
70 |
-
|
71 |
-
# ----------------------------------------------------------------------------
|
72 |
-
|
73 |
-
|
74 |
-
@persistence.persistent_class
|
75 |
-
class FullyConnectedLayer(torch.nn.Module):
|
76 |
-
def __init__(self,
|
77 |
-
in_features, # Number of input features.
|
78 |
-
out_features, # Number of output features.
|
79 |
-
# Activation function: 'relu', 'lrelu', etc.
|
80 |
-
activation='linear',
|
81 |
-
bias=True, # Apply additive bias before the activation function?
|
82 |
-
lr_multiplier=1, # Learning rate multiplier.
|
83 |
-
# Initial standard deviation of the weight tensor.
|
84 |
-
weight_init=1,
|
85 |
-
bias_init=0, # Initial value of the additive bias.
|
86 |
-
):
|
87 |
-
super().__init__()
|
88 |
-
self.in_features = in_features
|
89 |
-
self.out_features = out_features
|
90 |
-
self.activation = activation
|
91 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
92 |
-
[out_features, in_features]) * (weight_init / lr_multiplier))
|
93 |
-
bias_init = np.broadcast_to(np.asarray(
|
94 |
-
bias_init, dtype=np.float32), [out_features])
|
95 |
-
self.bias = torch.nn.Parameter(torch.from_numpy(
|
96 |
-
bias_init / lr_multiplier)) if bias else None
|
97 |
-
self.weight_gain = lr_multiplier / np.sqrt(in_features)
|
98 |
-
self.bias_gain = lr_multiplier
|
99 |
-
|
100 |
-
def forward(self, x):
|
101 |
-
w = self.weight.to(x.dtype) * self.weight_gain
|
102 |
-
b = self.bias
|
103 |
-
if b is not None:
|
104 |
-
b = b.to(x.dtype)
|
105 |
-
if self.bias_gain != 1:
|
106 |
-
b = b * self.bias_gain
|
107 |
-
if self.activation == 'linear' and b is not None:
|
108 |
-
x = torch.addmm(b.unsqueeze(0), x, w.t())
|
109 |
-
else:
|
110 |
-
x = x.matmul(w.t())
|
111 |
-
x = bias_act.bias_act(x, b, act=self.activation)
|
112 |
-
return x
|
113 |
-
|
114 |
-
def extra_repr(self):
|
115 |
-
return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
|
116 |
-
|
117 |
-
# ----------------------------------------------------------------------------
|
118 |
-
|
119 |
-
|
120 |
-
@persistence.persistent_class
|
121 |
-
class MappingNetwork(torch.nn.Module):
|
122 |
-
def __init__(self,
|
123 |
-
z_dim, # Input latent (Z) dimensionality.
|
124 |
-
# Conditioning label (C) dimensionality, 0 = no labels.
|
125 |
-
c_dim,
|
126 |
-
# Intermediate latent (W) dimensionality.
|
127 |
-
w_dim,
|
128 |
-
# Number of intermediate latents to output.
|
129 |
-
num_ws,
|
130 |
-
num_layers=2, # Number of mapping layers.
|
131 |
-
# Learning rate multiplier for the mapping layers.
|
132 |
-
lr_multiplier=0.01,
|
133 |
-
# Decay for tracking the moving average of W during training.
|
134 |
-
w_avg_beta=0.998,
|
135 |
-
):
|
136 |
-
super().__init__()
|
137 |
-
self.z_dim = z_dim
|
138 |
-
self.c_dim = c_dim
|
139 |
-
self.w_dim = w_dim
|
140 |
-
self.num_ws = num_ws
|
141 |
-
self.num_layers = num_layers
|
142 |
-
self.w_avg_beta = w_avg_beta
|
143 |
-
|
144 |
-
# Construct layers.
|
145 |
-
self.embed = FullyConnectedLayer(
|
146 |
-
self.c_dim, self.w_dim) if self.c_dim > 0 else None
|
147 |
-
features = [self.z_dim + (self.w_dim if self.c_dim >
|
148 |
-
0 else 0)] + [self.w_dim] * self.num_layers
|
149 |
-
for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]):
|
150 |
-
layer = FullyConnectedLayer(
|
151 |
-
in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier)
|
152 |
-
setattr(self, f'fc{idx}', layer)
|
153 |
-
self.register_buffer('w_avg', torch.zeros([w_dim]))
|
154 |
-
|
155 |
-
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
|
156 |
-
misc.assert_shape(z, [None, self.z_dim])
|
157 |
-
if truncation_cutoff is None:
|
158 |
-
truncation_cutoff = self.num_ws
|
159 |
-
|
160 |
-
# Embed, normalize, and concatenate inputs.
|
161 |
-
x = z.to(torch.float32)
|
162 |
-
x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt()
|
163 |
-
if self.c_dim > 0:
|
164 |
-
misc.assert_shape(c, [None, self.c_dim])
|
165 |
-
y = self.embed(c.to(torch.float32))
|
166 |
-
y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt()
|
167 |
-
x = torch.cat([x, y], dim=1) if x is not None else y
|
168 |
-
|
169 |
-
# Execute layers.
|
170 |
-
for idx in range(self.num_layers):
|
171 |
-
x = getattr(self, f'fc{idx}')(x)
|
172 |
-
|
173 |
-
# Update moving average of W.
|
174 |
-
if update_emas:
|
175 |
-
self.w_avg.copy_(x.detach().mean(
|
176 |
-
dim=0).lerp(self.w_avg, self.w_avg_beta))
|
177 |
-
|
178 |
-
# Broadcast and apply truncation.
|
179 |
-
x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
|
180 |
-
if truncation_psi != 1:
|
181 |
-
x[:, :truncation_cutoff] = self.w_avg.lerp(
|
182 |
-
x[:, :truncation_cutoff], truncation_psi)
|
183 |
-
return x
|
184 |
-
|
185 |
-
def extra_repr(self):
|
186 |
-
return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
|
187 |
-
|
188 |
-
# ----------------------------------------------------------------------------
|
189 |
-
|
190 |
-
|
191 |
-
@persistence.persistent_class
|
192 |
-
class SynthesisInput(torch.nn.Module):
|
193 |
-
def __init__(self,
|
194 |
-
w_dim, # Intermediate latent (W) dimensionality.
|
195 |
-
channels, # Number of output channels.
|
196 |
-
size, # Output spatial size: int or [width, height].
|
197 |
-
sampling_rate, # Output sampling rate.
|
198 |
-
bandwidth, # Output bandwidth.
|
199 |
-
):
|
200 |
-
super().__init__()
|
201 |
-
self.w_dim = w_dim
|
202 |
-
self.channels = channels
|
203 |
-
self.size = np.broadcast_to(np.asarray(size), [2])
|
204 |
-
self.sampling_rate = sampling_rate
|
205 |
-
self.bandwidth = bandwidth
|
206 |
-
|
207 |
-
# Draw random frequencies from uniform 2D disc.
|
208 |
-
freqs = torch.randn([self.channels, 2])
|
209 |
-
radii = freqs.square().sum(dim=1, keepdim=True).sqrt()
|
210 |
-
freqs /= radii * radii.square().exp().pow(0.25)
|
211 |
-
freqs *= bandwidth
|
212 |
-
phases = torch.rand([self.channels]) - 0.5
|
213 |
-
|
214 |
-
# Setup parameters and buffers.
|
215 |
-
self.weight = torch.nn.Parameter(
|
216 |
-
torch.randn([self.channels, self.channels]))
|
217 |
-
self.affine = FullyConnectedLayer(
|
218 |
-
w_dim, 4, weight_init=0, bias_init=[1, 0, 0, 0])
|
219 |
-
# User-specified inverse transform wrt. resulting image.
|
220 |
-
self.register_buffer('transform', torch.eye(3, 3))
|
221 |
-
self.register_buffer('freqs', freqs)
|
222 |
-
self.register_buffer('phases', phases)
|
223 |
-
|
224 |
-
def forward(self, w):
|
225 |
-
# Introduce batch dimension.
|
226 |
-
transforms = self.transform.unsqueeze(0) # [batch, row, col]
|
227 |
-
freqs = self.freqs.unsqueeze(0) # [batch, channel, xy]
|
228 |
-
phases = self.phases.unsqueeze(0) # [batch, channel]
|
229 |
-
|
230 |
-
# Apply learned transformation.
|
231 |
-
t = self.affine(w) # t = (r_c, r_s, t_x, t_y)
|
232 |
-
# t' = (r'_c, r'_s, t'_x, t'_y)
|
233 |
-
t = t / t[:, :2].norm(dim=1, keepdim=True)
|
234 |
-
# Inverse rotation wrt. resulting image.
|
235 |
-
m_r = torch.eye(3, device=w.device).unsqueeze(
|
236 |
-
0).repeat([w.shape[0], 1, 1])
|
237 |
-
m_r[:, 0, 0] = t[:, 0] # r'_c
|
238 |
-
m_r[:, 0, 1] = -t[:, 1] # r'_s
|
239 |
-
m_r[:, 1, 0] = t[:, 1] # r'_s
|
240 |
-
m_r[:, 1, 1] = t[:, 0] # r'_c
|
241 |
-
# Inverse translation wrt. resulting image.
|
242 |
-
m_t = torch.eye(3, device=w.device).unsqueeze(
|
243 |
-
0).repeat([w.shape[0], 1, 1])
|
244 |
-
m_t[:, 0, 2] = -t[:, 2] # t'_x
|
245 |
-
m_t[:, 1, 2] = -t[:, 3] # t'_y
|
246 |
-
# First rotate resulting image, then translate, and finally apply user-specified transform.
|
247 |
-
transforms = m_r @ m_t @ transforms
|
248 |
-
|
249 |
-
# Transform frequencies.
|
250 |
-
phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2)
|
251 |
-
freqs = freqs @ transforms[:, :2, :2]
|
252 |
-
|
253 |
-
# Dampen out-of-band frequencies that may occur due to the user-specified transform.
|
254 |
-
amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) /
|
255 |
-
(self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1)
|
256 |
-
|
257 |
-
# Construct sampling grid.
|
258 |
-
theta = torch.eye(2, 3, device=w.device)
|
259 |
-
theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate
|
260 |
-
theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate
|
261 |
-
grids = torch.nn.functional.affine_grid(theta.unsqueeze(
|
262 |
-
0), [1, 1, self.size[1], self.size[0]], align_corners=False)
|
263 |
-
|
264 |
-
# Compute Fourier features.
|
265 |
-
x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)
|
266 |
-
).squeeze(3) # [batch, height, width, channel]
|
267 |
-
x = x + phases.unsqueeze(1).unsqueeze(2)
|
268 |
-
x = torch.sin(x * (np.pi * 2))
|
269 |
-
x = x * amplitudes.unsqueeze(1).unsqueeze(2)
|
270 |
-
|
271 |
-
# Apply trainable mapping.
|
272 |
-
weight = self.weight / np.sqrt(self.channels)
|
273 |
-
x = x @ weight.t()
|
274 |
-
|
275 |
-
# Ensure correct shape.
|
276 |
-
x = x.permute(0, 3, 1, 2) # [batch, channel, height, width]
|
277 |
-
misc.assert_shape(x, [w.shape[0], self.channels,
|
278 |
-
int(self.size[1]), int(self.size[0])])
|
279 |
-
return x
|
280 |
-
|
281 |
-
def extra_repr(self):
|
282 |
-
return '\n'.join([
|
283 |
-
f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},',
|
284 |
-
f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}'])
|
285 |
-
|
286 |
-
# ----------------------------------------------------------------------------
|
287 |
-
|
288 |
-
|
289 |
-
@persistence.persistent_class
|
290 |
-
class SynthesisLayer(torch.nn.Module):
|
291 |
-
def __init__(self,
|
292 |
-
# Intermediate latent (W) dimensionality.
|
293 |
-
w_dim,
|
294 |
-
is_torgb, # Is this the final ToRGB layer?
|
295 |
-
is_critically_sampled, # Does this layer use critical sampling?
|
296 |
-
use_fp16, # Does this layer use FP16?
|
297 |
-
|
298 |
-
# Input & output specifications.
|
299 |
-
in_channels, # Number of input channels.
|
300 |
-
out_channels, # Number of output channels.
|
301 |
-
# Input spatial size: int or [width, height].
|
302 |
-
in_size,
|
303 |
-
# Output spatial size: int or [width, height].
|
304 |
-
out_size,
|
305 |
-
in_sampling_rate, # Input sampling rate (s).
|
306 |
-
out_sampling_rate, # Output sampling rate (s).
|
307 |
-
# Input cutoff frequency (f_c).
|
308 |
-
in_cutoff,
|
309 |
-
# Output cutoff frequency (f_c).
|
310 |
-
out_cutoff,
|
311 |
-
# Input transition band half-width (f_h).
|
312 |
-
in_half_width,
|
313 |
-
# Output Transition band half-width (f_h).
|
314 |
-
out_half_width,
|
315 |
-
|
316 |
-
# Hyperparameters.
|
317 |
-
# Convolution kernel size. Ignored for final the ToRGB layer.
|
318 |
-
conv_kernel=3,
|
319 |
-
# Low-pass filter size relative to the lower resolution when up/downsampling.
|
320 |
-
filter_size=6,
|
321 |
-
# Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer.
|
322 |
-
lrelu_upsampling=2,
|
323 |
-
# Use radially symmetric downsampling filter? Ignored for critically sampled layers.
|
324 |
-
use_radial_filters=False,
|
325 |
-
# Clamp the output to [-X, +X], None = disable clamping.
|
326 |
-
conv_clamp=256,
|
327 |
-
# Decay rate for the moving average of input magnitudes.
|
328 |
-
magnitude_ema_beta=0.999,
|
329 |
-
):
|
330 |
-
super().__init__()
|
331 |
-
self.w_dim = w_dim
|
332 |
-
self.is_torgb = is_torgb
|
333 |
-
self.is_critically_sampled = is_critically_sampled
|
334 |
-
self.use_fp16 = use_fp16
|
335 |
-
self.in_channels = in_channels
|
336 |
-
self.out_channels = out_channels
|
337 |
-
self.in_size = np.broadcast_to(np.asarray(in_size), [2])
|
338 |
-
self.out_size = np.broadcast_to(np.asarray(out_size), [2])
|
339 |
-
self.in_sampling_rate = in_sampling_rate
|
340 |
-
self.out_sampling_rate = out_sampling_rate
|
341 |
-
self.tmp_sampling_rate = max(
|
342 |
-
in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling)
|
343 |
-
self.in_cutoff = in_cutoff
|
344 |
-
self.out_cutoff = out_cutoff
|
345 |
-
self.in_half_width = in_half_width
|
346 |
-
self.out_half_width = out_half_width
|
347 |
-
self.conv_kernel = 1 if is_torgb else conv_kernel
|
348 |
-
self.conv_clamp = conv_clamp
|
349 |
-
self.magnitude_ema_beta = magnitude_ema_beta
|
350 |
-
|
351 |
-
# Setup parameters and buffers.
|
352 |
-
self.affine = FullyConnectedLayer(
|
353 |
-
self.w_dim, self.in_channels, bias_init=1)
|
354 |
-
self.weight = torch.nn.Parameter(torch.randn(
|
355 |
-
[self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel]))
|
356 |
-
self.bias = torch.nn.Parameter(torch.zeros([self.out_channels]))
|
357 |
-
self.register_buffer('magnitude_ema', torch.ones([]))
|
358 |
-
|
359 |
-
# Design upsampling filter.
|
360 |
-
self.up_factor = int(
|
361 |
-
np.rint(self.tmp_sampling_rate / self.in_sampling_rate))
|
362 |
-
assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate
|
363 |
-
self.up_taps = filter_size * \
|
364 |
-
self.up_factor if self.up_factor > 1 and not self.is_torgb else 1
|
365 |
-
self.register_buffer('up_filter', self.design_lowpass_filter(
|
366 |
-
numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate))
|
367 |
-
|
368 |
-
# Design downsampling filter.
|
369 |
-
self.down_factor = int(
|
370 |
-
np.rint(self.tmp_sampling_rate / self.out_sampling_rate))
|
371 |
-
assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate
|
372 |
-
self.down_taps = filter_size * \
|
373 |
-
self.down_factor if self.down_factor > 1 and not self.is_torgb else 1
|
374 |
-
self.down_radial = use_radial_filters and not self.is_critically_sampled
|
375 |
-
self.register_buffer('down_filter', self.design_lowpass_filter(
|
376 |
-
numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial))
|
377 |
-
|
378 |
-
# Compute padding.
|
379 |
-
# Desired output size before downsampling.
|
380 |
-
pad_total = (self.out_size - 1) * self.down_factor + 1
|
381 |
-
# Input size after upsampling.
|
382 |
-
pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor
|
383 |
-
# Size reduction caused by the filters.
|
384 |
-
pad_total += self.up_taps + self.down_taps - 2
|
385 |
-
# Shift sample locations according to the symmetric interpretation (Appendix C.3).
|
386 |
-
pad_lo = (pad_total + self.up_factor) // 2
|
387 |
-
pad_hi = pad_total - pad_lo
|
388 |
-
self.padding = [int(pad_lo[0]), int(pad_hi[0]),
|
389 |
-
int(pad_lo[1]), int(pad_hi[1])]
|
390 |
-
|
391 |
-
def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False):
|
392 |
-
assert noise_mode in ['random', 'const', 'none'] # unused
|
393 |
-
misc.assert_shape(x, [None, self.in_channels, int(
|
394 |
-
self.in_size[1]), int(self.in_size[0])])
|
395 |
-
misc.assert_shape(w, [x.shape[0], self.w_dim])
|
396 |
-
|
397 |
-
# Track input magnitude.
|
398 |
-
if update_emas:
|
399 |
-
with torch.autograd.profiler.record_function('update_magnitude_ema'):
|
400 |
-
magnitude_cur = x.detach().to(torch.float32).square().mean()
|
401 |
-
self.magnitude_ema.copy_(magnitude_cur.lerp(
|
402 |
-
self.magnitude_ema, self.magnitude_ema_beta))
|
403 |
-
input_gain = self.magnitude_ema.rsqrt()
|
404 |
-
|
405 |
-
# Execute affine layer.
|
406 |
-
styles = self.affine(w)
|
407 |
-
if self.is_torgb:
|
408 |
-
weight_gain = 1 / \
|
409 |
-
np.sqrt(self.in_channels * (self.conv_kernel ** 2))
|
410 |
-
styles = styles * weight_gain
|
411 |
-
|
412 |
-
# Execute modulated conv2d.
|
413 |
-
dtype = torch.float16 if (
|
414 |
-
self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32
|
415 |
-
x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles,
|
416 |
-
padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain)
|
417 |
-
|
418 |
-
# Execute bias, filtered leaky ReLU, and clamping.
|
419 |
-
gain = 1 if self.is_torgb else np.sqrt(2)
|
420 |
-
slope = 1 if self.is_torgb else 0.2
|
421 |
-
x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),
|
422 |
-
up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp)
|
423 |
-
|
424 |
-
# Ensure correct shape and dtype.
|
425 |
-
misc.assert_shape(x, [None, self.out_channels, int(
|
426 |
-
self.out_size[1]), int(self.out_size[0])])
|
427 |
-
assert x.dtype == dtype
|
428 |
-
return x
|
429 |
-
|
430 |
-
@staticmethod
|
431 |
-
def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False):
|
432 |
-
assert numtaps >= 1
|
433 |
-
|
434 |
-
# Identity filter.
|
435 |
-
if numtaps == 1:
|
436 |
-
return None
|
437 |
-
|
438 |
-
# Separable Kaiser low-pass filter.
|
439 |
-
if not radial:
|
440 |
-
f = scipy.signal.firwin(
|
441 |
-
numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)
|
442 |
-
return torch.as_tensor(f, dtype=torch.float32)
|
443 |
-
|
444 |
-
# Radially symmetric jinc-based filter.
|
445 |
-
x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs
|
446 |
-
r = np.hypot(*np.meshgrid(x, x))
|
447 |
-
f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r)
|
448 |
-
beta = scipy.signal.kaiser_beta(
|
449 |
-
scipy.signal.kaiser_atten(numtaps, width / (fs / 2)))
|
450 |
-
w = np.kaiser(numtaps, beta)
|
451 |
-
f *= np.outer(w, w)
|
452 |
-
f /= np.sum(f)
|
453 |
-
return torch.as_tensor(f, dtype=torch.float32)
|
454 |
-
|
455 |
-
def extra_repr(self):
|
456 |
-
return '\n'.join([
|
457 |
-
f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},',
|
458 |
-
f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},',
|
459 |
-
f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},',
|
460 |
-
f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},',
|
461 |
-
f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},',
|
462 |
-
f'in_size={list(self.in_size)}, out_size={list(self.out_size)},',
|
463 |
-
f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}'])
|
464 |
-
|
465 |
-
# ----------------------------------------------------------------------------
|
466 |
-
|
467 |
-
|
468 |
-
@persistence.persistent_class
|
469 |
-
class SynthesisNetwork(torch.nn.Module):
|
470 |
-
def __init__(self,
|
471 |
-
# Intermediate latent (W) dimensionality.
|
472 |
-
w_dim,
|
473 |
-
img_resolution, # Output image resolution.
|
474 |
-
img_channels, # Number of color channels.
|
475 |
-
# Overall multiplier for the number of channels.
|
476 |
-
channel_base=32768,
|
477 |
-
# Maximum number of channels in any layer.
|
478 |
-
channel_max=512,
|
479 |
-
# Total number of layers, excluding Fourier features and ToRGB.
|
480 |
-
num_layers=14,
|
481 |
-
# Number of critically sampled layers at the end.
|
482 |
-
num_critical=2,
|
483 |
-
# Cutoff frequency of the first layer (f_{c,0}).
|
484 |
-
first_cutoff=2,
|
485 |
-
# Minimum stopband of the first layer (f_{t,0}).
|
486 |
-
first_stopband=2**2.1,
|
487 |
-
# Minimum stopband of the last layer, expressed relative to the cutoff.
|
488 |
-
last_stopband_rel=2**0.3,
|
489 |
-
# Number of additional pixels outside the image.
|
490 |
-
margin_size=10,
|
491 |
-
output_scale=0.25, # Scale factor for the output image.
|
492 |
-
# Use FP16 for the N highest resolutions.
|
493 |
-
num_fp16_res=4,
|
494 |
-
# Arguments for SynthesisLayer.
|
495 |
-
**layer_kwargs,
|
496 |
-
):
|
497 |
-
super().__init__()
|
498 |
-
self.w_dim = w_dim
|
499 |
-
self.num_ws = num_layers + 2
|
500 |
-
self.img_resolution = img_resolution
|
501 |
-
self.img_channels = img_channels
|
502 |
-
self.num_layers = num_layers
|
503 |
-
self.num_critical = num_critical
|
504 |
-
self.margin_size = margin_size
|
505 |
-
self.output_scale = output_scale
|
506 |
-
self.num_fp16_res = num_fp16_res
|
507 |
-
|
508 |
-
# Geometric progression of layer cutoffs and min. stopbands.
|
509 |
-
last_cutoff = self.img_resolution / 2 # f_{c,N}
|
510 |
-
last_stopband = last_cutoff * last_stopband_rel # f_{t,N}
|
511 |
-
exponents = np.minimum(
|
512 |
-
np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1)
|
513 |
-
cutoffs = first_cutoff * \
|
514 |
-
(last_cutoff / first_cutoff) ** exponents # f_c[i]
|
515 |
-
stopbands = first_stopband * \
|
516 |
-
(last_stopband / first_stopband) ** exponents # f_t[i]
|
517 |
-
|
518 |
-
# Compute remaining layer parameters.
|
519 |
-
sampling_rates = np.exp2(
|
520 |
-
np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i]
|
521 |
-
half_widths = np.maximum(
|
522 |
-
stopbands, sampling_rates / 2) - cutoffs # f_h[i]
|
523 |
-
sizes = sampling_rates + self.margin_size * 2
|
524 |
-
sizes[-2:] = self.img_resolution
|
525 |
-
channels = np.rint(np.minimum(
|
526 |
-
(channel_base / 2) / cutoffs, channel_max))
|
527 |
-
channels[-1] = self.img_channels
|
528 |
-
|
529 |
-
# Construct layers.
|
530 |
-
self.input = SynthesisInput(
|
531 |
-
w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]),
|
532 |
-
sampling_rate=sampling_rates[0], bandwidth=cutoffs[0])
|
533 |
-
self.layer_names = []
|
534 |
-
for idx in range(self.num_layers + 1):
|
535 |
-
prev = max(idx - 1, 0)
|
536 |
-
is_torgb = (idx == self.num_layers)
|
537 |
-
is_critically_sampled = (
|
538 |
-
idx >= self.num_layers - self.num_critical)
|
539 |
-
use_fp16 = (sampling_rates[idx] * (2 **
|
540 |
-
self.num_fp16_res) > self.img_resolution)
|
541 |
-
layer = SynthesisLayer(
|
542 |
-
w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16,
|
543 |
-
in_channels=int(channels[prev]), out_channels=int(channels[idx]),
|
544 |
-
in_size=int(sizes[prev]), out_size=int(sizes[idx]),
|
545 |
-
in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]),
|
546 |
-
in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx],
|
547 |
-
in_half_width=half_widths[prev], out_half_width=half_widths[idx],
|
548 |
-
**layer_kwargs)
|
549 |
-
name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}'
|
550 |
-
setattr(self, name, layer)
|
551 |
-
self.layer_names.append(name)
|
552 |
-
|
553 |
-
def forward(self, ws, return_feature=False, **layer_kwargs):
|
554 |
-
features = []
|
555 |
-
misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
|
556 |
-
ws = ws.to(torch.float32).unbind(dim=1)
|
557 |
-
|
558 |
-
# Execute layers.
|
559 |
-
x = self.input(ws[0])
|
560 |
-
for name, w in zip(self.layer_names, ws[1:]):
|
561 |
-
x = getattr(self, name)(x, w, **layer_kwargs)
|
562 |
-
features.append(x)
|
563 |
-
if self.output_scale != 1:
|
564 |
-
x = x * self.output_scale
|
565 |
-
|
566 |
-
# Ensure correct shape and dtype.
|
567 |
-
misc.assert_shape(x, [None, self.img_channels,
|
568 |
-
self.img_resolution, self.img_resolution])
|
569 |
-
x = x.to(torch.float32)
|
570 |
-
if return_feature:
|
571 |
-
return x, features
|
572 |
-
else:
|
573 |
-
return x
|
574 |
-
|
575 |
-
def extra_repr(self):
|
576 |
-
return '\n'.join([
|
577 |
-
f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
|
578 |
-
f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
|
579 |
-
f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},',
|
580 |
-
f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}'])
|
581 |
-
|
582 |
-
# ----------------------------------------------------------------------------
|
583 |
-
|
584 |
-
|
585 |
-
@persistence.persistent_class
|
586 |
-
class Generator(torch.nn.Module):
|
587 |
-
def __init__(self,
|
588 |
-
z_dim, # Input latent (Z) dimensionality.
|
589 |
-
# Conditioning label (C) dimensionality.
|
590 |
-
c_dim,
|
591 |
-
# Intermediate latent (W) dimensionality.
|
592 |
-
w_dim,
|
593 |
-
img_resolution, # Output resolution.
|
594 |
-
img_channels, # Number of output color channels.
|
595 |
-
mapping_kwargs={}, # Arguments for MappingNetwork.
|
596 |
-
resize=None,
|
597 |
-
**synthesis_kwargs, # Arguments for SynthesisNetwork.
|
598 |
-
):
|
599 |
-
super().__init__()
|
600 |
-
self.z_dim = z_dim
|
601 |
-
self.c_dim = c_dim
|
602 |
-
self.w_dim = w_dim
|
603 |
-
self.img_resolution = img_resolution
|
604 |
-
self.img_channels = img_channels
|
605 |
-
self.synthesis = SynthesisNetwork(
|
606 |
-
w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
|
607 |
-
self.num_ws = self.synthesis.num_ws
|
608 |
-
self.mapping = MappingNetwork(
|
609 |
-
z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
|
610 |
-
self.resize = resize
|
611 |
-
|
612 |
-
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs):
|
613 |
-
if input_is_w:
|
614 |
-
ws = z
|
615 |
-
if ws.dim() == 2:
|
616 |
-
ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
|
617 |
-
else:
|
618 |
-
ws = self.mapping(z, c, truncation_psi=truncation_psi,
|
619 |
-
truncation_cutoff=truncation_cutoff, update_emas=update_emas)
|
620 |
-
img = self.synthesis(ws, update_emas=update_emas,
|
621 |
-
return_feature=return_feature, **synthesis_kwargs)
|
622 |
-
if return_feature:
|
623 |
-
img, feature = img
|
624 |
-
if self.resize is not None:
|
625 |
-
img = imresize(img, [self.resize, self.resize])
|
626 |
-
if return_feature:
|
627 |
-
return img, feature
|
628 |
-
else:
|
629 |
-
return img
|
630 |
-
|
631 |
-
# ----------------------------------------------------------------------------
|
632 |
-
|
633 |
-
|
634 |
-
def imresize(image, size):
|
635 |
-
dim = image.dim()
|
636 |
-
if dim == 3:
|
637 |
-
image = image.unsqueeze(1)
|
638 |
-
b, _, h, w = image.shape
|
639 |
-
if size[0] > h:
|
640 |
-
image = F.interpolate(image, size, mode='bilinear')
|
641 |
-
elif size[0] < h:
|
642 |
-
image = F.interpolate(image, size, mode='area')
|
643 |
-
if dim == 3:
|
644 |
-
image = image.squeeze(1)
|
645 |
-
return image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
import abc
|
2 |
-
import numpy as np
|
3 |
-
import pytorch_lightning as pl
|
4 |
-
from pytorch_lightning.utilities.types import (
|
5 |
-
EVAL_DATALOADERS,
|
6 |
-
TRAIN_DATALOADERS,
|
7 |
-
)
|
8 |
-
from torch import nn
|
9 |
-
from torch.utils.data import DataLoader
|
10 |
-
import torch
|
11 |
-
from torchvision import transforms
|
12 |
-
|
13 |
-
from src.dataset import DATASET_REGISTRY
|
14 |
-
|
15 |
-
|
16 |
-
class AbstractModel(pl.LightningModule):
|
17 |
-
def __init__(self, cfg):
|
18 |
-
super().__init__()
|
19 |
-
self.cfg = cfg
|
20 |
-
self.train_dataset = None
|
21 |
-
self.val_dataset = None
|
22 |
-
self.metric_evaluator = None
|
23 |
-
self.init_model()
|
24 |
-
|
25 |
-
def setup(self, stage):
|
26 |
-
if stage in ["fit", "validate", "test"]:
|
27 |
-
self.train_dataset = DATASET_REGISTRY.get("BlenderDataset")(
|
28 |
-
**self.cfg["dataset"]["train"]["params"],
|
29 |
-
)
|
30 |
-
|
31 |
-
self.val_dataset = DATASET_REGISTRY.get("BlenderDataset")(
|
32 |
-
**self.cfg["dataset"]["val"]["params"],
|
33 |
-
)
|
34 |
-
# self.metric_evaluator = SHRECMetricEvaluator(
|
35 |
-
# embed_dim=self.cfg["model"]["embed_dim"]
|
36 |
-
# )
|
37 |
-
@abc.abstractmethod
|
38 |
-
def init_model(self):
|
39 |
-
"""
|
40 |
-
Function to initialize model
|
41 |
-
"""
|
42 |
-
raise NotImplementedError
|
43 |
-
|
44 |
-
@abc.abstractmethod
|
45 |
-
def forward(self, batch):
|
46 |
-
raise NotImplementedError
|
47 |
-
|
48 |
-
@abc.abstractmethod
|
49 |
-
def compute_loss(self, forwarded_batch, input_batch):
|
50 |
-
"""
|
51 |
-
Function to compute loss
|
52 |
-
Args:
|
53 |
-
forwarded_batch: output of `forward` method
|
54 |
-
input_batch: input of batch method
|
55 |
-
|
56 |
-
Returns:
|
57 |
-
loss: computed loss
|
58 |
-
"""
|
59 |
-
raise NotImplementedError
|
60 |
-
|
61 |
-
def training_step(self, batch, batch_idx):
|
62 |
-
# 1. get embeddings from model
|
63 |
-
forwarded_batch = self.forward(batch)
|
64 |
-
# 2. Calculate loss
|
65 |
-
loss = self.compute_loss(forwarded_batch=forwarded_batch, input_batch=batch)
|
66 |
-
# 3. Update monitor
|
67 |
-
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
|
68 |
-
return {"loss": loss}
|
69 |
-
|
70 |
-
def validation_step(self, batch, batch_idx):
|
71 |
-
# 1. Get embeddings from model
|
72 |
-
forwarded_batch = self.forward(batch)
|
73 |
-
# 2. Calculate loss
|
74 |
-
loss = self.compute_loss(forwarded_batch=forwarded_batch, input_batch=batch)
|
75 |
-
# 3. Update metric for each batch
|
76 |
-
self.log("val_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
|
77 |
-
self.metric_evaluator.append(
|
78 |
-
g_emb=forwarded_batch["pc_embedding_feats"].float().clone().detach(),
|
79 |
-
q_emb=forwarded_batch["query_embedding_feats"].float().clone().detach(),
|
80 |
-
query_ids=batch["query_ids"],
|
81 |
-
gallery_ids=batch["point_cloud_ids"],
|
82 |
-
target_ids=batch["point_cloud_ids"],
|
83 |
-
)
|
84 |
-
|
85 |
-
return {"loss": loss}
|
86 |
-
|
87 |
-
def validation_epoch_end(self, outputs) -> None:
|
88 |
-
"""
|
89 |
-
Callback at validation epoch end to do additional works
|
90 |
-
with output of validation step, note that this is called
|
91 |
-
before `training_epoch_end()`
|
92 |
-
Args:
|
93 |
-
outputs: output of validation step
|
94 |
-
"""
|
95 |
-
self.log_dict(
|
96 |
-
self.metric_evaluator.evaluate(),
|
97 |
-
prog_bar=True,
|
98 |
-
on_step=False,
|
99 |
-
on_epoch=True,
|
100 |
-
)
|
101 |
-
self.metric_evaluator.reset()
|
102 |
-
|
103 |
-
def train_dataloader(self) -> TRAIN_DATALOADERS:
|
104 |
-
train_loader = DataLoader(
|
105 |
-
dataset=self.train_dataset,
|
106 |
-
collate_fn=self.train_dataset.collate_fn,
|
107 |
-
**self.cfg["data_loader"]["train"]["params"],
|
108 |
-
)
|
109 |
-
return train_loader
|
110 |
-
|
111 |
-
def val_dataloader(self) -> EVAL_DATALOADERS:
|
112 |
-
val_loader = DataLoader(
|
113 |
-
dataset=self.val_dataset,
|
114 |
-
collate_fn=self.val_dataset.collate_fn,
|
115 |
-
**self.cfg["data_loader"]["val"]["params"],
|
116 |
-
)
|
117 |
-
return val_loader
|
118 |
-
|
119 |
-
def configure_optimizers(self):
|
120 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04
|
2 |
-
LABEL maintainer="Hugging Face"
|
3 |
-
LABEL repository="diffusers"
|
4 |
-
|
5 |
-
ENV DEBIAN_FRONTEND=noninteractive
|
6 |
-
|
7 |
-
RUN apt update && \
|
8 |
-
apt install -y bash \
|
9 |
-
build-essential \
|
10 |
-
git \
|
11 |
-
git-lfs \
|
12 |
-
curl \
|
13 |
-
ca-certificates \
|
14 |
-
libsndfile1-dev \
|
15 |
-
libgl1 \
|
16 |
-
python3.8 \
|
17 |
-
python3-pip \
|
18 |
-
python3.8-venv && \
|
19 |
-
rm -rf /var/lib/apt/lists
|
20 |
-
|
21 |
-
# make sure to use venv
|
22 |
-
RUN python3 -m venv /opt/venv
|
23 |
-
ENV PATH="/opt/venv/bin:$PATH"
|
24 |
-
|
25 |
-
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
|
26 |
-
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
27 |
-
python3 -m pip install --no-cache-dir \
|
28 |
-
torch \
|
29 |
-
torchvision \
|
30 |
-
torchaudio \
|
31 |
-
invisible_watermark && \
|
32 |
-
python3 -m pip install --no-cache-dir \
|
33 |
-
accelerate \
|
34 |
-
datasets \
|
35 |
-
hf-doc-builder \
|
36 |
-
huggingface-hub \
|
37 |
-
Jinja2 \
|
38 |
-
librosa \
|
39 |
-
numpy \
|
40 |
-
scipy \
|
41 |
-
tensorboard \
|
42 |
-
transformers \
|
43 |
-
omegaconf \
|
44 |
-
pytorch-lightning \
|
45 |
-
xformers
|
46 |
-
|
47 |
-
CMD ["/bin/bash"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py
DELETED
@@ -1,1645 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
""" Conversion script for the Stable Diffusion checkpoints."""
|
16 |
-
|
17 |
-
import re
|
18 |
-
from contextlib import nullcontext
|
19 |
-
from io import BytesIO
|
20 |
-
from typing import Optional
|
21 |
-
|
22 |
-
import requests
|
23 |
-
import torch
|
24 |
-
from transformers import (
|
25 |
-
AutoFeatureExtractor,
|
26 |
-
BertTokenizerFast,
|
27 |
-
CLIPImageProcessor,
|
28 |
-
CLIPTextConfig,
|
29 |
-
CLIPTextModel,
|
30 |
-
CLIPTextModelWithProjection,
|
31 |
-
CLIPTokenizer,
|
32 |
-
CLIPVisionConfig,
|
33 |
-
CLIPVisionModelWithProjection,
|
34 |
-
)
|
35 |
-
|
36 |
-
from ...models import (
|
37 |
-
AutoencoderKL,
|
38 |
-
ControlNetModel,
|
39 |
-
PriorTransformer,
|
40 |
-
UNet2DConditionModel,
|
41 |
-
)
|
42 |
-
from ...schedulers import (
|
43 |
-
DDIMScheduler,
|
44 |
-
DDPMScheduler,
|
45 |
-
DPMSolverMultistepScheduler,
|
46 |
-
EulerAncestralDiscreteScheduler,
|
47 |
-
EulerDiscreteScheduler,
|
48 |
-
HeunDiscreteScheduler,
|
49 |
-
LMSDiscreteScheduler,
|
50 |
-
PNDMScheduler,
|
51 |
-
UnCLIPScheduler,
|
52 |
-
)
|
53 |
-
from ...utils import is_accelerate_available, is_omegaconf_available, is_safetensors_available, logging
|
54 |
-
from ...utils.import_utils import BACKENDS_MAPPING
|
55 |
-
from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
|
56 |
-
from ..paint_by_example import PaintByExampleImageEncoder
|
57 |
-
from ..pipeline_utils import DiffusionPipeline
|
58 |
-
from .safety_checker import StableDiffusionSafetyChecker
|
59 |
-
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
|
60 |
-
|
61 |
-
|
62 |
-
if is_accelerate_available():
|
63 |
-
from accelerate import init_empty_weights
|
64 |
-
from accelerate.utils import set_module_tensor_to_device
|
65 |
-
|
66 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
67 |
-
|
68 |
-
|
69 |
-
def shave_segments(path, n_shave_prefix_segments=1):
|
70 |
-
"""
|
71 |
-
Removes segments. Positive values shave the first segments, negative shave the last segments.
|
72 |
-
"""
|
73 |
-
if n_shave_prefix_segments >= 0:
|
74 |
-
return ".".join(path.split(".")[n_shave_prefix_segments:])
|
75 |
-
else:
|
76 |
-
return ".".join(path.split(".")[:n_shave_prefix_segments])
|
77 |
-
|
78 |
-
|
79 |
-
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
|
80 |
-
"""
|
81 |
-
Updates paths inside resnets to the new naming scheme (local renaming)
|
82 |
-
"""
|
83 |
-
mapping = []
|
84 |
-
for old_item in old_list:
|
85 |
-
new_item = old_item.replace("in_layers.0", "norm1")
|
86 |
-
new_item = new_item.replace("in_layers.2", "conv1")
|
87 |
-
|
88 |
-
new_item = new_item.replace("out_layers.0", "norm2")
|
89 |
-
new_item = new_item.replace("out_layers.3", "conv2")
|
90 |
-
|
91 |
-
new_item = new_item.replace("emb_layers.1", "time_emb_proj")
|
92 |
-
new_item = new_item.replace("skip_connection", "conv_shortcut")
|
93 |
-
|
94 |
-
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
95 |
-
|
96 |
-
mapping.append({"old": old_item, "new": new_item})
|
97 |
-
|
98 |
-
return mapping
|
99 |
-
|
100 |
-
|
101 |
-
def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
|
102 |
-
"""
|
103 |
-
Updates paths inside resnets to the new naming scheme (local renaming)
|
104 |
-
"""
|
105 |
-
mapping = []
|
106 |
-
for old_item in old_list:
|
107 |
-
new_item = old_item
|
108 |
-
|
109 |
-
new_item = new_item.replace("nin_shortcut", "conv_shortcut")
|
110 |
-
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
111 |
-
|
112 |
-
mapping.append({"old": old_item, "new": new_item})
|
113 |
-
|
114 |
-
return mapping
|
115 |
-
|
116 |
-
|
117 |
-
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
|
118 |
-
"""
|
119 |
-
Updates paths inside attentions to the new naming scheme (local renaming)
|
120 |
-
"""
|
121 |
-
mapping = []
|
122 |
-
for old_item in old_list:
|
123 |
-
new_item = old_item
|
124 |
-
|
125 |
-
# new_item = new_item.replace('norm.weight', 'group_norm.weight')
|
126 |
-
# new_item = new_item.replace('norm.bias', 'group_norm.bias')
|
127 |
-
|
128 |
-
# new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
|
129 |
-
# new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
|
130 |
-
|
131 |
-
# new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
132 |
-
|
133 |
-
mapping.append({"old": old_item, "new": new_item})
|
134 |
-
|
135 |
-
return mapping
|
136 |
-
|
137 |
-
|
138 |
-
def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
|
139 |
-
"""
|
140 |
-
Updates paths inside attentions to the new naming scheme (local renaming)
|
141 |
-
"""
|
142 |
-
mapping = []
|
143 |
-
for old_item in old_list:
|
144 |
-
new_item = old_item
|
145 |
-
|
146 |
-
new_item = new_item.replace("norm.weight", "group_norm.weight")
|
147 |
-
new_item = new_item.replace("norm.bias", "group_norm.bias")
|
148 |
-
|
149 |
-
new_item = new_item.replace("q.weight", "to_q.weight")
|
150 |
-
new_item = new_item.replace("q.bias", "to_q.bias")
|
151 |
-
|
152 |
-
new_item = new_item.replace("k.weight", "to_k.weight")
|
153 |
-
new_item = new_item.replace("k.bias", "to_k.bias")
|
154 |
-
|
155 |
-
new_item = new_item.replace("v.weight", "to_v.weight")
|
156 |
-
new_item = new_item.replace("v.bias", "to_v.bias")
|
157 |
-
|
158 |
-
new_item = new_item.replace("proj_out.weight", "to_out.0.weight")
|
159 |
-
new_item = new_item.replace("proj_out.bias", "to_out.0.bias")
|
160 |
-
|
161 |
-
new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
|
162 |
-
|
163 |
-
mapping.append({"old": old_item, "new": new_item})
|
164 |
-
|
165 |
-
return mapping
|
166 |
-
|
167 |
-
|
168 |
-
def assign_to_checkpoint(
|
169 |
-
paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
|
170 |
-
):
|
171 |
-
"""
|
172 |
-
This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
|
173 |
-
attention layers, and takes into account additional replacements that may arise.
|
174 |
-
|
175 |
-
Assigns the weights to the new checkpoint.
|
176 |
-
"""
|
177 |
-
assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
|
178 |
-
|
179 |
-
# Splits the attention layers into three variables.
|
180 |
-
if attention_paths_to_split is not None:
|
181 |
-
for path, path_map in attention_paths_to_split.items():
|
182 |
-
old_tensor = old_checkpoint[path]
|
183 |
-
channels = old_tensor.shape[0] // 3
|
184 |
-
|
185 |
-
target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
|
186 |
-
|
187 |
-
num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
|
188 |
-
|
189 |
-
old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
|
190 |
-
query, key, value = old_tensor.split(channels // num_heads, dim=1)
|
191 |
-
|
192 |
-
checkpoint[path_map["query"]] = query.reshape(target_shape)
|
193 |
-
checkpoint[path_map["key"]] = key.reshape(target_shape)
|
194 |
-
checkpoint[path_map["value"]] = value.reshape(target_shape)
|
195 |
-
|
196 |
-
for path in paths:
|
197 |
-
new_path = path["new"]
|
198 |
-
|
199 |
-
# These have already been assigned
|
200 |
-
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
|
201 |
-
continue
|
202 |
-
|
203 |
-
# Global renaming happens here
|
204 |
-
new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
|
205 |
-
new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
|
206 |
-
new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
|
207 |
-
|
208 |
-
if additional_replacements is not None:
|
209 |
-
for replacement in additional_replacements:
|
210 |
-
new_path = new_path.replace(replacement["old"], replacement["new"])
|
211 |
-
|
212 |
-
# proj_attn.weight has to be converted from conv 1D to linear
|
213 |
-
is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path)
|
214 |
-
shape = old_checkpoint[path["old"]].shape
|
215 |
-
if is_attn_weight and len(shape) == 3:
|
216 |
-
checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
|
217 |
-
elif is_attn_weight and len(shape) == 4:
|
218 |
-
checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0]
|
219 |
-
else:
|
220 |
-
checkpoint[new_path] = old_checkpoint[path["old"]]
|
221 |
-
|
222 |
-
|
223 |
-
def conv_attn_to_linear(checkpoint):
|
224 |
-
keys = list(checkpoint.keys())
|
225 |
-
attn_keys = ["query.weight", "key.weight", "value.weight"]
|
226 |
-
for key in keys:
|
227 |
-
if ".".join(key.split(".")[-2:]) in attn_keys:
|
228 |
-
if checkpoint[key].ndim > 2:
|
229 |
-
checkpoint[key] = checkpoint[key][:, :, 0, 0]
|
230 |
-
elif "proj_attn.weight" in key:
|
231 |
-
if checkpoint[key].ndim > 2:
|
232 |
-
checkpoint[key] = checkpoint[key][:, :, 0]
|
233 |
-
|
234 |
-
|
235 |
-
def create_unet_diffusers_config(original_config, image_size: int, controlnet=False):
|
236 |
-
"""
|
237 |
-
Creates a config for the diffusers based on the config of the LDM model.
|
238 |
-
"""
|
239 |
-
if controlnet:
|
240 |
-
unet_params = original_config.model.params.control_stage_config.params
|
241 |
-
else:
|
242 |
-
if "unet_config" in original_config.model.params and original_config.model.params.unet_config is not None:
|
243 |
-
unet_params = original_config.model.params.unet_config.params
|
244 |
-
else:
|
245 |
-
unet_params = original_config.model.params.network_config.params
|
246 |
-
|
247 |
-
vae_params = original_config.model.params.first_stage_config.params.ddconfig
|
248 |
-
|
249 |
-
block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult]
|
250 |
-
|
251 |
-
down_block_types = []
|
252 |
-
resolution = 1
|
253 |
-
for i in range(len(block_out_channels)):
|
254 |
-
block_type = "CrossAttnDownBlock2D" if resolution in unet_params.attention_resolutions else "DownBlock2D"
|
255 |
-
down_block_types.append(block_type)
|
256 |
-
if i != len(block_out_channels) - 1:
|
257 |
-
resolution *= 2
|
258 |
-
|
259 |
-
up_block_types = []
|
260 |
-
for i in range(len(block_out_channels)):
|
261 |
-
block_type = "CrossAttnUpBlock2D" if resolution in unet_params.attention_resolutions else "UpBlock2D"
|
262 |
-
up_block_types.append(block_type)
|
263 |
-
resolution //= 2
|
264 |
-
|
265 |
-
if unet_params.transformer_depth is not None:
|
266 |
-
transformer_layers_per_block = (
|
267 |
-
unet_params.transformer_depth
|
268 |
-
if isinstance(unet_params.transformer_depth, int)
|
269 |
-
else list(unet_params.transformer_depth)
|
270 |
-
)
|
271 |
-
else:
|
272 |
-
transformer_layers_per_block = 1
|
273 |
-
|
274 |
-
vae_scale_factor = 2 ** (len(vae_params.ch_mult) - 1)
|
275 |
-
|
276 |
-
head_dim = unet_params.num_heads if "num_heads" in unet_params else None
|
277 |
-
use_linear_projection = (
|
278 |
-
unet_params.use_linear_in_transformer if "use_linear_in_transformer" in unet_params else False
|
279 |
-
)
|
280 |
-
if use_linear_projection:
|
281 |
-
# stable diffusion 2-base-512 and 2-768
|
282 |
-
if head_dim is None:
|
283 |
-
head_dim_mult = unet_params.model_channels // unet_params.num_head_channels
|
284 |
-
head_dim = [head_dim_mult * c for c in list(unet_params.channel_mult)]
|
285 |
-
|
286 |
-
class_embed_type = None
|
287 |
-
addition_embed_type = None
|
288 |
-
addition_time_embed_dim = None
|
289 |
-
projection_class_embeddings_input_dim = None
|
290 |
-
context_dim = None
|
291 |
-
|
292 |
-
if unet_params.context_dim is not None:
|
293 |
-
context_dim = (
|
294 |
-
unet_params.context_dim if isinstance(unet_params.context_dim, int) else unet_params.context_dim[0]
|
295 |
-
)
|
296 |
-
|
297 |
-
if "num_classes" in unet_params:
|
298 |
-
if unet_params.num_classes == "sequential":
|
299 |
-
if context_dim in [2048, 1280]:
|
300 |
-
# SDXL
|
301 |
-
addition_embed_type = "text_time"
|
302 |
-
addition_time_embed_dim = 256
|
303 |
-
else:
|
304 |
-
class_embed_type = "projection"
|
305 |
-
assert "adm_in_channels" in unet_params
|
306 |
-
projection_class_embeddings_input_dim = unet_params.adm_in_channels
|
307 |
-
else:
|
308 |
-
raise NotImplementedError(f"Unknown conditional unet num_classes config: {unet_params.num_classes}")
|
309 |
-
|
310 |
-
config = {
|
311 |
-
"sample_size": image_size // vae_scale_factor,
|
312 |
-
"in_channels": unet_params.in_channels,
|
313 |
-
"down_block_types": tuple(down_block_types),
|
314 |
-
"block_out_channels": tuple(block_out_channels),
|
315 |
-
"layers_per_block": unet_params.num_res_blocks,
|
316 |
-
"cross_attention_dim": context_dim,
|
317 |
-
"attention_head_dim": head_dim,
|
318 |
-
"use_linear_projection": use_linear_projection,
|
319 |
-
"class_embed_type": class_embed_type,
|
320 |
-
"addition_embed_type": addition_embed_type,
|
321 |
-
"addition_time_embed_dim": addition_time_embed_dim,
|
322 |
-
"projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
|
323 |
-
"transformer_layers_per_block": transformer_layers_per_block,
|
324 |
-
}
|
325 |
-
|
326 |
-
if controlnet:
|
327 |
-
config["conditioning_channels"] = unet_params.hint_channels
|
328 |
-
else:
|
329 |
-
config["out_channels"] = unet_params.out_channels
|
330 |
-
config["up_block_types"] = tuple(up_block_types)
|
331 |
-
|
332 |
-
return config
|
333 |
-
|
334 |
-
|
335 |
-
def create_vae_diffusers_config(original_config, image_size: int):
|
336 |
-
"""
|
337 |
-
Creates a config for the diffusers based on the config of the LDM model.
|
338 |
-
"""
|
339 |
-
vae_params = original_config.model.params.first_stage_config.params.ddconfig
|
340 |
-
_ = original_config.model.params.first_stage_config.params.embed_dim
|
341 |
-
|
342 |
-
block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult]
|
343 |
-
down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
|
344 |
-
up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)
|
345 |
-
|
346 |
-
config = {
|
347 |
-
"sample_size": image_size,
|
348 |
-
"in_channels": vae_params.in_channels,
|
349 |
-
"out_channels": vae_params.out_ch,
|
350 |
-
"down_block_types": tuple(down_block_types),
|
351 |
-
"up_block_types": tuple(up_block_types),
|
352 |
-
"block_out_channels": tuple(block_out_channels),
|
353 |
-
"latent_channels": vae_params.z_channels,
|
354 |
-
"layers_per_block": vae_params.num_res_blocks,
|
355 |
-
}
|
356 |
-
return config
|
357 |
-
|
358 |
-
|
359 |
-
def create_diffusers_schedular(original_config):
|
360 |
-
schedular = DDIMScheduler(
|
361 |
-
num_train_timesteps=original_config.model.params.timesteps,
|
362 |
-
beta_start=original_config.model.params.linear_start,
|
363 |
-
beta_end=original_config.model.params.linear_end,
|
364 |
-
beta_schedule="scaled_linear",
|
365 |
-
)
|
366 |
-
return schedular
|
367 |
-
|
368 |
-
|
369 |
-
def create_ldm_bert_config(original_config):
|
370 |
-
bert_params = original_config.model.parms.cond_stage_config.params
|
371 |
-
config = LDMBertConfig(
|
372 |
-
d_model=bert_params.n_embed,
|
373 |
-
encoder_layers=bert_params.n_layer,
|
374 |
-
encoder_ffn_dim=bert_params.n_embed * 4,
|
375 |
-
)
|
376 |
-
return config
|
377 |
-
|
378 |
-
|
379 |
-
def convert_ldm_unet_checkpoint(
|
380 |
-
checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False
|
381 |
-
):
|
382 |
-
"""
|
383 |
-
Takes a state dict and a config, and returns a converted checkpoint.
|
384 |
-
"""
|
385 |
-
|
386 |
-
if skip_extract_state_dict:
|
387 |
-
unet_state_dict = checkpoint
|
388 |
-
else:
|
389 |
-
# extract state_dict for UNet
|
390 |
-
unet_state_dict = {}
|
391 |
-
keys = list(checkpoint.keys())
|
392 |
-
|
393 |
-
if controlnet:
|
394 |
-
unet_key = "control_model."
|
395 |
-
else:
|
396 |
-
unet_key = "model.diffusion_model."
|
397 |
-
|
398 |
-
# at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
|
399 |
-
if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema:
|
400 |
-
logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.")
|
401 |
-
logger.warning(
|
402 |
-
"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
|
403 |
-
" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
|
404 |
-
)
|
405 |
-
for key in keys:
|
406 |
-
if key.startswith("model.diffusion_model"):
|
407 |
-
flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
|
408 |
-
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
|
409 |
-
else:
|
410 |
-
if sum(k.startswith("model_ema") for k in keys) > 100:
|
411 |
-
logger.warning(
|
412 |
-
"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
|
413 |
-
" weights (usually better for inference), please make sure to add the `--extract_ema` flag."
|
414 |
-
)
|
415 |
-
|
416 |
-
for key in keys:
|
417 |
-
if key.startswith(unet_key):
|
418 |
-
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
|
419 |
-
|
420 |
-
new_checkpoint = {}
|
421 |
-
|
422 |
-
new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
|
423 |
-
new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
|
424 |
-
new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
|
425 |
-
new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
|
426 |
-
|
427 |
-
if config["class_embed_type"] is None:
|
428 |
-
# No parameters to port
|
429 |
-
...
|
430 |
-
elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
|
431 |
-
new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
|
432 |
-
new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
|
433 |
-
new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
|
434 |
-
new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
|
435 |
-
else:
|
436 |
-
raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")
|
437 |
-
|
438 |
-
if config["addition_embed_type"] == "text_time":
|
439 |
-
new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
|
440 |
-
new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
|
441 |
-
new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
|
442 |
-
new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
|
443 |
-
|
444 |
-
new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
|
445 |
-
new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
|
446 |
-
|
447 |
-
if not controlnet:
|
448 |
-
new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
|
449 |
-
new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
|
450 |
-
new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
|
451 |
-
new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
|
452 |
-
|
453 |
-
# Retrieves the keys for the input blocks only
|
454 |
-
num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
|
455 |
-
input_blocks = {
|
456 |
-
layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
|
457 |
-
for layer_id in range(num_input_blocks)
|
458 |
-
}
|
459 |
-
|
460 |
-
# Retrieves the keys for the middle blocks only
|
461 |
-
num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
|
462 |
-
middle_blocks = {
|
463 |
-
layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
|
464 |
-
for layer_id in range(num_middle_blocks)
|
465 |
-
}
|
466 |
-
|
467 |
-
# Retrieves the keys for the output blocks only
|
468 |
-
num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
|
469 |
-
output_blocks = {
|
470 |
-
layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
|
471 |
-
for layer_id in range(num_output_blocks)
|
472 |
-
}
|
473 |
-
|
474 |
-
for i in range(1, num_input_blocks):
|
475 |
-
block_id = (i - 1) // (config["layers_per_block"] + 1)
|
476 |
-
layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
|
477 |
-
|
478 |
-
resnets = [
|
479 |
-
key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
|
480 |
-
]
|
481 |
-
attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
|
482 |
-
|
483 |
-
if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
|
484 |
-
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
|
485 |
-
f"input_blocks.{i}.0.op.weight"
|
486 |
-
)
|
487 |
-
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
|
488 |
-
f"input_blocks.{i}.0.op.bias"
|
489 |
-
)
|
490 |
-
|
491 |
-
paths = renew_resnet_paths(resnets)
|
492 |
-
meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
|
493 |
-
assign_to_checkpoint(
|
494 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
495 |
-
)
|
496 |
-
|
497 |
-
if len(attentions):
|
498 |
-
paths = renew_attention_paths(attentions)
|
499 |
-
meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
|
500 |
-
assign_to_checkpoint(
|
501 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
502 |
-
)
|
503 |
-
|
504 |
-
resnet_0 = middle_blocks[0]
|
505 |
-
attentions = middle_blocks[1]
|
506 |
-
resnet_1 = middle_blocks[2]
|
507 |
-
|
508 |
-
resnet_0_paths = renew_resnet_paths(resnet_0)
|
509 |
-
assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
|
510 |
-
|
511 |
-
resnet_1_paths = renew_resnet_paths(resnet_1)
|
512 |
-
assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
|
513 |
-
|
514 |
-
attentions_paths = renew_attention_paths(attentions)
|
515 |
-
meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
|
516 |
-
assign_to_checkpoint(
|
517 |
-
attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
518 |
-
)
|
519 |
-
|
520 |
-
for i in range(num_output_blocks):
|
521 |
-
block_id = i // (config["layers_per_block"] + 1)
|
522 |
-
layer_in_block_id = i % (config["layers_per_block"] + 1)
|
523 |
-
output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
|
524 |
-
output_block_list = {}
|
525 |
-
|
526 |
-
for layer in output_block_layers:
|
527 |
-
layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
|
528 |
-
if layer_id in output_block_list:
|
529 |
-
output_block_list[layer_id].append(layer_name)
|
530 |
-
else:
|
531 |
-
output_block_list[layer_id] = [layer_name]
|
532 |
-
|
533 |
-
if len(output_block_list) > 1:
|
534 |
-
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
|
535 |
-
attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
|
536 |
-
|
537 |
-
resnet_0_paths = renew_resnet_paths(resnets)
|
538 |
-
paths = renew_resnet_paths(resnets)
|
539 |
-
|
540 |
-
meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
|
541 |
-
assign_to_checkpoint(
|
542 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
543 |
-
)
|
544 |
-
|
545 |
-
output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
|
546 |
-
if ["conv.bias", "conv.weight"] in output_block_list.values():
|
547 |
-
index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
|
548 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
|
549 |
-
f"output_blocks.{i}.{index}.conv.weight"
|
550 |
-
]
|
551 |
-
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
|
552 |
-
f"output_blocks.{i}.{index}.conv.bias"
|
553 |
-
]
|
554 |
-
|
555 |
-
# Clear attentions as they have been attributed above.
|
556 |
-
if len(attentions) == 2:
|
557 |
-
attentions = []
|
558 |
-
|
559 |
-
if len(attentions):
|
560 |
-
paths = renew_attention_paths(attentions)
|
561 |
-
meta_path = {
|
562 |
-
"old": f"output_blocks.{i}.1",
|
563 |
-
"new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
|
564 |
-
}
|
565 |
-
assign_to_checkpoint(
|
566 |
-
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
567 |
-
)
|
568 |
-
else:
|
569 |
-
resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
|
570 |
-
for path in resnet_0_paths:
|
571 |
-
old_path = ".".join(["output_blocks", str(i), path["old"]])
|
572 |
-
new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
|
573 |
-
|
574 |
-
new_checkpoint[new_path] = unet_state_dict[old_path]
|
575 |
-
|
576 |
-
if controlnet:
|
577 |
-
# conditioning embedding
|
578 |
-
|
579 |
-
orig_index = 0
|
580 |
-
|
581 |
-
new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop(
|
582 |
-
f"input_hint_block.{orig_index}.weight"
|
583 |
-
)
|
584 |
-
new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop(
|
585 |
-
f"input_hint_block.{orig_index}.bias"
|
586 |
-
)
|
587 |
-
|
588 |
-
orig_index += 2
|
589 |
-
|
590 |
-
diffusers_index = 0
|
591 |
-
|
592 |
-
while diffusers_index < 6:
|
593 |
-
new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop(
|
594 |
-
f"input_hint_block.{orig_index}.weight"
|
595 |
-
)
|
596 |
-
new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop(
|
597 |
-
f"input_hint_block.{orig_index}.bias"
|
598 |
-
)
|
599 |
-
diffusers_index += 1
|
600 |
-
orig_index += 2
|
601 |
-
|
602 |
-
new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop(
|
603 |
-
f"input_hint_block.{orig_index}.weight"
|
604 |
-
)
|
605 |
-
new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop(
|
606 |
-
f"input_hint_block.{orig_index}.bias"
|
607 |
-
)
|
608 |
-
|
609 |
-
# down blocks
|
610 |
-
for i in range(num_input_blocks):
|
611 |
-
new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight")
|
612 |
-
new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias")
|
613 |
-
|
614 |
-
# mid block
|
615 |
-
new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight")
|
616 |
-
new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias")
|
617 |
-
|
618 |
-
return new_checkpoint
|
619 |
-
|
620 |
-
|
621 |
-
def convert_ldm_vae_checkpoint(checkpoint, config):
|
622 |
-
# extract state dict for VAE
|
623 |
-
vae_state_dict = {}
|
624 |
-
keys = list(checkpoint.keys())
|
625 |
-
vae_key = "first_stage_model." if any(k.startswith("first_stage_model.") for k in keys) else ""
|
626 |
-
for key in keys:
|
627 |
-
if key.startswith(vae_key):
|
628 |
-
vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
|
629 |
-
|
630 |
-
new_checkpoint = {}
|
631 |
-
|
632 |
-
new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
|
633 |
-
new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
|
634 |
-
new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
|
635 |
-
new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
|
636 |
-
new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
|
637 |
-
new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
|
638 |
-
|
639 |
-
new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
|
640 |
-
new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
|
641 |
-
new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
|
642 |
-
new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
|
643 |
-
new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
|
644 |
-
new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
|
645 |
-
|
646 |
-
new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
|
647 |
-
new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
|
648 |
-
new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
|
649 |
-
new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
|
650 |
-
|
651 |
-
# Retrieves the keys for the encoder down blocks only
|
652 |
-
num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
|
653 |
-
down_blocks = {
|
654 |
-
layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
|
655 |
-
}
|
656 |
-
|
657 |
-
# Retrieves the keys for the decoder up blocks only
|
658 |
-
num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
|
659 |
-
up_blocks = {
|
660 |
-
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
|
661 |
-
}
|
662 |
-
|
663 |
-
for i in range(num_down_blocks):
|
664 |
-
resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
|
665 |
-
|
666 |
-
if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
|
667 |
-
new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
|
668 |
-
f"encoder.down.{i}.downsample.conv.weight"
|
669 |
-
)
|
670 |
-
new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
|
671 |
-
f"encoder.down.{i}.downsample.conv.bias"
|
672 |
-
)
|
673 |
-
|
674 |
-
paths = renew_vae_resnet_paths(resnets)
|
675 |
-
meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
|
676 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
677 |
-
|
678 |
-
mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
|
679 |
-
num_mid_res_blocks = 2
|
680 |
-
for i in range(1, num_mid_res_blocks + 1):
|
681 |
-
resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
|
682 |
-
|
683 |
-
paths = renew_vae_resnet_paths(resnets)
|
684 |
-
meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
|
685 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
686 |
-
|
687 |
-
mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
|
688 |
-
paths = renew_vae_attention_paths(mid_attentions)
|
689 |
-
meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
|
690 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
691 |
-
conv_attn_to_linear(new_checkpoint)
|
692 |
-
|
693 |
-
for i in range(num_up_blocks):
|
694 |
-
block_id = num_up_blocks - 1 - i
|
695 |
-
resnets = [
|
696 |
-
key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
|
697 |
-
]
|
698 |
-
|
699 |
-
if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
|
700 |
-
new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
|
701 |
-
f"decoder.up.{block_id}.upsample.conv.weight"
|
702 |
-
]
|
703 |
-
new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
|
704 |
-
f"decoder.up.{block_id}.upsample.conv.bias"
|
705 |
-
]
|
706 |
-
|
707 |
-
paths = renew_vae_resnet_paths(resnets)
|
708 |
-
meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
|
709 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
710 |
-
|
711 |
-
mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
|
712 |
-
num_mid_res_blocks = 2
|
713 |
-
for i in range(1, num_mid_res_blocks + 1):
|
714 |
-
resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
|
715 |
-
|
716 |
-
paths = renew_vae_resnet_paths(resnets)
|
717 |
-
meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
|
718 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
719 |
-
|
720 |
-
mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
|
721 |
-
paths = renew_vae_attention_paths(mid_attentions)
|
722 |
-
meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
|
723 |
-
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
724 |
-
conv_attn_to_linear(new_checkpoint)
|
725 |
-
return new_checkpoint
|
726 |
-
|
727 |
-
|
728 |
-
def convert_ldm_bert_checkpoint(checkpoint, config):
|
729 |
-
def _copy_attn_layer(hf_attn_layer, pt_attn_layer):
|
730 |
-
hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight
|
731 |
-
hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight
|
732 |
-
hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight
|
733 |
-
|
734 |
-
hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight
|
735 |
-
hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias
|
736 |
-
|
737 |
-
def _copy_linear(hf_linear, pt_linear):
|
738 |
-
hf_linear.weight = pt_linear.weight
|
739 |
-
hf_linear.bias = pt_linear.bias
|
740 |
-
|
741 |
-
def _copy_layer(hf_layer, pt_layer):
|
742 |
-
# copy layer norms
|
743 |
-
_copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0])
|
744 |
-
_copy_linear(hf_layer.final_layer_norm, pt_layer[1][0])
|
745 |
-
|
746 |
-
# copy attn
|
747 |
-
_copy_attn_layer(hf_layer.self_attn, pt_layer[0][1])
|
748 |
-
|
749 |
-
# copy MLP
|
750 |
-
pt_mlp = pt_layer[1][1]
|
751 |
-
_copy_linear(hf_layer.fc1, pt_mlp.net[0][0])
|
752 |
-
_copy_linear(hf_layer.fc2, pt_mlp.net[2])
|
753 |
-
|
754 |
-
def _copy_layers(hf_layers, pt_layers):
|
755 |
-
for i, hf_layer in enumerate(hf_layers):
|
756 |
-
if i != 0:
|
757 |
-
i += i
|
758 |
-
pt_layer = pt_layers[i : i + 2]
|
759 |
-
_copy_layer(hf_layer, pt_layer)
|
760 |
-
|
761 |
-
hf_model = LDMBertModel(config).eval()
|
762 |
-
|
763 |
-
# copy embeds
|
764 |
-
hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight
|
765 |
-
hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight
|
766 |
-
|
767 |
-
# copy layer norm
|
768 |
-
_copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm)
|
769 |
-
|
770 |
-
# copy hidden layers
|
771 |
-
_copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers)
|
772 |
-
|
773 |
-
_copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits)
|
774 |
-
|
775 |
-
return hf_model
|
776 |
-
|
777 |
-
|
778 |
-
def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None):
|
779 |
-
if text_encoder is None:
|
780 |
-
config_name = "openai/clip-vit-large-patch14"
|
781 |
-
config = CLIPTextConfig.from_pretrained(config_name)
|
782 |
-
|
783 |
-
ctx = init_empty_weights if is_accelerate_available() else nullcontext
|
784 |
-
with ctx():
|
785 |
-
text_model = CLIPTextModel(config)
|
786 |
-
|
787 |
-
keys = list(checkpoint.keys())
|
788 |
-
|
789 |
-
text_model_dict = {}
|
790 |
-
|
791 |
-
remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"]
|
792 |
-
|
793 |
-
for key in keys:
|
794 |
-
for prefix in remove_prefixes:
|
795 |
-
if key.startswith(prefix):
|
796 |
-
text_model_dict[key[len(prefix + ".") :]] = checkpoint[key]
|
797 |
-
|
798 |
-
if is_accelerate_available():
|
799 |
-
for param_name, param in text_model_dict.items():
|
800 |
-
set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
|
801 |
-
else:
|
802 |
-
if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
|
803 |
-
text_model_dict.pop("text_model.embeddings.position_ids", None)
|
804 |
-
|
805 |
-
text_model.load_state_dict(text_model_dict)
|
806 |
-
|
807 |
-
return text_model
|
808 |
-
|
809 |
-
|
810 |
-
textenc_conversion_lst = [
|
811 |
-
("positional_embedding", "text_model.embeddings.position_embedding.weight"),
|
812 |
-
("token_embedding.weight", "text_model.embeddings.token_embedding.weight"),
|
813 |
-
("ln_final.weight", "text_model.final_layer_norm.weight"),
|
814 |
-
("ln_final.bias", "text_model.final_layer_norm.bias"),
|
815 |
-
("text_projection", "text_projection.weight"),
|
816 |
-
]
|
817 |
-
textenc_conversion_map = {x[0]: x[1] for x in textenc_conversion_lst}
|
818 |
-
|
819 |
-
textenc_transformer_conversion_lst = [
|
820 |
-
# (stable-diffusion, HF Diffusers)
|
821 |
-
("resblocks.", "text_model.encoder.layers."),
|
822 |
-
("ln_1", "layer_norm1"),
|
823 |
-
("ln_2", "layer_norm2"),
|
824 |
-
(".c_fc.", ".fc1."),
|
825 |
-
(".c_proj.", ".fc2."),
|
826 |
-
(".attn", ".self_attn"),
|
827 |
-
("ln_final.", "transformer.text_model.final_layer_norm."),
|
828 |
-
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
|
829 |
-
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
|
830 |
-
]
|
831 |
-
protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst}
|
832 |
-
textenc_pattern = re.compile("|".join(protected.keys()))
|
833 |
-
|
834 |
-
|
835 |
-
def convert_paint_by_example_checkpoint(checkpoint):
|
836 |
-
config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14")
|
837 |
-
model = PaintByExampleImageEncoder(config)
|
838 |
-
|
839 |
-
keys = list(checkpoint.keys())
|
840 |
-
|
841 |
-
text_model_dict = {}
|
842 |
-
|
843 |
-
for key in keys:
|
844 |
-
if key.startswith("cond_stage_model.transformer"):
|
845 |
-
text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key]
|
846 |
-
|
847 |
-
# load clip vision
|
848 |
-
model.model.load_state_dict(text_model_dict)
|
849 |
-
|
850 |
-
# load mapper
|
851 |
-
keys_mapper = {
|
852 |
-
k[len("cond_stage_model.mapper.res") :]: v
|
853 |
-
for k, v in checkpoint.items()
|
854 |
-
if k.startswith("cond_stage_model.mapper")
|
855 |
-
}
|
856 |
-
|
857 |
-
MAPPING = {
|
858 |
-
"attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"],
|
859 |
-
"attn.c_proj": ["attn1.to_out.0"],
|
860 |
-
"ln_1": ["norm1"],
|
861 |
-
"ln_2": ["norm3"],
|
862 |
-
"mlp.c_fc": ["ff.net.0.proj"],
|
863 |
-
"mlp.c_proj": ["ff.net.2"],
|
864 |
-
}
|
865 |
-
|
866 |
-
mapped_weights = {}
|
867 |
-
for key, value in keys_mapper.items():
|
868 |
-
prefix = key[: len("blocks.i")]
|
869 |
-
suffix = key.split(prefix)[-1].split(".")[-1]
|
870 |
-
name = key.split(prefix)[-1].split(suffix)[0][1:-1]
|
871 |
-
mapped_names = MAPPING[name]
|
872 |
-
|
873 |
-
num_splits = len(mapped_names)
|
874 |
-
for i, mapped_name in enumerate(mapped_names):
|
875 |
-
new_name = ".".join([prefix, mapped_name, suffix])
|
876 |
-
shape = value.shape[0] // num_splits
|
877 |
-
mapped_weights[new_name] = value[i * shape : (i + 1) * shape]
|
878 |
-
|
879 |
-
model.mapper.load_state_dict(mapped_weights)
|
880 |
-
|
881 |
-
# load final layer norm
|
882 |
-
model.final_layer_norm.load_state_dict(
|
883 |
-
{
|
884 |
-
"bias": checkpoint["cond_stage_model.final_ln.bias"],
|
885 |
-
"weight": checkpoint["cond_stage_model.final_ln.weight"],
|
886 |
-
}
|
887 |
-
)
|
888 |
-
|
889 |
-
# load final proj
|
890 |
-
model.proj_out.load_state_dict(
|
891 |
-
{
|
892 |
-
"bias": checkpoint["proj_out.bias"],
|
893 |
-
"weight": checkpoint["proj_out.weight"],
|
894 |
-
}
|
895 |
-
)
|
896 |
-
|
897 |
-
# load uncond vector
|
898 |
-
model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"])
|
899 |
-
return model
|
900 |
-
|
901 |
-
|
902 |
-
def convert_open_clip_checkpoint(
|
903 |
-
checkpoint, config_name, prefix="cond_stage_model.model.", has_projection=False, **config_kwargs
|
904 |
-
):
|
905 |
-
# text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder")
|
906 |
-
# text_model = CLIPTextModelWithProjection.from_pretrained(
|
907 |
-
# "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280
|
908 |
-
# )
|
909 |
-
config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs)
|
910 |
-
|
911 |
-
ctx = init_empty_weights if is_accelerate_available() else nullcontext
|
912 |
-
with ctx():
|
913 |
-
text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config)
|
914 |
-
|
915 |
-
keys = list(checkpoint.keys())
|
916 |
-
|
917 |
-
keys_to_ignore = []
|
918 |
-
if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23:
|
919 |
-
# make sure to remove all keys > 22
|
920 |
-
keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")]
|
921 |
-
keys_to_ignore += ["cond_stage_model.model.text_projection"]
|
922 |
-
|
923 |
-
text_model_dict = {}
|
924 |
-
|
925 |
-
if prefix + "text_projection" in checkpoint:
|
926 |
-
d_model = int(checkpoint[prefix + "text_projection"].shape[0])
|
927 |
-
else:
|
928 |
-
d_model = 1024
|
929 |
-
|
930 |
-
text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids")
|
931 |
-
|
932 |
-
for key in keys:
|
933 |
-
if key in keys_to_ignore:
|
934 |
-
continue
|
935 |
-
if key[len(prefix) :] in textenc_conversion_map:
|
936 |
-
if key.endswith("text_projection"):
|
937 |
-
value = checkpoint[key].T.contiguous()
|
938 |
-
else:
|
939 |
-
value = checkpoint[key]
|
940 |
-
|
941 |
-
text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value
|
942 |
-
|
943 |
-
if key.startswith(prefix + "transformer."):
|
944 |
-
new_key = key[len(prefix + "transformer.") :]
|
945 |
-
if new_key.endswith(".in_proj_weight"):
|
946 |
-
new_key = new_key[: -len(".in_proj_weight")]
|
947 |
-
new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
|
948 |
-
text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :]
|
949 |
-
text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :]
|
950 |
-
text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :]
|
951 |
-
elif new_key.endswith(".in_proj_bias"):
|
952 |
-
new_key = new_key[: -len(".in_proj_bias")]
|
953 |
-
new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
|
954 |
-
text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model]
|
955 |
-
text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2]
|
956 |
-
text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :]
|
957 |
-
else:
|
958 |
-
new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
|
959 |
-
|
960 |
-
text_model_dict[new_key] = checkpoint[key]
|
961 |
-
|
962 |
-
if is_accelerate_available():
|
963 |
-
for param_name, param in text_model_dict.items():
|
964 |
-
set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
|
965 |
-
else:
|
966 |
-
if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
|
967 |
-
text_model_dict.pop("text_model.embeddings.position_ids", None)
|
968 |
-
|
969 |
-
text_model.load_state_dict(text_model_dict)
|
970 |
-
|
971 |
-
return text_model
|
972 |
-
|
973 |
-
|
974 |
-
def stable_unclip_image_encoder(original_config):
|
975 |
-
"""
|
976 |
-
Returns the image processor and clip image encoder for the img2img unclip pipeline.
|
977 |
-
|
978 |
-
We currently know of two types of stable unclip models which separately use the clip and the openclip image
|
979 |
-
encoders.
|
980 |
-
"""
|
981 |
-
|
982 |
-
image_embedder_config = original_config.model.params.embedder_config
|
983 |
-
|
984 |
-
sd_clip_image_embedder_class = image_embedder_config.target
|
985 |
-
sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1]
|
986 |
-
|
987 |
-
if sd_clip_image_embedder_class == "ClipImageEmbedder":
|
988 |
-
clip_model_name = image_embedder_config.params.model
|
989 |
-
|
990 |
-
if clip_model_name == "ViT-L/14":
|
991 |
-
feature_extractor = CLIPImageProcessor()
|
992 |
-
image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
|
993 |
-
else:
|
994 |
-
raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}")
|
995 |
-
|
996 |
-
elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder":
|
997 |
-
feature_extractor = CLIPImageProcessor()
|
998 |
-
image_encoder = CLIPVisionModelWithProjection.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K")
|
999 |
-
else:
|
1000 |
-
raise NotImplementedError(
|
1001 |
-
f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}"
|
1002 |
-
)
|
1003 |
-
|
1004 |
-
return feature_extractor, image_encoder
|
1005 |
-
|
1006 |
-
|
1007 |
-
def stable_unclip_image_noising_components(
|
1008 |
-
original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None
|
1009 |
-
):
|
1010 |
-
"""
|
1011 |
-
Returns the noising components for the img2img and txt2img unclip pipelines.
|
1012 |
-
|
1013 |
-
Converts the stability noise augmentor into
|
1014 |
-
1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats
|
1015 |
-
2. a `DDPMScheduler` for holding the noise schedule
|
1016 |
-
|
1017 |
-
If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided.
|
1018 |
-
"""
|
1019 |
-
noise_aug_config = original_config.model.params.noise_aug_config
|
1020 |
-
noise_aug_class = noise_aug_config.target
|
1021 |
-
noise_aug_class = noise_aug_class.split(".")[-1]
|
1022 |
-
|
1023 |
-
if noise_aug_class == "CLIPEmbeddingNoiseAugmentation":
|
1024 |
-
noise_aug_config = noise_aug_config.params
|
1025 |
-
embedding_dim = noise_aug_config.timestep_dim
|
1026 |
-
max_noise_level = noise_aug_config.noise_schedule_config.timesteps
|
1027 |
-
beta_schedule = noise_aug_config.noise_schedule_config.beta_schedule
|
1028 |
-
|
1029 |
-
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim)
|
1030 |
-
image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule)
|
1031 |
-
|
1032 |
-
if "clip_stats_path" in noise_aug_config:
|
1033 |
-
if clip_stats_path is None:
|
1034 |
-
raise ValueError("This stable unclip config requires a `clip_stats_path`")
|
1035 |
-
|
1036 |
-
clip_mean, clip_std = torch.load(clip_stats_path, map_location=device)
|
1037 |
-
clip_mean = clip_mean[None, :]
|
1038 |
-
clip_std = clip_std[None, :]
|
1039 |
-
|
1040 |
-
clip_stats_state_dict = {
|
1041 |
-
"mean": clip_mean,
|
1042 |
-
"std": clip_std,
|
1043 |
-
}
|
1044 |
-
|
1045 |
-
image_normalizer.load_state_dict(clip_stats_state_dict)
|
1046 |
-
else:
|
1047 |
-
raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}")
|
1048 |
-
|
1049 |
-
return image_normalizer, image_noising_scheduler
|
1050 |
-
|
1051 |
-
|
1052 |
-
def convert_controlnet_checkpoint(
|
1053 |
-
checkpoint,
|
1054 |
-
original_config,
|
1055 |
-
checkpoint_path,
|
1056 |
-
image_size,
|
1057 |
-
upcast_attention,
|
1058 |
-
extract_ema,
|
1059 |
-
use_linear_projection=None,
|
1060 |
-
cross_attention_dim=None,
|
1061 |
-
):
|
1062 |
-
ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True)
|
1063 |
-
ctrlnet_config["upcast_attention"] = upcast_attention
|
1064 |
-
|
1065 |
-
ctrlnet_config.pop("sample_size")
|
1066 |
-
|
1067 |
-
if use_linear_projection is not None:
|
1068 |
-
ctrlnet_config["use_linear_projection"] = use_linear_projection
|
1069 |
-
|
1070 |
-
if cross_attention_dim is not None:
|
1071 |
-
ctrlnet_config["cross_attention_dim"] = cross_attention_dim
|
1072 |
-
|
1073 |
-
controlnet = ControlNetModel(**ctrlnet_config)
|
1074 |
-
|
1075 |
-
# Some controlnet ckpt files are distributed independently from the rest of the
|
1076 |
-
# model components i.e. https://huggingface.co/thibaud/controlnet-sd21/
|
1077 |
-
if "time_embed.0.weight" in checkpoint:
|
1078 |
-
skip_extract_state_dict = True
|
1079 |
-
else:
|
1080 |
-
skip_extract_state_dict = False
|
1081 |
-
|
1082 |
-
converted_ctrl_checkpoint = convert_ldm_unet_checkpoint(
|
1083 |
-
checkpoint,
|
1084 |
-
ctrlnet_config,
|
1085 |
-
path=checkpoint_path,
|
1086 |
-
extract_ema=extract_ema,
|
1087 |
-
controlnet=True,
|
1088 |
-
skip_extract_state_dict=skip_extract_state_dict,
|
1089 |
-
)
|
1090 |
-
|
1091 |
-
controlnet.load_state_dict(converted_ctrl_checkpoint)
|
1092 |
-
|
1093 |
-
return controlnet
|
1094 |
-
|
1095 |
-
|
1096 |
-
def download_from_original_stable_diffusion_ckpt(
|
1097 |
-
checkpoint_path: str,
|
1098 |
-
original_config_file: str = None,
|
1099 |
-
image_size: Optional[int] = None,
|
1100 |
-
prediction_type: str = None,
|
1101 |
-
model_type: str = None,
|
1102 |
-
extract_ema: bool = False,
|
1103 |
-
scheduler_type: str = "pndm",
|
1104 |
-
num_in_channels: Optional[int] = None,
|
1105 |
-
upcast_attention: Optional[bool] = None,
|
1106 |
-
device: str = None,
|
1107 |
-
from_safetensors: bool = False,
|
1108 |
-
stable_unclip: Optional[str] = None,
|
1109 |
-
stable_unclip_prior: Optional[str] = None,
|
1110 |
-
clip_stats_path: Optional[str] = None,
|
1111 |
-
controlnet: Optional[bool] = None,
|
1112 |
-
load_safety_checker: bool = True,
|
1113 |
-
pipeline_class: DiffusionPipeline = None,
|
1114 |
-
local_files_only=False,
|
1115 |
-
vae_path=None,
|
1116 |
-
vae=None,
|
1117 |
-
text_encoder=None,
|
1118 |
-
tokenizer=None,
|
1119 |
-
) -> DiffusionPipeline:
|
1120 |
-
"""
|
1121 |
-
Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`
|
1122 |
-
config file.
|
1123 |
-
|
1124 |
-
Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the
|
1125 |
-
global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is
|
1126 |
-
recommended that you override the default values and/or supply an `original_config_file` wherever possible.
|
1127 |
-
|
1128 |
-
Args:
|
1129 |
-
checkpoint_path (`str`): Path to `.ckpt` file.
|
1130 |
-
original_config_file (`str`):
|
1131 |
-
Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically
|
1132 |
-
inferred by looking for a key that only exists in SD2.0 models.
|
1133 |
-
image_size (`int`, *optional*, defaults to 512):
|
1134 |
-
The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2
|
1135 |
-
Base. Use 768 for Stable Diffusion v2.
|
1136 |
-
prediction_type (`str`, *optional*):
|
1137 |
-
The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable
|
1138 |
-
Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2.
|
1139 |
-
num_in_channels (`int`, *optional*, defaults to None):
|
1140 |
-
The number of input channels. If `None`, it will be automatically inferred.
|
1141 |
-
scheduler_type (`str`, *optional*, defaults to 'pndm'):
|
1142 |
-
Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
|
1143 |
-
"ddim"]`.
|
1144 |
-
model_type (`str`, *optional*, defaults to `None`):
|
1145 |
-
The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder",
|
1146 |
-
"FrozenCLIPEmbedder", "PaintByExample"]`.
|
1147 |
-
is_img2img (`bool`, *optional*, defaults to `False`):
|
1148 |
-
Whether the model should be loaded as an img2img pipeline.
|
1149 |
-
extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for
|
1150 |
-
checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to
|
1151 |
-
`False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for
|
1152 |
-
inference. Non-EMA weights are usually better to continue fine-tuning.
|
1153 |
-
upcast_attention (`bool`, *optional*, defaults to `None`):
|
1154 |
-
Whether the attention computation should always be upcasted. This is necessary when running stable
|
1155 |
-
diffusion 2.1.
|
1156 |
-
device (`str`, *optional*, defaults to `None`):
|
1157 |
-
The device to use. Pass `None` to determine automatically.
|
1158 |
-
from_safetensors (`str`, *optional*, defaults to `False`):
|
1159 |
-
If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.
|
1160 |
-
load_safety_checker (`bool`, *optional*, defaults to `True`):
|
1161 |
-
Whether to load the safety checker or not. Defaults to `True`.
|
1162 |
-
pipeline_class (`str`, *optional*, defaults to `None`):
|
1163 |
-
The pipeline class to use. Pass `None` to determine automatically.
|
1164 |
-
local_files_only (`bool`, *optional*, defaults to `False`):
|
1165 |
-
Whether or not to only look at local files (i.e., do not try to download the model).
|
1166 |
-
vae (`AutoencoderKL`, *optional*, defaults to `None`):
|
1167 |
-
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
|
1168 |
-
this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
|
1169 |
-
text_encoder (`CLIPTextModel`, *optional*, defaults to `None`):
|
1170 |
-
An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel)
|
1171 |
-
to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)
|
1172 |
-
variant. If this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed.
|
1173 |
-
tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`):
|
1174 |
-
An instance of
|
1175 |
-
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer)
|
1176 |
-
to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if
|
1177 |
-
needed.
|
1178 |
-
return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file.
|
1179 |
-
"""
|
1180 |
-
|
1181 |
-
# import pipelines here to avoid circular import error when using from_single_file method
|
1182 |
-
from diffusers import (
|
1183 |
-
LDMTextToImagePipeline,
|
1184 |
-
PaintByExamplePipeline,
|
1185 |
-
StableDiffusionControlNetPipeline,
|
1186 |
-
StableDiffusionInpaintPipeline,
|
1187 |
-
StableDiffusionPipeline,
|
1188 |
-
StableDiffusionXLImg2ImgPipeline,
|
1189 |
-
StableDiffusionXLPipeline,
|
1190 |
-
StableUnCLIPImg2ImgPipeline,
|
1191 |
-
StableUnCLIPPipeline,
|
1192 |
-
)
|
1193 |
-
|
1194 |
-
if pipeline_class is None:
|
1195 |
-
pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline
|
1196 |
-
|
1197 |
-
if prediction_type == "v-prediction":
|
1198 |
-
prediction_type = "v_prediction"
|
1199 |
-
|
1200 |
-
if not is_omegaconf_available():
|
1201 |
-
raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
|
1202 |
-
|
1203 |
-
from omegaconf import OmegaConf
|
1204 |
-
|
1205 |
-
if from_safetensors:
|
1206 |
-
if not is_safetensors_available():
|
1207 |
-
raise ValueError(BACKENDS_MAPPING["safetensors"][1])
|
1208 |
-
|
1209 |
-
from safetensors.torch import load_file as safe_load
|
1210 |
-
|
1211 |
-
checkpoint = safe_load(checkpoint_path, device="cpu")
|
1212 |
-
else:
|
1213 |
-
if device is None:
|
1214 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
1215 |
-
checkpoint = torch.load(checkpoint_path, map_location=device)
|
1216 |
-
else:
|
1217 |
-
checkpoint = torch.load(checkpoint_path, map_location=device)
|
1218 |
-
|
1219 |
-
# Sometimes models don't have the global_step item
|
1220 |
-
if "global_step" in checkpoint:
|
1221 |
-
global_step = checkpoint["global_step"]
|
1222 |
-
else:
|
1223 |
-
logger.debug("global_step key not found in model")
|
1224 |
-
global_step = None
|
1225 |
-
|
1226 |
-
# NOTE: this while loop isn't great but this controlnet checkpoint has one additional
|
1227 |
-
# "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21
|
1228 |
-
while "state_dict" in checkpoint:
|
1229 |
-
checkpoint = checkpoint["state_dict"]
|
1230 |
-
|
1231 |
-
if original_config_file is None:
|
1232 |
-
key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
|
1233 |
-
key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias"
|
1234 |
-
key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
|
1235 |
-
|
1236 |
-
# model_type = "v1"
|
1237 |
-
config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
|
1238 |
-
|
1239 |
-
if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024:
|
1240 |
-
# model_type = "v2"
|
1241 |
-
config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
|
1242 |
-
|
1243 |
-
if global_step == 110000:
|
1244 |
-
# v2.1 needs to upcast attention
|
1245 |
-
upcast_attention = True
|
1246 |
-
elif key_name_sd_xl_base in checkpoint:
|
1247 |
-
# only base xl has two text embedders
|
1248 |
-
config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
|
1249 |
-
elif key_name_sd_xl_refiner in checkpoint:
|
1250 |
-
# only refiner xl has embedder and one text embedders
|
1251 |
-
config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml"
|
1252 |
-
|
1253 |
-
original_config_file = BytesIO(requests.get(config_url).content)
|
1254 |
-
|
1255 |
-
original_config = OmegaConf.load(original_config_file)
|
1256 |
-
|
1257 |
-
# Convert the text model.
|
1258 |
-
if (
|
1259 |
-
model_type is None
|
1260 |
-
and "cond_stage_config" in original_config.model.params
|
1261 |
-
and original_config.model.params.cond_stage_config is not None
|
1262 |
-
):
|
1263 |
-
model_type = original_config.model.params.cond_stage_config.target.split(".")[-1]
|
1264 |
-
logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}")
|
1265 |
-
elif model_type is None and original_config.model.params.network_config is not None:
|
1266 |
-
if original_config.model.params.network_config.params.context_dim == 2048:
|
1267 |
-
model_type = "SDXL"
|
1268 |
-
else:
|
1269 |
-
model_type = "SDXL-Refiner"
|
1270 |
-
if image_size is None:
|
1271 |
-
image_size = 1024
|
1272 |
-
|
1273 |
-
if num_in_channels is None and pipeline_class == StableDiffusionInpaintPipeline:
|
1274 |
-
num_in_channels = 9
|
1275 |
-
elif num_in_channels is None:
|
1276 |
-
num_in_channels = 4
|
1277 |
-
|
1278 |
-
if "unet_config" in original_config.model.params:
|
1279 |
-
original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels
|
1280 |
-
|
1281 |
-
if (
|
1282 |
-
"parameterization" in original_config["model"]["params"]
|
1283 |
-
and original_config["model"]["params"]["parameterization"] == "v"
|
1284 |
-
):
|
1285 |
-
if prediction_type is None:
|
1286 |
-
# NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"`
|
1287 |
-
# as it relies on a brittle global step parameter here
|
1288 |
-
prediction_type = "epsilon" if global_step == 875000 else "v_prediction"
|
1289 |
-
if image_size is None:
|
1290 |
-
# NOTE: For stable diffusion 2 base one has to pass `image_size==512`
|
1291 |
-
# as it relies on a brittle global step parameter here
|
1292 |
-
image_size = 512 if global_step == 875000 else 768
|
1293 |
-
else:
|
1294 |
-
if prediction_type is None:
|
1295 |
-
prediction_type = "epsilon"
|
1296 |
-
if image_size is None:
|
1297 |
-
image_size = 512
|
1298 |
-
|
1299 |
-
if controlnet is None and "control_stage_config" in original_config.model.params:
|
1300 |
-
controlnet = convert_controlnet_checkpoint(
|
1301 |
-
checkpoint, original_config, checkpoint_path, image_size, upcast_attention, extract_ema
|
1302 |
-
)
|
1303 |
-
|
1304 |
-
num_train_timesteps = getattr(original_config.model.params, "timesteps", None) or 1000
|
1305 |
-
|
1306 |
-
if model_type in ["SDXL", "SDXL-Refiner"]:
|
1307 |
-
scheduler_dict = {
|
1308 |
-
"beta_schedule": "scaled_linear",
|
1309 |
-
"beta_start": 0.00085,
|
1310 |
-
"beta_end": 0.012,
|
1311 |
-
"interpolation_type": "linear",
|
1312 |
-
"num_train_timesteps": num_train_timesteps,
|
1313 |
-
"prediction_type": "epsilon",
|
1314 |
-
"sample_max_value": 1.0,
|
1315 |
-
"set_alpha_to_one": False,
|
1316 |
-
"skip_prk_steps": True,
|
1317 |
-
"steps_offset": 1,
|
1318 |
-
"timestep_spacing": "leading",
|
1319 |
-
}
|
1320 |
-
scheduler = EulerDiscreteScheduler.from_config(scheduler_dict)
|
1321 |
-
scheduler_type = "euler"
|
1322 |
-
else:
|
1323 |
-
beta_start = getattr(original_config.model.params, "linear_start", None) or 0.02
|
1324 |
-
beta_end = getattr(original_config.model.params, "linear_end", None) or 0.085
|
1325 |
-
scheduler = DDIMScheduler(
|
1326 |
-
beta_end=beta_end,
|
1327 |
-
beta_schedule="scaled_linear",
|
1328 |
-
beta_start=beta_start,
|
1329 |
-
num_train_timesteps=num_train_timesteps,
|
1330 |
-
steps_offset=1,
|
1331 |
-
clip_sample=False,
|
1332 |
-
set_alpha_to_one=False,
|
1333 |
-
prediction_type=prediction_type,
|
1334 |
-
)
|
1335 |
-
# make sure scheduler works correctly with DDIM
|
1336 |
-
scheduler.register_to_config(clip_sample=False)
|
1337 |
-
|
1338 |
-
if scheduler_type == "pndm":
|
1339 |
-
config = dict(scheduler.config)
|
1340 |
-
config["skip_prk_steps"] = True
|
1341 |
-
scheduler = PNDMScheduler.from_config(config)
|
1342 |
-
elif scheduler_type == "lms":
|
1343 |
-
scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
|
1344 |
-
elif scheduler_type == "heun":
|
1345 |
-
scheduler = HeunDiscreteScheduler.from_config(scheduler.config)
|
1346 |
-
elif scheduler_type == "euler":
|
1347 |
-
scheduler = EulerDiscreteScheduler.from_config(scheduler.config)
|
1348 |
-
elif scheduler_type == "euler-ancestral":
|
1349 |
-
scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
|
1350 |
-
elif scheduler_type == "dpm":
|
1351 |
-
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
|
1352 |
-
elif scheduler_type == "ddim":
|
1353 |
-
scheduler = scheduler
|
1354 |
-
else:
|
1355 |
-
raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!")
|
1356 |
-
|
1357 |
-
# Convert the UNet2DConditionModel model.
|
1358 |
-
unet_config = create_unet_diffusers_config(original_config, image_size=image_size)
|
1359 |
-
unet_config["upcast_attention"] = upcast_attention
|
1360 |
-
converted_unet_checkpoint = convert_ldm_unet_checkpoint(
|
1361 |
-
checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema
|
1362 |
-
)
|
1363 |
-
|
1364 |
-
ctx = init_empty_weights if is_accelerate_available() else nullcontext
|
1365 |
-
with ctx():
|
1366 |
-
unet = UNet2DConditionModel(**unet_config)
|
1367 |
-
|
1368 |
-
if is_accelerate_available():
|
1369 |
-
for param_name, param in converted_unet_checkpoint.items():
|
1370 |
-
set_module_tensor_to_device(unet, param_name, "cpu", value=param)
|
1371 |
-
else:
|
1372 |
-
unet.load_state_dict(converted_unet_checkpoint)
|
1373 |
-
|
1374 |
-
# Convert the VAE model.
|
1375 |
-
if vae_path is None and vae is None:
|
1376 |
-
vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
|
1377 |
-
converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
|
1378 |
-
|
1379 |
-
if (
|
1380 |
-
"model" in original_config
|
1381 |
-
and "params" in original_config.model
|
1382 |
-
and "scale_factor" in original_config.model.params
|
1383 |
-
):
|
1384 |
-
vae_scaling_factor = original_config.model.params.scale_factor
|
1385 |
-
else:
|
1386 |
-
vae_scaling_factor = 0.18215 # default SD scaling factor
|
1387 |
-
|
1388 |
-
vae_config["scaling_factor"] = vae_scaling_factor
|
1389 |
-
|
1390 |
-
ctx = init_empty_weights if is_accelerate_available() else nullcontext
|
1391 |
-
with ctx():
|
1392 |
-
vae = AutoencoderKL(**vae_config)
|
1393 |
-
|
1394 |
-
if is_accelerate_available():
|
1395 |
-
for param_name, param in converted_vae_checkpoint.items():
|
1396 |
-
set_module_tensor_to_device(vae, param_name, "cpu", value=param)
|
1397 |
-
else:
|
1398 |
-
vae.load_state_dict(converted_vae_checkpoint)
|
1399 |
-
elif vae is None:
|
1400 |
-
vae = AutoencoderKL.from_pretrained(vae_path)
|
1401 |
-
|
1402 |
-
if model_type == "FrozenOpenCLIPEmbedder":
|
1403 |
-
config_name = "stabilityai/stable-diffusion-2"
|
1404 |
-
config_kwargs = {"subfolder": "text_encoder"}
|
1405 |
-
|
1406 |
-
text_model = convert_open_clip_checkpoint(checkpoint, config_name, **config_kwargs)
|
1407 |
-
tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2", subfolder="tokenizer")
|
1408 |
-
|
1409 |
-
if stable_unclip is None:
|
1410 |
-
if controlnet:
|
1411 |
-
pipe = pipeline_class(
|
1412 |
-
vae=vae,
|
1413 |
-
text_encoder=text_model,
|
1414 |
-
tokenizer=tokenizer,
|
1415 |
-
unet=unet,
|
1416 |
-
scheduler=scheduler,
|
1417 |
-
controlnet=controlnet,
|
1418 |
-
safety_checker=None,
|
1419 |
-
feature_extractor=None,
|
1420 |
-
requires_safety_checker=False,
|
1421 |
-
)
|
1422 |
-
else:
|
1423 |
-
pipe = pipeline_class(
|
1424 |
-
vae=vae,
|
1425 |
-
text_encoder=text_model,
|
1426 |
-
tokenizer=tokenizer,
|
1427 |
-
unet=unet,
|
1428 |
-
scheduler=scheduler,
|
1429 |
-
safety_checker=None,
|
1430 |
-
feature_extractor=None,
|
1431 |
-
requires_safety_checker=False,
|
1432 |
-
)
|
1433 |
-
else:
|
1434 |
-
image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components(
|
1435 |
-
original_config, clip_stats_path=clip_stats_path, device=device
|
1436 |
-
)
|
1437 |
-
|
1438 |
-
if stable_unclip == "img2img":
|
1439 |
-
feature_extractor, image_encoder = stable_unclip_image_encoder(original_config)
|
1440 |
-
|
1441 |
-
pipe = StableUnCLIPImg2ImgPipeline(
|
1442 |
-
# image encoding components
|
1443 |
-
feature_extractor=feature_extractor,
|
1444 |
-
image_encoder=image_encoder,
|
1445 |
-
# image noising components
|
1446 |
-
image_normalizer=image_normalizer,
|
1447 |
-
image_noising_scheduler=image_noising_scheduler,
|
1448 |
-
# regular denoising components
|
1449 |
-
tokenizer=tokenizer,
|
1450 |
-
text_encoder=text_model,
|
1451 |
-
unet=unet,
|
1452 |
-
scheduler=scheduler,
|
1453 |
-
# vae
|
1454 |
-
vae=vae,
|
1455 |
-
)
|
1456 |
-
elif stable_unclip == "txt2img":
|
1457 |
-
if stable_unclip_prior is None or stable_unclip_prior == "karlo":
|
1458 |
-
karlo_model = "kakaobrain/karlo-v1-alpha"
|
1459 |
-
prior = PriorTransformer.from_pretrained(karlo_model, subfolder="prior")
|
1460 |
-
|
1461 |
-
prior_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
1462 |
-
prior_text_model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
|
1463 |
-
|
1464 |
-
prior_scheduler = UnCLIPScheduler.from_pretrained(karlo_model, subfolder="prior_scheduler")
|
1465 |
-
prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config)
|
1466 |
-
else:
|
1467 |
-
raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}")
|
1468 |
-
|
1469 |
-
pipe = StableUnCLIPPipeline(
|
1470 |
-
# prior components
|
1471 |
-
prior_tokenizer=prior_tokenizer,
|
1472 |
-
prior_text_encoder=prior_text_model,
|
1473 |
-
prior=prior,
|
1474 |
-
prior_scheduler=prior_scheduler,
|
1475 |
-
# image noising components
|
1476 |
-
image_normalizer=image_normalizer,
|
1477 |
-
image_noising_scheduler=image_noising_scheduler,
|
1478 |
-
# regular denoising components
|
1479 |
-
tokenizer=tokenizer,
|
1480 |
-
text_encoder=text_model,
|
1481 |
-
unet=unet,
|
1482 |
-
scheduler=scheduler,
|
1483 |
-
# vae
|
1484 |
-
vae=vae,
|
1485 |
-
)
|
1486 |
-
else:
|
1487 |
-
raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}")
|
1488 |
-
elif model_type == "PaintByExample":
|
1489 |
-
vision_model = convert_paint_by_example_checkpoint(checkpoint)
|
1490 |
-
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
1491 |
-
feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker")
|
1492 |
-
pipe = PaintByExamplePipeline(
|
1493 |
-
vae=vae,
|
1494 |
-
image_encoder=vision_model,
|
1495 |
-
unet=unet,
|
1496 |
-
scheduler=scheduler,
|
1497 |
-
safety_checker=None,
|
1498 |
-
feature_extractor=feature_extractor,
|
1499 |
-
)
|
1500 |
-
elif model_type == "FrozenCLIPEmbedder":
|
1501 |
-
text_model = convert_ldm_clip_checkpoint(
|
1502 |
-
checkpoint, local_files_only=local_files_only, text_encoder=text_encoder
|
1503 |
-
)
|
1504 |
-
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") if tokenizer is None else tokenizer
|
1505 |
-
|
1506 |
-
if load_safety_checker:
|
1507 |
-
safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
|
1508 |
-
feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker")
|
1509 |
-
else:
|
1510 |
-
safety_checker = None
|
1511 |
-
feature_extractor = None
|
1512 |
-
|
1513 |
-
if controlnet:
|
1514 |
-
pipe = pipeline_class(
|
1515 |
-
vae=vae,
|
1516 |
-
text_encoder=text_model,
|
1517 |
-
tokenizer=tokenizer,
|
1518 |
-
unet=unet,
|
1519 |
-
controlnet=controlnet,
|
1520 |
-
scheduler=scheduler,
|
1521 |
-
safety_checker=safety_checker,
|
1522 |
-
feature_extractor=feature_extractor,
|
1523 |
-
)
|
1524 |
-
else:
|
1525 |
-
pipe = pipeline_class(
|
1526 |
-
vae=vae,
|
1527 |
-
text_encoder=text_model,
|
1528 |
-
tokenizer=tokenizer,
|
1529 |
-
unet=unet,
|
1530 |
-
scheduler=scheduler,
|
1531 |
-
safety_checker=safety_checker,
|
1532 |
-
feature_extractor=feature_extractor,
|
1533 |
-
)
|
1534 |
-
elif model_type in ["SDXL", "SDXL-Refiner"]:
|
1535 |
-
if model_type == "SDXL":
|
1536 |
-
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
|
1537 |
-
text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only)
|
1538 |
-
tokenizer_2 = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!")
|
1539 |
-
|
1540 |
-
config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
|
1541 |
-
config_kwargs = {"projection_dim": 1280}
|
1542 |
-
text_encoder_2 = convert_open_clip_checkpoint(
|
1543 |
-
checkpoint, config_name, prefix="conditioner.embedders.1.model.", has_projection=True, **config_kwargs
|
1544 |
-
)
|
1545 |
-
|
1546 |
-
pipe = StableDiffusionXLPipeline(
|
1547 |
-
vae=vae,
|
1548 |
-
text_encoder=text_encoder,
|
1549 |
-
tokenizer=tokenizer,
|
1550 |
-
text_encoder_2=text_encoder_2,
|
1551 |
-
tokenizer_2=tokenizer_2,
|
1552 |
-
unet=unet,
|
1553 |
-
scheduler=scheduler,
|
1554 |
-
force_zeros_for_empty_prompt=True,
|
1555 |
-
)
|
1556 |
-
else:
|
1557 |
-
tokenizer = None
|
1558 |
-
text_encoder = None
|
1559 |
-
tokenizer_2 = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!")
|
1560 |
-
|
1561 |
-
config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
|
1562 |
-
config_kwargs = {"projection_dim": 1280}
|
1563 |
-
text_encoder_2 = convert_open_clip_checkpoint(
|
1564 |
-
checkpoint, config_name, prefix="conditioner.embedders.0.model.", has_projection=True, **config_kwargs
|
1565 |
-
)
|
1566 |
-
|
1567 |
-
pipe = StableDiffusionXLImg2ImgPipeline(
|
1568 |
-
vae=vae,
|
1569 |
-
text_encoder=text_encoder,
|
1570 |
-
tokenizer=tokenizer,
|
1571 |
-
text_encoder_2=text_encoder_2,
|
1572 |
-
tokenizer_2=tokenizer_2,
|
1573 |
-
unet=unet,
|
1574 |
-
scheduler=scheduler,
|
1575 |
-
requires_aesthetics_score=True,
|
1576 |
-
force_zeros_for_empty_prompt=False,
|
1577 |
-
)
|
1578 |
-
else:
|
1579 |
-
text_config = create_ldm_bert_config(original_config)
|
1580 |
-
text_model = convert_ldm_bert_checkpoint(checkpoint, text_config)
|
1581 |
-
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
|
1582 |
-
pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
|
1583 |
-
|
1584 |
-
return pipe
|
1585 |
-
|
1586 |
-
|
1587 |
-
def download_controlnet_from_original_ckpt(
|
1588 |
-
checkpoint_path: str,
|
1589 |
-
original_config_file: str,
|
1590 |
-
image_size: int = 512,
|
1591 |
-
extract_ema: bool = False,
|
1592 |
-
num_in_channels: Optional[int] = None,
|
1593 |
-
upcast_attention: Optional[bool] = None,
|
1594 |
-
device: str = None,
|
1595 |
-
from_safetensors: bool = False,
|
1596 |
-
use_linear_projection: Optional[bool] = None,
|
1597 |
-
cross_attention_dim: Optional[bool] = None,
|
1598 |
-
) -> DiffusionPipeline:
|
1599 |
-
if not is_omegaconf_available():
|
1600 |
-
raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
|
1601 |
-
|
1602 |
-
from omegaconf import OmegaConf
|
1603 |
-
|
1604 |
-
if from_safetensors:
|
1605 |
-
if not is_safetensors_available():
|
1606 |
-
raise ValueError(BACKENDS_MAPPING["safetensors"][1])
|
1607 |
-
|
1608 |
-
from safetensors import safe_open
|
1609 |
-
|
1610 |
-
checkpoint = {}
|
1611 |
-
with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
|
1612 |
-
for key in f.keys():
|
1613 |
-
checkpoint[key] = f.get_tensor(key)
|
1614 |
-
else:
|
1615 |
-
if device is None:
|
1616 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
1617 |
-
checkpoint = torch.load(checkpoint_path, map_location=device)
|
1618 |
-
else:
|
1619 |
-
checkpoint = torch.load(checkpoint_path, map_location=device)
|
1620 |
-
|
1621 |
-
# NOTE: this while loop isn't great but this controlnet checkpoint has one additional
|
1622 |
-
# "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21
|
1623 |
-
while "state_dict" in checkpoint:
|
1624 |
-
checkpoint = checkpoint["state_dict"]
|
1625 |
-
|
1626 |
-
original_config = OmegaConf.load(original_config_file)
|
1627 |
-
|
1628 |
-
if num_in_channels is not None:
|
1629 |
-
original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels
|
1630 |
-
|
1631 |
-
if "control_stage_config" not in original_config.model.params:
|
1632 |
-
raise ValueError("`control_stage_config` not present in original config")
|
1633 |
-
|
1634 |
-
controlnet = convert_controlnet_checkpoint(
|
1635 |
-
checkpoint,
|
1636 |
-
original_config,
|
1637 |
-
checkpoint_path,
|
1638 |
-
image_size,
|
1639 |
-
upcast_attention,
|
1640 |
-
extract_ema,
|
1641 |
-
use_linear_projection=use_linear_projection,
|
1642 |
-
cross_attention_dim=cross_attention_dim,
|
1643 |
-
)
|
1644 |
-
|
1645 |
-
return controlnet
|
|
|
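As a usage note for the checkpoint-conversion script deleted above: the sketch below shows how its `download_from_original_stable_diffusion_ckpt` entry point is typically called. The keyword arguments are the ones defined in the deleted function signature; the import path, the local checkpoint filename and the output directory are assumptions for illustration only.

# Sketch only; file paths are hypothetical and the import path assumes the
# script is importable the same way as the upstream diffusers module.
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.safetensors",  # hypothetical local file
    from_safetensors=True,       # read the file with safetensors instead of torch.load
    extract_ema=True,            # EMA weights usually give better inference quality
    load_safety_checker=False,   # skip downloading the safety checker components
)
pipe.save_pretrained("converted-pipeline")  # hypothetical output directory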
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py
DELETED
@@ -1,317 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import gc
|
17 |
-
import random
|
18 |
-
import unittest
|
19 |
-
|
20 |
-
import numpy as np
|
21 |
-
import torch
|
22 |
-
from transformers import XLMRobertaTokenizerFast
|
23 |
-
|
24 |
-
from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
|
25 |
-
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
|
26 |
-
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
|
27 |
-
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
|
28 |
-
|
29 |
-
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
|
30 |
-
|
31 |
-
|
32 |
-
enable_full_determinism()
|
33 |
-
|
34 |
-
|
35 |
-
class Dummies:
|
36 |
-
@property
|
37 |
-
def text_embedder_hidden_size(self):
|
38 |
-
return 32
|
39 |
-
|
40 |
-
@property
|
41 |
-
def time_input_dim(self):
|
42 |
-
return 32
|
43 |
-
|
44 |
-
@property
|
45 |
-
def block_out_channels_0(self):
|
46 |
-
return self.time_input_dim
|
47 |
-
|
48 |
-
@property
|
49 |
-
def time_embed_dim(self):
|
50 |
-
return self.time_input_dim * 4
|
51 |
-
|
52 |
-
@property
|
53 |
-
def cross_attention_dim(self):
|
54 |
-
return 32
|
55 |
-
|
56 |
-
@property
|
57 |
-
def dummy_tokenizer(self):
|
58 |
-
tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
|
59 |
-
return tokenizer
|
60 |
-
|
61 |
-
@property
|
62 |
-
def dummy_text_encoder(self):
|
63 |
-
torch.manual_seed(0)
|
64 |
-
config = MCLIPConfig(
|
65 |
-
numDims=self.cross_attention_dim,
|
66 |
-
transformerDimensions=self.text_embedder_hidden_size,
|
67 |
-
hidden_size=self.text_embedder_hidden_size,
|
68 |
-
intermediate_size=37,
|
69 |
-
num_attention_heads=4,
|
70 |
-
num_hidden_layers=5,
|
71 |
-
vocab_size=1005,
|
72 |
-
)
|
73 |
-
|
74 |
-
text_encoder = MultilingualCLIP(config)
|
75 |
-
text_encoder = text_encoder.eval()
|
76 |
-
|
77 |
-
return text_encoder
|
78 |
-
|
79 |
-
@property
|
80 |
-
def dummy_unet(self):
|
81 |
-
torch.manual_seed(0)
|
82 |
-
|
83 |
-
model_kwargs = {
|
84 |
-
"in_channels": 4,
|
85 |
-
# Out channels is double in channels because predicts mean and variance
|
86 |
-
"out_channels": 8,
|
87 |
-
"addition_embed_type": "text_image",
|
88 |
-
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
|
89 |
-
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
|
90 |
-
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
|
91 |
-
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
|
92 |
-
"layers_per_block": 1,
|
93 |
-
"encoder_hid_dim": self.text_embedder_hidden_size,
|
94 |
-
"encoder_hid_dim_type": "text_image_proj",
|
95 |
-
"cross_attention_dim": self.cross_attention_dim,
|
96 |
-
"attention_head_dim": 4,
|
97 |
-
"resnet_time_scale_shift": "scale_shift",
|
98 |
-
"class_embed_type": None,
|
99 |
-
}
|
100 |
-
|
101 |
-
model = UNet2DConditionModel(**model_kwargs)
|
102 |
-
return model
|
103 |
-
|
104 |
-
@property
|
105 |
-
def dummy_movq_kwargs(self):
|
106 |
-
return {
|
107 |
-
"block_out_channels": [32, 64],
|
108 |
-
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
|
109 |
-
"in_channels": 3,
|
110 |
-
"latent_channels": 4,
|
111 |
-
"layers_per_block": 1,
|
112 |
-
"norm_num_groups": 8,
|
113 |
-
"norm_type": "spatial",
|
114 |
-
"num_vq_embeddings": 12,
|
115 |
-
"out_channels": 3,
|
116 |
-
"up_block_types": [
|
117 |
-
"AttnUpDecoderBlock2D",
|
118 |
-
"UpDecoderBlock2D",
|
119 |
-
],
|
120 |
-
"vq_embed_dim": 4,
|
121 |
-
}
|
122 |
-
|
123 |
-
@property
|
124 |
-
def dummy_movq(self):
|
125 |
-
torch.manual_seed(0)
|
126 |
-
model = VQModel(**self.dummy_movq_kwargs)
|
127 |
-
return model
|
128 |
-
|
129 |
-
def get_dummy_components(self):
|
130 |
-
text_encoder = self.dummy_text_encoder
|
131 |
-
tokenizer = self.dummy_tokenizer
|
132 |
-
unet = self.dummy_unet
|
133 |
-
movq = self.dummy_movq
|
134 |
-
|
135 |
-
scheduler = DDIMScheduler(
|
136 |
-
num_train_timesteps=1000,
|
137 |
-
beta_schedule="linear",
|
138 |
-
beta_start=0.00085,
|
139 |
-
beta_end=0.012,
|
140 |
-
clip_sample=False,
|
141 |
-
set_alpha_to_one=False,
|
142 |
-
steps_offset=1,
|
143 |
-
prediction_type="epsilon",
|
144 |
-
thresholding=False,
|
145 |
-
)
|
146 |
-
|
147 |
-
components = {
|
148 |
-
"text_encoder": text_encoder,
|
149 |
-
"tokenizer": tokenizer,
|
150 |
-
"unet": unet,
|
151 |
-
"scheduler": scheduler,
|
152 |
-
"movq": movq,
|
153 |
-
}
|
154 |
-
return components
|
155 |
-
|
156 |
-
def get_dummy_inputs(self, device, seed=0):
|
157 |
-
image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
|
158 |
-
negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
|
159 |
-
if str(device).startswith("mps"):
|
160 |
-
generator = torch.manual_seed(seed)
|
161 |
-
else:
|
162 |
-
generator = torch.Generator(device=device).manual_seed(seed)
|
163 |
-
inputs = {
|
164 |
-
"prompt": "horse",
|
165 |
-
"image_embeds": image_embeds,
|
166 |
-
"negative_image_embeds": negative_image_embeds,
|
167 |
-
"generator": generator,
|
168 |
-
"height": 64,
|
169 |
-
"width": 64,
|
170 |
-
"guidance_scale": 4.0,
|
171 |
-
"num_inference_steps": 2,
|
172 |
-
"output_type": "np",
|
173 |
-
}
|
174 |
-
return inputs
|
175 |
-
|
176 |
-
|
177 |
-
class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
|
178 |
-
pipeline_class = KandinskyPipeline
|
179 |
-
params = [
|
180 |
-
"prompt",
|
181 |
-
"image_embeds",
|
182 |
-
"negative_image_embeds",
|
183 |
-
]
|
184 |
-
batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"]
|
185 |
-
required_optional_params = [
|
186 |
-
"generator",
|
187 |
-
"height",
|
188 |
-
"width",
|
189 |
-
"latents",
|
190 |
-
"guidance_scale",
|
191 |
-
"negative_prompt",
|
192 |
-
"num_inference_steps",
|
193 |
-
"return_dict",
|
194 |
-
"guidance_scale",
|
195 |
-
"num_images_per_prompt",
|
196 |
-
"output_type",
|
197 |
-
"return_dict",
|
198 |
-
]
|
199 |
-
test_xformers_attention = False
|
200 |
-
|
201 |
-
def get_dummy_components(self):
|
202 |
-
dummy = Dummies()
|
203 |
-
return dummy.get_dummy_components()
|
204 |
-
|
205 |
-
def get_dummy_inputs(self, device, seed=0):
|
206 |
-
dummy = Dummies()
|
207 |
-
return dummy.get_dummy_inputs(device=device, seed=seed)
|
208 |
-
|
209 |
-
def test_kandinsky(self):
|
210 |
-
device = "cpu"
|
211 |
-
|
212 |
-
components = self.get_dummy_components()
|
213 |
-
|
214 |
-
pipe = self.pipeline_class(**components)
|
215 |
-
pipe = pipe.to(device)
|
216 |
-
|
217 |
-
pipe.set_progress_bar_config(disable=None)
|
218 |
-
|
219 |
-
output = pipe(**self.get_dummy_inputs(device))
|
220 |
-
image = output.images
|
221 |
-
|
222 |
-
image_from_tuple = pipe(
|
223 |
-
**self.get_dummy_inputs(device),
|
224 |
-
return_dict=False,
|
225 |
-
)[0]
|
226 |
-
|
227 |
-
image_slice = image[0, -3:, -3:, -1]
|
228 |
-
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
|
229 |
-
|
230 |
-
assert image.shape == (1, 64, 64, 3)
|
231 |
-
|
232 |
-
expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024])
|
233 |
-
|
234 |
-
assert (
|
235 |
-
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
236 |
-
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
|
237 |
-
assert (
|
238 |
-
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
|
239 |
-
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
|
240 |
-
|
241 |
-
@require_torch_gpu
|
242 |
-
def test_offloads(self):
|
243 |
-
pipes = []
|
244 |
-
components = self.get_dummy_components()
|
245 |
-
sd_pipe = self.pipeline_class(**components).to(torch_device)
|
246 |
-
pipes.append(sd_pipe)
|
247 |
-
|
248 |
-
components = self.get_dummy_components()
|
249 |
-
sd_pipe = self.pipeline_class(**components)
|
250 |
-
sd_pipe.enable_model_cpu_offload()
|
251 |
-
pipes.append(sd_pipe)
|
252 |
-
|
253 |
-
components = self.get_dummy_components()
|
254 |
-
sd_pipe = self.pipeline_class(**components)
|
255 |
-
sd_pipe.enable_sequential_cpu_offload()
|
256 |
-
pipes.append(sd_pipe)
|
257 |
-
|
258 |
-
image_slices = []
|
259 |
-
for pipe in pipes:
|
260 |
-
inputs = self.get_dummy_inputs(torch_device)
|
261 |
-
image = pipe(**inputs).images
|
262 |
-
|
263 |
-
image_slices.append(image[0, -3:, -3:, -1].flatten())
|
264 |
-
|
265 |
-
assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
|
266 |
-
assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
|
267 |
-
|
268 |
-
|
269 |
-
@slow
|
270 |
-
@require_torch_gpu
|
271 |
-
class KandinskyPipelineIntegrationTests(unittest.TestCase):
|
272 |
-
def tearDown(self):
|
273 |
-
# clean up the VRAM after each test
|
274 |
-
super().tearDown()
|
275 |
-
gc.collect()
|
276 |
-
torch.cuda.empty_cache()
|
277 |
-
|
278 |
-
def test_kandinsky_text2img(self):
|
279 |
-
expected_image = load_numpy(
|
280 |
-
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
|
281 |
-
"/kandinsky/kandinsky_text2img_cat_fp16.npy"
|
282 |
-
)
|
283 |
-
|
284 |
-
pipe_prior = KandinskyPriorPipeline.from_pretrained(
|
285 |
-
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
|
286 |
-
)
|
287 |
-
pipe_prior.to(torch_device)
|
288 |
-
|
289 |
-
pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
|
290 |
-
pipeline = pipeline.to(torch_device)
|
291 |
-
pipeline.set_progress_bar_config(disable=None)
|
292 |
-
|
293 |
-
prompt = "red cat, 4k photo"
|
294 |
-
|
295 |
-
generator = torch.Generator(device="cuda").manual_seed(0)
|
296 |
-
image_emb, zero_image_emb = pipe_prior(
|
297 |
-
prompt,
|
298 |
-
generator=generator,
|
299 |
-
num_inference_steps=5,
|
300 |
-
negative_prompt="",
|
301 |
-
).to_tuple()
|
302 |
-
|
303 |
-
generator = torch.Generator(device="cuda").manual_seed(0)
|
304 |
-
output = pipeline(
|
305 |
-
prompt,
|
306 |
-
image_embeds=image_emb,
|
307 |
-
negative_image_embeds=zero_image_emb,
|
308 |
-
generator=generator,
|
309 |
-
num_inference_steps=100,
|
310 |
-
output_type="np",
|
311 |
-
)
|
312 |
-
|
313 |
-
image = output.images[0]
|
314 |
-
|
315 |
-
assert image.shape == (512, 512, 3)
|
316 |
-
|
317 |
-
assert_mean_pixel_difference(image, expected_image)
|
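For reference, the integration test above exercises Kandinsky 2.1's two-stage flow (prior, then text-to-image decoder). A minimal standalone sketch of that flow, assuming the same diffusers pipelines and model ids as the test; the output filename is hypothetical:

import torch
from diffusers import KandinskyPriorPipeline, KandinskyPipeline

# Stage 1: map the prompt to CLIP image embeddings with the prior.
prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
image_emb, negative_emb = prior("red cat, 4k photo", num_inference_steps=5).to_tuple()

# Stage 2: decode the embeddings into a 512x512 image.
pipe = KandinskyPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    "red cat, 4k photo",
    image_embeds=image_emb,
    negative_image_embeds=negative_emb,
    num_inference_steps=50,
).images[0]
image.save("red_cat.png")  # hypothetical output path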
spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py
DELETED
@@ -1,19 +0,0 @@
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        loss_cls=dict(
            _delete_=True,
            type='GHMC',
            bins=30,
            momentum=0.75,
            use_sigmoid=True,
            loss_weight=1.0),
        loss_bbox=dict(
            _delete_=True,
            type='GHMR',
            mu=0.02,
            bins=10,
            momentum=0.7,
            loss_weight=10.0)))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
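For reference, a minimal sketch of how a config like this is consumed, assuming the MMDetection 2.x API (Config.fromfile, build_detector); the config path mirrors the file above:

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py')
# GHM-C / GHM-R replace the default focal and L1 losses of the RetinaNet head.
print(cfg.model.bbox_head.loss_cls['type'], cfg.model.bbox_head.loss_bbox['type'])
model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))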
spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py
DELETED
@@ -1,11 +0,0 @@
_base_ = ['grid_rcnn_r50_fpn_gn-head_2x_coco.py']
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
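As a quick sanity check of the schedule above, a small plain-Python sketch of the linear warmup it encodes; the base LR of 0.02 is an assumption, since the real value lives in the _base_ config:

base_lr = 0.02          # assumed; the actual value comes from the _base_ config
warmup_iters = 500
warmup_ratio = 0.001

def lr_at_iter(i):
    # Linear warmup: ramp from base_lr * warmup_ratio up to base_lr over warmup_iters.
    if i >= warmup_iters:
        return base_lr
    k = (1 - i / warmup_iters) * (1 - warmup_ratio)
    return base_lr * (1 - k)

for i in (0, 250, 500):
    print(i, round(lr_at_iter(i), 6))  # 0 -> 2e-05, 250 -> ~0.01001, 500 -> 0.02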
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py
DELETED
@@ -1,794 +0,0 @@
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.ops import DeformConv2d
from mmcv.runner import force_fp32

from mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator,
                        build_assigner, build_sampler, distance2bbox,
                        multi_apply, multiclass_nms, reduce_mean)
from ..builder import HEADS, build_loss
from .atss_head import ATSSHead
from .fcos_head import FCOSHead

INF = 1e8


@HEADS.register_module()
class VFNetHead(ATSSHead, FCOSHead):
    """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
    Detector.<https://arxiv.org/abs/2008.13367>`_.

    The VFNet predicts IoU-aware classification scores which mix the
    object presence confidence and object localization accuracy as the
    detection score. It is built on the FCOS architecture and uses ATSS
    for defining positive/negative training examples. The VFNet is trained
    with Varifocal Loss and employs star-shaped deformable convolution to
    extract features for a bbox.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
            level points.
        center_sampling (bool): If true, use center sampling. Default: False.
        center_sample_radius (float): Radius of center sampling. Default: 1.5.
        sync_num_pos (bool): If true, synchronize the number of positive
            examples across GPUs. Default: True
        gradient_mul (float): The multiplier to gradients from bbox refinement
            and recognition. Default: 0.1.
        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
            'stride'. Default: reg_denom
        loss_cls_fl (dict): Config of focal loss.
        use_vfl (bool): If true, use varifocal loss for training.
            Default: True.
        loss_cls (dict): Config of varifocal loss.
        loss_bbox (dict): Config of localization loss, GIoU Loss.
        loss_bbox (dict): Config of localization refinement loss, GIoU Loss.
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: norm_cfg=dict(type='GN', num_groups=32,
            requires_grad=True).
        use_atss (bool): If true, use ATSS to define positive/negative
            examples. Default: True.
        anchor_generator (dict): Config of anchor generator for ATSS.

    Example:
        >>> self = VFNetHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
    """  # noqa: E501

    def __init__(self,
                 num_classes,
                 in_channels,
                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
                                 (512, INF)),
                 center_sampling=False,
                 center_sample_radius=1.5,
                 sync_num_pos=True,
                 gradient_mul=0.1,
                 bbox_norm_type='reg_denom',
                 loss_cls_fl=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 use_vfl=True,
                 loss_cls=dict(
                     type='VarifocalLoss',
                     use_sigmoid=True,
                     alpha=0.75,
                     gamma=2.0,
                     iou_weighted=True,
                     loss_weight=1.0),
                 loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
                 loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 use_atss=True,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     ratios=[1.0],
                     octave_base_scale=8,
                     scales_per_octave=1,
                     center_offset=0.0,
                     strides=[8, 16, 32, 64, 128]),
                 **kwargs):
        # dcn base offsets, adapted from reppoints_head.py
        self.num_dconv_points = 9
        self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
        self.dcn_pad = int((self.dcn_kernel - 1) / 2)
        dcn_base = np.arange(-self.dcn_pad,
                             self.dcn_pad + 1).astype(np.float64)
        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
            (-1))
        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)

        super(FCOSHead, self).__init__(
            num_classes, in_channels, norm_cfg=norm_cfg, **kwargs)
        self.regress_ranges = regress_ranges
        self.reg_denoms = [
            regress_range[-1] for regress_range in regress_ranges
        ]
        self.reg_denoms[-1] = self.reg_denoms[-2] * 2
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.sync_num_pos = sync_num_pos
        self.bbox_norm_type = bbox_norm_type
        self.gradient_mul = gradient_mul
        self.use_vfl = use_vfl
        if self.use_vfl:
            self.loss_cls = build_loss(loss_cls)
        else:
            self.loss_cls = build_loss(loss_cls_fl)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_bbox_refine = build_loss(loss_bbox_refine)

        # for getting ATSS targets
        self.use_atss = use_atss
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        self.anchor_generator = build_anchor_generator(anchor_generator)
        self.anchor_center_offset = anchor_generator['center_offset']
        self.num_anchors = self.anchor_generator.num_base_anchors[0]
        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)

    def _init_layers(self):
        """Initialize layers of the head."""
        super(FCOSHead, self)._init_cls_convs()
        super(FCOSHead, self)._init_reg_convs()
        self.relu = nn.ReLU(inplace=True)
        self.vfnet_reg_conv = ConvModule(
            self.feat_channels,
            self.feat_channels,
            3,
            stride=1,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            bias=self.conv_bias)
        self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_reg_refine_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_cls_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        normal_init(self.vfnet_reg_conv.conv, std=0.01)
        normal_init(self.vfnet_reg, std=0.01)
        normal_init(self.vfnet_reg_refine_dconv, std=0.01)
        normal_init(self.vfnet_reg_refine, std=0.01)
        normal_init(self.vfnet_cls_dconv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.vfnet_cls, std=0.01, bias=bias_cls)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Box iou-aware scores for each scale
                    level, each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box offsets for each
                    scale level, each is a 4D-tensor, the channel number is
                    num_points * 4.
                bbox_preds_refine (list[Tensor]): Refined Box offsets for
                    each scale level, each is a 4D-tensor, the channel
                    number is num_points * 4.
        """
        return multi_apply(self.forward_single, feats, self.scales,
                           self.scales_refine, self.strides, self.reg_denoms)

    def forward_single(self, x, scale, scale_refine, stride, reg_denom):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to
                resize the refined bbox prediction.
            stride (int): The corresponding stride for feature maps,
                used to normalize the bbox prediction when
                bbox_norm_type = 'stride'.
            reg_denom (int): The corresponding regression range for feature
                maps, only used to normalize the bbox prediction when
                bbox_norm_type = 'reg_denom'.

        Returns:
            tuple: iou-aware cls scores for each box, bbox predictions and
                refined bbox predictions of input feature maps.
        """
        cls_feat = x
        reg_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)

        # predict the bbox_pred of different level
        reg_feat_init = self.vfnet_reg_conv(reg_feat)
        if self.bbox_norm_type == 'reg_denom':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
        elif self.bbox_norm_type == 'stride':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * stride
        else:
            raise NotImplementedError

        # compute star deformable convolution offsets
        # converting dcn_offset to reg_feat.dtype thus VFNet can be
        # trained with FP16
        dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
                                          stride).to(reg_feat.dtype)

        # refine the bbox_pred
        reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
        bbox_pred_refine = scale_refine(
            self.vfnet_reg_refine(reg_feat)).float().exp()
        bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()

        # predict the iou-aware cls score
        cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
        cls_score = self.vfnet_cls(cls_feat)

        return cls_score, bbox_pred, bbox_pred_refine

    def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
        """Compute the star deformable conv offsets.

        Args:
            bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b).
            gradient_mul (float): Gradient multiplier.
            stride (int): The corresponding stride for feature maps,
                used to project the bbox onto the feature map.

        Returns:
            dcn_offsets (Tensor): The offsets for deformable convolution.
        """
        dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
        bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
            gradient_mul * bbox_pred
        # map to the feature map scale
        bbox_pred_grad_mul = bbox_pred_grad_mul / stride
        N, C, H, W = bbox_pred.size()

        x1 = bbox_pred_grad_mul[:, 0, :, :]
        y1 = bbox_pred_grad_mul[:, 1, :, :]
        x2 = bbox_pred_grad_mul[:, 2, :, :]
        y2 = bbox_pred_grad_mul[:, 3, :, :]
        bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
            N, 2 * self.num_dconv_points, H, W)
        bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 5, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 11, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 12, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 14, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 16, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 17, :, :] = x2  # x2
        dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset

        return dcn_offset

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
    def loss(self,
             cls_scores,
             bbox_preds,
             bbox_preds_refine,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box offsets for each
                scale level, each is a 4D-tensor, the channel number is
                num_points * 4.
            bbox_preds_refine (list[Tensor]): Refined Box offsets for
                each scale level, each is a 4D-tensor, the channel
                number is num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
                Default: None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                           bbox_preds[0].device)
        labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
            cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,
            gt_bboxes_ignore)

        num_imgs = cls_scores[0].size(0)
        # flatten cls_scores, bbox_preds and bbox_preds_refine
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3,
                              1).reshape(-1,
                                         self.cls_out_channels).contiguous()
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds_refine = [
            bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred_refine in bbox_preds_refine
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = torch.where(
            ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
        num_pos = len(pos_inds)

        pos_bbox_preds = flatten_bbox_preds[pos_inds]
        pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
        pos_labels = flatten_labels[pos_inds]

        # sync num_pos across all gpus
        if self.sync_num_pos:
            num_pos_avg_per_gpu = reduce_mean(
                pos_inds.new_tensor(num_pos).float()).item()
            num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
        else:
            num_pos_avg_per_gpu = num_pos

        if num_pos > 0:
            pos_bbox_targets = flatten_bbox_targets[pos_inds]
            pos_points = flatten_points[pos_inds]

            pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
            pos_decoded_target_preds = distance2bbox(pos_points,
                                                     pos_bbox_targets)
            iou_targets_ini = bbox_overlaps(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds.detach(),
                is_aligned=True).clamp(min=1e-6)
            bbox_weights_ini = iou_targets_ini.clone().detach()
            iou_targets_ini_avg_per_gpu = reduce_mean(
                bbox_weights_ini.sum()).item()
            bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_ini,
                avg_factor=bbox_avg_factor_ini)

            pos_decoded_bbox_preds_refine = \
                distance2bbox(pos_points, pos_bbox_preds_refine)
            iou_targets_rf = bbox_overlaps(
                pos_decoded_bbox_preds_refine,
                pos_decoded_target_preds.detach(),
                is_aligned=True).clamp(min=1e-6)
            bbox_weights_rf = iou_targets_rf.clone().detach()
            iou_targets_rf_avg_per_gpu = reduce_mean(
                bbox_weights_rf.sum()).item()
            bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)
            loss_bbox_refine = self.loss_bbox_refine(
                pos_decoded_bbox_preds_refine,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_rf,
                avg_factor=bbox_avg_factor_rf)

            # build IoU-aware cls_score targets
            if self.use_vfl:
                pos_ious = iou_targets_rf.clone().detach()
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)
                cls_iou_targets[pos_inds, pos_labels] = pos_ious
        else:
            loss_bbox = pos_bbox_preds.sum() * 0
            loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
            if self.use_vfl:
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)

        if self.use_vfl:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                cls_iou_targets,
                avg_factor=num_pos_avg_per_gpu)
        else:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                flatten_labels,
                weight=label_weights,
                avg_factor=num_pos_avg_per_gpu)

        return dict(
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            loss_bbox_rf=loss_bbox_refine)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   bbox_preds_refine,
                   img_metas,
                   cfg=None,
                   rescale=None,
                   with_nms=True):
        """Transform network outputs for a batch into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            bbox_preds (list[Tensor]): Box offsets for each scale
                level with shape (N, num_points * 4, H, W).
            bbox_preds_refine (list[Tensor]): Refined Box offsets for
                each scale level with shape (N, num_points * 4, H, W).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config): Test / postprocessing configuration,
                if None, test_cfg would be used. Default: None.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before returning boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where the first 4 columns
                are bounding box positions (tl_x, tl_y, br_x, br_y) and the
                5-th column is a score between 0 and 1. The second item is a
                (n,) tensor where each item is the predicted class label of
                the corresponding box.
        """
        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
        num_levels = len(cls_scores)

        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                      bbox_preds[0].device)
        result_list = []
        for img_id in range(len(img_metas)):
            cls_score_list = [
                cls_scores[i][img_id].detach() for i in range(num_levels)
            ]
            bbox_pred_list = [
                bbox_preds_refine[i][img_id].detach()
                for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            det_bboxes = self._get_bboxes_single(cls_score_list,
                                                 bbox_pred_list, mlvl_points,
                                                 img_shape, scale_factor, cfg,
                                                 rescale, with_nms)
            result_list.append(det_bboxes)
        return result_list

    def _get_bboxes_single(self,
                           cls_scores,
                           bbox_preds,
                           mlvl_points,
                           img_shape,
                           scale_factor,
                           cfg,
                           rescale=False,
                           with_nms=True):
        """Transform outputs for a single batch item into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for a single scale
                level with shape (num_points * num_classes, H, W).
            bbox_preds (list[Tensor]): Box offsets for a single scale
                level with shape (num_points * 4, H, W).
            mlvl_points (list[Tensor]): Box reference for a single scale level
                with shape (num_total_points, 4).
            img_shape (tuple[int]): Shape of the input image,
                (height, width, 3).
            scale_factor (ndarray): Scale factor of the image arrange as
                (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before returning boxes.
                Default: True.

        Returns:
            tuple(Tensor):
                det_bboxes (Tensor): BBox predictions in shape (n, 5), where
                    the first 4 columns are bounding box positions
                    (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
                    between 0 and 1.
                det_labels (Tensor): A (n,) tensor where each item is the
                    predicted class label of the corresponding box.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds,
                                                mlvl_points):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            scores = cls_score.permute(1, 2, 0).reshape(
                -1, self.cls_out_channels).contiguous().sigmoid()
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous()

            nms_pre = cfg.get('nms_pre', -1)
            if 0 < nms_pre < scores.shape[0]:
                max_scores, _ = scores.max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                points = points[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
            bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
        # BG cat_id: num_class
        mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
        if with_nms:
            det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
                                                    cfg.score_thr, cfg.nms,
                                                    cfg.max_per_img)
            return det_bboxes, det_labels
        else:
            return mlvl_bboxes, mlvl_scores

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points according to feature map sizes."""
        h, w = featmap_size
        x_range = torch.arange(
            0, w * stride, stride, dtype=dtype, device=device)
        y_range = torch.arange(
            0, h * stride, stride, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)
        # to be compatible with anchor points in ATSS
        if self.use_atss:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + \
                stride * self.anchor_center_offset
        else:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
        return points

    def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
                    img_metas, gt_bboxes_ignore):
        """A wrapper for computing ATSS and FCOS targets for points in multiple
        images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).

        Returns:
            tuple:
                labels_list (list[Tensor]): Labels of each level.
                label_weights (Tensor/None): Label weights of all levels.
                bbox_targets_list (list[Tensor]): Regression targets of each
                    level, (l, t, r, b).
                bbox_weights (Tensor/None): Bbox weights of all levels.
        """
        if self.use_atss:
            return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,
                                         gt_labels, img_metas,
                                         gt_bboxes_ignore)
        else:
            self.norm_on_bbox = False
            return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)

    def _get_target_single(self, *args, **kwargs):
        """Avoid ambiguity in multiple inheritance."""
        if self.use_atss:
            return ATSSHead._get_target_single(self, *args, **kwargs)
        else:
            return FCOSHead._get_target_single(self, *args, **kwargs)

    def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute FCOS regression and classification targets for points in
        multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).

        Returns:
            tuple:
                labels (list[Tensor]): Labels of each level.
                label_weights: None, to be compatible with ATSS targets.
                bbox_targets (list[Tensor]): BBox targets of each level.
                bbox_weights: None, to be compatible with ATSS targets.
        """
        labels, bbox_targets = FCOSHead.get_targets(self, points,
                                                    gt_bboxes_list,
                                                    gt_labels_list)
        label_weights = None
        bbox_weights = None
        return labels, label_weights, bbox_targets, bbox_weights

    def get_atss_targets(self,
                         cls_scores,
                         mlvl_points,
                         gt_bboxes,
                         gt_labels,
                         img_metas,
                         gt_bboxes_ignore=None):
        """A wrapper for computing ATSS targets for points in multiple images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4). Default: None.

        Returns:
            tuple:
                labels_list (list[Tensor]): Labels of each level.
                label_weights (Tensor): Label weights of all levels.
                bbox_targets_list (list[Tensor]): Regression targets of each
                    level, (l, t, r, b).
                bbox_weights (Tensor): Bbox weights of all levels.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1

        cls_reg_targets = ATSSHead.get_targets(
            self,
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            unmap_outputs=True)
        if cls_reg_targets is None:
            return None

        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets

        bbox_targets_list = [
            bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
        ]

        num_imgs = len(img_metas)
        # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
        bbox_targets_list = self.transform_bbox_targets(
            bbox_targets_list, mlvl_points, num_imgs)

        labels_list = [labels.reshape(-1) for labels in labels_list]
        label_weights_list = [
            label_weights.reshape(-1) for label_weights in label_weights_list
        ]
        bbox_weights_list = [
            bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
        ]
        label_weights = torch.cat(label_weights_list)
        bbox_weights = torch.cat(bbox_weights_list)
        return labels_list, label_weights, bbox_targets_list, bbox_weights

    def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
        """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.

        Args:
            decoded_bboxes (list[Tensor]): Regression targets of each level,
                in the form of (x1, y1, x2, y2).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            num_imgs (int): the number of images in a batch.

        Returns:
            bbox_targets (list[Tensor]): Regression targets of each level in
                the form of (l, t, r, b).
        """
        # TODO: Re-implemented in Class PointCoder
        assert len(decoded_bboxes) == len(mlvl_points)
        num_levels = len(decoded_bboxes)
        mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
        bbox_targets = []
        for i in range(num_levels):
            bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i])
            bbox_targets.append(bbox_target)

        return bbox_targets

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Override the method in the parent class to avoid changing para's
        name."""
        pass
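The Example block in the docstring above can be run as a small smoke test; a sketch assuming MMDetection 2.x is installed and the head is importable from mmdet.models.dense_heads:

import torch
from mmdet.models.dense_heads import VFNetHead

# 11 classes, 7 input channels; one dummy FPN feature map per stride.
head = VFNetHead(11, 7)
feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
cls_scores, bbox_preds, bbox_preds_refine = head.forward(feats)
assert len(cls_scores) == len(head.scales)
print([s.shape for s in cls_scores])  # one IoU-aware score map per level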
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py
DELETED
@@ -1,82 +0,0 @@
import torch
import torch.nn as nn
import math


######################################################################################
# position embedding
######################################################################################
class PositionEmbeddingLearned(nn.Module):
    """
    This is a learned version of the position embedding
    """
    def __init__(self, num_pos_feats=256):
        super().__init__()
        self.row_embed = nn.Embedding(32, num_pos_feats)
        self.col_embed = nn.Embedding(32, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, x, mask):
        h, w = x.shape[-2:]
        i = torch.arange(w, device=x.device)
        j = torch.arange(h, device=x.device)
        x_emb = self.col_embed(i).unsqueeze(0).repeat(h, 1, 1)
        y_emb = self.row_embed(j).unsqueeze(1).repeat(1, w, 1)
        pos = (x_emb + y_emb).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
        return pos


class PositionEmbeddingSine(nn.Module):
    """
    This is a standard version of the position embedding, very similar to the one used by the
    "Attention is all you need" paper, generalized to work on examples
    """
    def __init__(self, feats_dim=512, temperature=10000, normalize=False, scale=None):
        """
        explicitly encode the position using the sinusoid:
        PE(pos,2i) = sin(pos/temperature^(2*i/d_model))
        PE(pos,2i+1) = cos(pos/temperature^(2*i/d_model))
        :param feats_dim: the dimension of features, each dimension of the positional embedding to a sinusoid
        :param temperature: wavelengths from a geometric progression from scale
        :param normalize: whether to normalize the position to (0,1)
        :param scale: scale for the position embedding
        """
        super(PositionEmbeddingSine, self).__init__()
        self.feats_dim = feats_dim
        self.T = temperature
        self.norm = normalize
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, x, mask):
        x_embed = mask.cumsum(1, dtype=torch.float32)
        y_embed = mask.cumsum(2, dtype=torch.float32)
        if self.norm:
            eps = 1e-5
            x_embed = x_embed / (x_embed[:, -1:, :] + eps) * self.scale
            y_embed = y_embed / (y_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.feats_dim, dtype=torch.float32, device=x.device)
        dim_t = self.T ** (2*(dim_t//2)/self.feats_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t

        pos_x[:, :, :, 0::2], pos_x[:, :, :, 1::2] = pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()
        pos_y[:, :, :, 0::2], pos_y[:, :, :, 1::2] = pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()
        pos = (pos_x + pos_y).permute(0, 3, 1, 2) * 0.5
        return pos


def build_position_embed(embed_type='learned', feats_dim=512, temperature=10000):
    if embed_type == 'sine':
        pos_embed = PositionEmbeddingSine(feats_dim, temperature, normalize=True)
    elif embed_type == 'learned':
        pos_embed = PositionEmbeddingLearned(feats_dim)
    else:
        raise ValueError(f"not supported {embed_type}")
    return pos_embed
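A minimal usage sketch for the factory above, assuming the module is importable under the path shown in the file header and that the mask marks valid positions with ones:

import torch
from model.transformer_ops.position_embedding import build_position_embed  # assumed import path

x = torch.randn(2, 512, 8, 8)   # hypothetical feature map: batch 2, 512 channels, 8x8 grid
mask = torch.ones(2, 8, 8)      # 1 = valid position

sine = build_position_embed('sine', feats_dim=512)
learned = build_position_embed('learned', feats_dim=512)
print(sine(x, mask).shape)      # torch.Size([2, 512, 8, 8])
print(learned(x, mask).shape)   # torch.Size([2, 512, 8, 8])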
spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py
DELETED
@@ -1,145 +0,0 @@
import onnxruntime
import librosa
import numpy as np
import soundfile


class ContentVec:
    def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
        print("load model(s) from {}".format(vec_path))
        if device == "cpu" or device is None:
            providers = ["CPUExecutionProvider"]
        elif device == "cuda":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        elif device == "dml":
            providers = ["DmlExecutionProvider"]
        else:
            raise RuntimeError("Unsupported Device")
        self.model = onnxruntime.InferenceSession(vec_path, providers=providers)

    def __call__(self, wav):
        return self.forward(wav)

    def forward(self, wav):
        feats = wav
        if feats.ndim == 2:  # double channels
            feats = feats.mean(-1)
        assert feats.ndim == 1, feats.ndim
        feats = np.expand_dims(np.expand_dims(feats, 0), 0)
        onnx_input = {self.model.get_inputs()[0].name: feats}
        logits = self.model.run(None, onnx_input)[0]
        return logits.transpose(0, 2, 1)


def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
    if f0_predictor == "pm":
        from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

        f0_predictor_object = PMF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    elif f0_predictor == "harvest":
        from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
            HarvestF0Predictor,
        )

        f0_predictor_object = HarvestF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    elif f0_predictor == "dio":
        from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor

        f0_predictor_object = DioF0Predictor(
            hop_length=hop_length, sampling_rate=sampling_rate
        )
    else:
        raise Exception("Unknown f0 predictor")
    return f0_predictor_object


class OnnxRVC:
    def __init__(
        self,
        model_path,
        sr=40000,
        hop_size=512,
        vec_path="vec-768-layer-12",
        device="cpu",
    ):
        vec_path = f"pretrained/{vec_path}.onnx"
        self.vec_model = ContentVec(vec_path, device)
        if device == "cpu" or device is None:
            providers = ["CPUExecutionProvider"]
        elif device == "cuda":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        elif device == "dml":
            providers = ["DmlExecutionProvider"]
        else:
            raise RuntimeError("Unsupported Device")
        self.model = onnxruntime.InferenceSession(model_path, providers=providers)
        self.sampling_rate = sr
        self.hop_size = hop_size

    def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
        onnx_input = {
            self.model.get_inputs()[0].name: hubert,
            self.model.get_inputs()[1].name: hubert_length,
            self.model.get_inputs()[2].name: pitch,
            self.model.get_inputs()[3].name: pitchf,
            self.model.get_inputs()[4].name: ds,
            self.model.get_inputs()[5].name: rnd,
        }
        return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)

    def inference(
        self,
        raw_path,
        sid,
        f0_method="dio",
        f0_up_key=0,
        pad_time=0.5,
        cr_threshold=0.02,
    ):
        f0_min = 50
        f0_max = 1100
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        f0_predictor = get_f0_predictor(
            f0_method,
            hop_length=self.hop_size,
            sampling_rate=self.sampling_rate,
            threshold=cr_threshold,
        )
        wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
        org_length = len(wav)
        if org_length / sr > 50.0:
            raise RuntimeError("Reached Max Length")

        wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
        wav16k = wav16k

        hubert = self.vec_model(wav16k)
        hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
        hubert_length = hubert.shape[1]

        pitchf = f0_predictor.compute_f0(wav, hubert_length)
        pitchf = pitchf * 2 ** (f0_up_key / 12)
        pitch = pitchf.copy()
        f0_mel = 1127 * np.log(1 + pitch / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
            f0_mel_max - f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        pitch = np.rint(f0_mel).astype(np.int64)

        pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
        pitch = pitch.reshape(1, len(pitch))
        ds = np.array([sid]).astype(np.int64)

        rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
        hubert_length = np.array([hubert_length]).astype(np.int64)

        out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
        out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
        return out_wav[0:org_length]
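A minimal usage sketch for OnnxRVC, assuming the module path in the file header; every file path below is hypothetical, and the exported RVC ONNX model plus the ContentVec weights must exist locally:

import soundfile

from lib.infer_pack.onnx_inference import OnnxRVC  # assumed import path

rvc = OnnxRVC("weights/my_voice.onnx", sr=40000, hop_size=512,
              vec_path="vec-768-layer-12", device="cpu")
audio = rvc.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)  # int16 samples
soundfile.write("converted.wav", audio, 40000)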
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/__init__.py
DELETED
File without changes
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py
DELETED
@@ -1,151 +0,0 @@
|
|
1 |
-
from operator import itemgetter
|
2 |
-
from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence
|
3 |
-
|
4 |
-
from . import errors
|
5 |
-
from .protocol import is_renderable, rich_cast
|
6 |
-
|
7 |
-
if TYPE_CHECKING:
|
8 |
-
from .console import Console, ConsoleOptions, RenderableType
|
9 |
-
|
10 |
-
|
11 |
-
class Measurement(NamedTuple):
|
12 |
-
"""Stores the minimum and maximum widths (in characters) required to render an object."""
|
13 |
-
|
14 |
-
minimum: int
|
15 |
-
"""Minimum number of cells required to render."""
|
16 |
-
maximum: int
|
17 |
-
"""Maximum number of cells required to render."""
|
18 |
-
|
19 |
-
@property
|
20 |
-
def span(self) -> int:
|
21 |
-
"""Get difference between maximum and minimum."""
|
22 |
-
return self.maximum - self.minimum
|
23 |
-
|
24 |
-
def normalize(self) -> "Measurement":
|
25 |
-
"""Get measurement that ensures that minimum <= maximum and minimum >= 0
|
26 |
-
|
27 |
-
Returns:
|
28 |
-
Measurement: A normalized measurement.
|
29 |
-
"""
|
30 |
-
minimum, maximum = self
|
31 |
-
minimum = min(max(0, minimum), maximum)
|
32 |
-
return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
|
33 |
-
|
34 |
-
def with_maximum(self, width: int) -> "Measurement":
|
35 |
-
"""Get a RenderableWith where the widths are <= width.
|
36 |
-
|
37 |
-
Args:
|
38 |
-
width (int): Maximum desired width.
|
39 |
-
|
40 |
-
Returns:
|
41 |
-
Measurement: New Measurement object.
|
42 |
-
"""
|
43 |
-
minimum, maximum = self
|
44 |
-
return Measurement(min(minimum, width), min(maximum, width))
|
45 |
-
|
46 |
-
def with_minimum(self, width: int) -> "Measurement":
|
47 |
-
"""Get a RenderableWith where the widths are >= width.
|
48 |
-
|
49 |
-
Args:
|
50 |
-
width (int): Minimum desired width.
|
51 |
-
|
52 |
-
Returns:
|
53 |
-
Measurement: New Measurement object.
|
54 |
-
"""
|
55 |
-
minimum, maximum = self
|
56 |
-
        width = max(0, width)
        return Measurement(max(minimum, width), max(maximum, width))

    def clamp(
        self, min_width: Optional[int] = None, max_width: Optional[int] = None
    ) -> "Measurement":
        """Clamp a measurement within the specified range.

        Args:
            min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
            max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.

        Returns:
            Measurement: New Measurement object.
        """
        measurement = self
        if min_width is not None:
            measurement = measurement.with_minimum(min_width)
        if max_width is not None:
            measurement = measurement.with_maximum(max_width)
        return measurement

    @classmethod
    def get(
        cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
    ) -> "Measurement":
        """Get a measurement for a renderable.

        Args:
            console (~rich.console.Console): Console instance.
            options (~rich.console.ConsoleOptions): Console options.
            renderable (RenderableType): An object that may be rendered with Rich.

        Raises:
            errors.NotRenderableError: If the object is not renderable.

        Returns:
            Measurement: Measurement object containing range of character widths required to render the object.
        """
        _max_width = options.max_width
        if _max_width < 1:
            return Measurement(0, 0)
        if isinstance(renderable, str):
            renderable = console.render_str(
                renderable, markup=options.markup, highlight=False
            )
        renderable = rich_cast(renderable)
        if is_renderable(renderable):
            get_console_width: Optional[
                Callable[["Console", "ConsoleOptions"], "Measurement"]
            ] = getattr(renderable, "__rich_measure__", None)
            if get_console_width is not None:
                render_width = (
                    get_console_width(console, options)
                    .normalize()
                    .with_maximum(_max_width)
                )
                if render_width.maximum < 1:
                    return Measurement(0, 0)
                return render_width.normalize()
            else:
                return Measurement(0, _max_width)
        else:
            raise errors.NotRenderableError(
                f"Unable to get render width for {renderable!r}; "
                "a str, Segment, or object with __rich_console__ method is required"
            )


def measure_renderables(
    console: "Console",
    options: "ConsoleOptions",
    renderables: Sequence["RenderableType"],
) -> "Measurement":
    """Get a measurement that would fit a number of renderables.

    Args:
        console (~rich.console.Console): Console instance.
        options (~rich.console.ConsoleOptions): Console options.
        renderables (Iterable[RenderableType]): One or more renderable objects.

    Returns:
        Measurement: Measurement object containing range of character widths required to
            contain all given renderables.
    """
    if not renderables:
        return Measurement(0, 0)
    get_measurement = Measurement.get
    measurements = [
        get_measurement(console, options, renderable) for renderable in renderables
    ]
    measured_width = Measurement(
        max(measurements, key=itemgetter(0)).minimum,
        max(measurements, key=itemgetter(1)).maximum,
    )
    return measured_width
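
For reference, a minimal usage sketch of the Measurement / measure_renderables API listed above. The import paths assume the vendored copy under pip._vendor and a default Console; with the standalone rich distribution the same imports would drop the pip._vendor prefix.

from pip._vendor.rich.console import Console
from pip._vendor.rich.measure import Measurement, measure_renderables

console = Console()
# Range of character widths needed to render a plain string.
single = Measurement.get(console, console.options, "Hello, world!")
# Combined range for several renderables, capped at 40 cells via clamp().
combined = measure_renderables(
    console, console.options, ["foo", "a much longer line of text"]
).clamp(max_width=40)
print(single, combined)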
spaces/AutoBG/Auto-BoardGame/description_generator.py
DELETED
@@ -1,119 +0,0 @@

import numpy as np
import re
import spacy
import openai
from operator import itemgetter
#user input manager class
class input_manager:

    #initialize key dictionary from vector data frame
    def __init__(self,key_df, slim_df, search_tokens):
        self.key_df = key_df
        self.slim_df = slim_df
        self.search_tokens = search_tokens
        self.key = dict(zip(list(key_df.columns),np.zeros(len(key_df.columns))))
        self.nlp = spacy.load("en_core_web_md")

    #translate input text to vector
    def set_input(self,input_cats):
        #need setup to apply correct group tag to values
        #separate known/unknown features
        k_flags = [cat for cat in input_cats if cat in list(self.key.keys())]
        unk_flags = [cat for cat in input_cats if cat not in list(self.key.keys())]

        #process within feature class similarity for each unknown input
        if len(unk_flags)>0:

            outs = []
            for word in unk_flags:
                if re.match(r"game_type_",word):
                    tok = self.nlp(word.split("_")[-1])
                    mtch = max([(key,key.similarity(tok)) for key in self.search_tokens[0]],key=itemgetter(1))
                    #if no known match is found (model doesn't recognize input word), we're going to discard - other solutions performance prohibitive
                    if mtch[1]>0:
                        outs.append("game_type_"+mtch[0])
                elif re.match(r"mechanic_",word):
                    tok = self.nlp(word.split("_")[-1])
                    mtch = max([(key,key.similarity(tok)) for key in self.search_tokens[1]],key=itemgetter(1))
                    if mtch[1]>0:
                        outs.append("mechanic_"+mtch[0])
                elif re.match(r"category_",word):
                    tok = self.nlp(word.split("_")[-1])
                    mtch=max([(key,key.similarity(tok)) for key in self.search_tokens[2]],key=itemgetter(1))
                    if mtch[1]>0:
                        outs.append("category_"+mtch[0])
                elif re.match(r"family_",word):
                    tok = self.nlp(word.split("_")[-1])
                    mtch=max([(key,key.similarity(tok)) for key in self.search_tokens[3]],key=itemgetter(1))
                    if mtch[1]>0:
                        outs.append("family_"+str(mtch[0]))

            #if unks are processed, rejoin nearest match to known.
            k_flags = list(set(k_flags+outs))

        #preserve global key and ouput copy w/input keys activated to 1
        d = self.key.copy()
        for cat in k_flags:
            d[cat] = 1.0

        # DELETE ME
        return d

    def input_parser(self,in_vec):
        #extracting keys from processed vector
        ks = [k for k,v in in_vec.items() if v == 1]

        return ks

class model_control:
    def __init__(self, apikey, model_id):
        self.api_key = apikey
        openai.api_key = self.api_key

        self.prompt = None

        self.model = openai.FineTune.retrieve(id=model_id).fine_tuned_model

    def prompt_formatter(self,ks):
        self.prompt = ". ".join(ks) + "\n\n###\n\n"

    def call_api(self,status=0):
        if status == 0:
            temp=0.5
            pres=0.7
        elif status == 1:
            temp=0.4
            pres=0.6
        elif status == 2:
            temp=0.5
            pres=0.8

        answer = openai.Completion.create(
            model=self.model,
            prompt=self.prompt,
            max_tokens=512,
            temperature=temp,
            stop=["END"],
            presence_penalty=pres,
            frequency_penalty=0.5
        )
        return answer['choices'][0]['text']

    def resp_cleanup(self,text):

        if ((text[-1] != "!") & (text[-1] != ".") & (text[-1] != "?")):
            text = " ".join([e+'.' for e in text.split('.')[0:-1] if e])

        sent = re.split(r'([.?!:])', text)
        phrases = ["[Dd]esigned by","[Dd]esigner of","[Aa]rt by","[Aa]rtist of","[Pp]ublished","[Pp]ublisher of"]

        pat = re.compile("(?:" + "|".join(phrases) + ")")
        fix = re.compile("(?<=[.!?])[.!?]")

        text = re.sub(fix,'',''.join([s for s in sent if pat.search(s) == None]))

        return text
spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py
DELETED
@@ -1,60 +0,0 @@
import os
import json
from typing import Dict, Any
import uuid
from datetime import datetime
import pytz

import huggingface_hub
from huggingface_hub import Repository


class InteractionsLogger:
    def __init__(self, name: str, persist=False):
        self.persist = persist
        self.counter = 0
        self.name = name  # unique id
        HF_TOKEN = os.environ.get("HF_TOKEN")
        HF_DATASET_REPO_URL = os.environ.get("HF_DATASET_REPO_URL")
        if (HF_TOKEN is not None) and (HF_DATASET_REPO_URL is not None):
            self.repo = Repository(
                local_dir="data", clone_from=HF_DATASET_REPO_URL, use_auth_token=HF_TOKEN
            )
        else:
            self.persist = False

    def set_goal(self, goal: str):
        # Initialize two variables for saving two files (self.messages for
        # training and self.structure_data for later use)
        self.messages = [{"goal": goal}]
        self.structured_data = {"goal": goal}

    def add_system(self, more: Dict):
        self.convos = [{"from": "system"} | more]

    def add_ai(self, msg: str):
        self.convos.append({"from": "ai", "value": msg})
        self.messages.append({"id": f"{self.name}_{self.counter}", "conversations": self.convos})
        self.counter += 1

    def add_structured_data(self, data: Dict[str, Any]):
        self.structured_data.update({f"turn_{self.counter}": data})

    def add_message(self, data: Dict[str, Any]):
        self.structured_data.update(data)

    def save(self):
        # add current datetime
        self.add_message({"datetime": datetime.now(pytz.utc).strftime("%m/%d/%Y %H:%M:%S %Z%z")})
        if self.persist:
            # TODO: want to add retry in a loop?
            self.repo.git_pull()
            fname = uuid.uuid4().hex[:16]
            with open(f"./data/{fname}.json", "w") as f:
                json.dump(self.messages, f, indent=2)
            with open(f"./data/{fname}.clean.json", "w") as f:
                json.dump(self.structured_data, f, indent=2)
            commit_url = self.repo.push_to_hub()

    def add_cost(self, cost):
        self.messages.append({"metrics": cost})
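
For reference, a minimal usage sketch of the InteractionsLogger removed above; the goal and message values are hypothetical. With persist=False (or with HF_TOKEN / HF_DATASET_REPO_URL unset) save() only stamps the current datetime and pushes nothing to the Hub.

logger = InteractionsLogger(name="demo-session", persist=False)
logger.set_goal("Summarise the three most recent arxiv papers")  # hypothetical goal
logger.add_system({"value": "You are a helpful research agent."})
logger.add_ai("Here is a summary of the three papers ...")
logger.add_cost({"total_tokens": 512})
logger.save()  # records a timestamp; no push because persist is False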
spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py
DELETED
@@ -1,87 +0,0 @@
from argparse import ArgumentParser
import os
import json
import sys
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms

sys.path.append(".")
sys.path.append("..")

from criteria.lpips.lpips import LPIPS
from datasets.gt_res_dataset import GTResDataset


def parse_args():
    parser = ArgumentParser(add_help=False)
    parser.add_argument('--mode', type=str, default='lpips', choices=['lpips', 'l2'])
    parser.add_argument('--data_path', type=str, default='results')
    parser.add_argument('--gt_path', type=str, default='gt_images')
    parser.add_argument('--workers', type=int, default=4)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--is_cars', action='store_true')
    args = parser.parse_args()
    return args


def run(args):
    resize_dims = (256, 256)
    if args.is_cars:
        resize_dims = (192, 256)
    transform = transforms.Compose([transforms.Resize(resize_dims),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

    print('Loading dataset')
    dataset = GTResDataset(root_path=args.data_path,
                           gt_dir=args.gt_path,
                           transform=transform)

    dataloader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=int(args.workers),
                            drop_last=True)

    if args.mode == 'lpips':
        loss_func = LPIPS(net_type='alex')
    elif args.mode == 'l2':
        loss_func = torch.nn.MSELoss()
    else:
        raise Exception('Not a valid mode!')
    loss_func.cuda()

    global_i = 0
    scores_dict = {}
    all_scores = []
    for result_batch, gt_batch in tqdm(dataloader):
        for i in range(args.batch_size):
            loss = float(loss_func(result_batch[i:i + 1].cuda(), gt_batch[i:i + 1].cuda()))
            all_scores.append(loss)
            im_path = dataset.pairs[global_i][0]
            scores_dict[os.path.basename(im_path)] = loss
            global_i += 1

    all_scores = list(scores_dict.values())
    mean = np.mean(all_scores)
    std = np.std(all_scores)
    result_str = 'Average loss is {:.2f}+-{:.2f}'.format(mean, std)
    print('Finished with ', args.data_path)
    print(result_str)

    out_path = os.path.join(os.path.dirname(args.data_path), 'inference_metrics')
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    with open(os.path.join(out_path, 'stat_{}.txt'.format(args.mode)), 'w') as f:
        f.write(result_str)
    with open(os.path.join(out_path, 'scores_{}.json'.format(args.mode)), 'w') as f:
        json.dump(scores_dict, f)


if __name__ == '__main__':
    args = parse_args()
    run(args)
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py
DELETED
@@ -1,13 +0,0 @@
from detectron2.data.detection_utils import create_keypoint_hflip_indices

from .coco import dataloader

dataloader.train.dataset.min_keypoints = 1
dataloader.train.dataset.names = "keypoints_coco_2017_train"
dataloader.test.dataset.names = "keypoints_coco_2017_val"

dataloader.train.mapper.update(
    use_instance_mask=False,
    use_keypoint=True,
    keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names),
)
spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Blockman GO Nueva versión APK: Todo lo que necesitas saber</h1>
|
3 |
-
<p>¿Estás buscando una aplicación divertida y emocionante que te permita jugar diferentes juegos, chatear con amigos y crear tus propios mundos? Si es así, es posible que desee echa un vistazo Blockman GO nueva versión APK. Esta es una aplicación gratuita que ofrece una gran cantidad de características y contenido para los jugadores de todas las edades y preferencias. En este artículo, le diremos todo lo que necesita saber sobre Blockman GO nueva versión APK, incluyendo lo que es, lo que es nuevo en la última versión, cómo descargarlo e instalarlo, y por qué debe jugar. </p>
|
4 |
-
<h2>blockman ir nueva versión apk</h2><br /><p><b><b>DOWNLOAD</b> ✶ <a href="https://bltlly.com/2v6MEL">https://bltlly.com/2v6MEL</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Blockman GO? </h2>
|
6 |
-
<p>Blockman GO es una aplicación gratuita que te permite jugar varios minijuegos de estilo bloque, chatear con otros jugadores y hacer amigos. También puede crear y compartir sus propios juegos utilizando el editor incorporado. Blockman GO tiene una interfaz simple y fácil de usar, y es compatible con varios idiomas. Puedes descargar Blockman GO desde la Google Play Store o el sitio web oficial. </p>
|
7 |
-
<h3>Una aplicación gratuita con minijuegos, chat y amigos</h3>
|
8 |
-
<p>Una de las principales características de Blockman GO es que ofrece una amplia gama de minijuegos que puedes jugar con otros jugadores online. Algunos de los minijuegos populares son Bed Wars, Sky Wars, Murder Mystery, Egg Wars, Build Battle, Parkour y más. Cada minijuego tiene sus propias reglas, objetivos y recompensas. También puedes chatear con otros jugadores en el lobby del juego o en el juego. Puedes hacer amistad con otros jugadores enviándoles solicitudes de amistad o uniéndote a sus clubes. También puedes invitar a tus amigos a jugar contigo en habitaciones privadas. </p>
|
9 |
-
<h3>Una plataforma para crear y compartir tus propios juegos</h3>
|
10 |
-
|
11 |
-
<h2>¿Qué hay de nuevo en la última versión de Blockman GO? </h2>
|
12 |
-
<p>La última versión de Blockman GO es Garena Blockman GO, que es una colaboración con Garena Free Fire, uno de los juegos de battle royale más populares del mundo. Garena Blockman GO introduce algunas nuevas características y mejoras en la aplicación, como:</p>
|
13 |
-
<h3>Garena Blockman GO: una colaboración con Garena Free Fire</h3>
|
14 |
-
<p>Garena Blockman GO es una versión especial de Blockman GO que cuenta con algunos elementos de Garena Free Fire. Por ejemplo, puedes obtener algunas pieles y artículos exclusivos de Garena Free Fire en Blockman GO. También puedes participar en algunos eventos y actividades relacionadas con Garena Free Fire.</p>
|
15 |
-
<h4>Frontline: un nuevo juego de disparos multijugador 30 vs 30</h4>
|
16 |
-
<p>Uno de los nuevos minijuegos en Garena Blockman GO es Frontline, que es un juego de disparos multijugador de 30 vs 30. Puedes elegir unirte al equipo azul o al equipo rojo, y luchar contra el equipo enemigo en un mapa grande. Puedes usar varias armas, vehículos y tácticas para ganar el juego. También puedes ganar monedas y puntos de experiencia jugando Frontline.</p>
|
17 |
-
<h4>Otras características y mejoras</h4>
|
18 |
-
<p>Garena Blockman GO también trae algunas otras características y mejoras a la aplicación, como:</p>
|
19 |
-
<p></p>
|
20 |
-
<ul>
|
21 |
-
<li>Una nueva interfaz de usuario más colorida y dinámica. </li>
|
22 |
-
<li>Un nuevo sistema de clasificación que muestra tu nivel y progreso en diferentes minijuegos. </li>
|
23 |
-
<li>Un nuevo sistema de chat que soporta mensajes de voz y texto. </li>
|
24 |
-
<li>Un nuevo sistema de recompensas que te da bonificaciones diarias de inicio de sesión, sorteos y logros. </li>
|
25 |
-
<li>Un nuevo sistema de tienda que te permite comprar y vender artículos usando monedas o diamantes. </li>
|
26 |
-
</ul>
|
27 |
-
<h2>Cómo descargar e instalar Blockman GO nueva versión APK? </h2>
|
28 |
-
<p>Si desea descargar e instalar Blockman GO nueva versión APK, puede seguir estos pasos:</p>
|
29 |
-
<h3>Pasos para descargar e instalar desde el sitio web oficial</h3>
|
30 |
-
<ol>
|
31 |
-
<li>Ir al sitio web oficial de Blockman GO y haga clic en el botón "Descargar". </li>
|
32 |
-
|
33 |
-
<li>Espera a que termine la descarga y luego abre el archivo APK. </li>
|
34 |
-
<li>Permite la instalación de fuentes desconocidas si tu dispositivo lo solicita. </li>
|
35 |
-
<li>Siga las instrucciones en la pantalla para completar la instalación. </li>
|
36 |
-
<li> Iniciar la aplicación y disfrutar de la reproducción de Blockman GO nueva versión APK.</li>
|
37 |
-
</ol>
|
38 |
-
<h3>Consejos para evitar malware y virus</h3>
|
39 |
-
<p>Al descargar e instalar Blockman GO nueva versión APK, usted debe tener cuidado de algunos riesgos potenciales, tales como malware y virus. Estos son algunos consejos para evitarlos:</p>
|
40 |
-
<ul>
|
41 |
-
<li>Solo descargar el archivo APK desde el sitio web oficial o la Google Play Store. No confíes en fuentes de terceros que dicen ofrecer el archivo APK. </li>
|
42 |
-
<li>Escanear el archivo APK con un software antivirus fiable antes de abrirlo. </li>
|
43 |
-
<li>No conceda permisos innecesarios ni acceso a la aplicación. </li>
|
44 |
-
<li> Actualizar la aplicación regularmente para obtener los últimos parches de seguridad y correcciones de errores. </li>
|
45 |
-
</ul>
|
46 |
-
<h2>¿Por qué debe jugar Blockman GO nueva versión APK? </h2>
|
47 |
-
<p>Blockman GO nueva versión APK es una gran aplicación para cualquier persona que ama los juegos, socializar y crear. Aquí hay algunas razones por las que deberías jugar:</p>
|
48 |
-
<h3>Disfruta de una variedad de juegos divertidos y creativos</h3>
|
49 |
-
<p>Blockman GO nueva versión APK ofrece una variedad de juegos divertidos y creativos que se puede jugar con otros jugadores en línea. Puedes elegir entre diferentes géneros, como acción, aventura, rompecabezas, estrategia, casual y más. También puedes probar algunos de los nuevos juegos que se agregan regularmente, como Frontline, Garena Free Fire y más. También puedes crear tus propios juegos usando el editor integrado y compartirlos con otros jugadores. </p>
|
50 |
-
<h3>Conoce y chatea con jugadores de todo el mundo</h3>
|
51 |
-
|
52 |
-
<h3>Personaliza tu avatar y decora tu hogar</h3>
|
53 |
-
<p>Blockman GO nueva versión APK también le permite personalizar su avatar y decorar su hogar. Puedes elegir entre diferentes pieles, trajes, accesorios, peinados y más para hacer que tu avatar luzca único. También puedes comprar o ganar algunos artículos de Garena Free Fire en Blockman GO. También puede decorar su hogar con diferentes muebles, fondos de pantalla, pisos, ventanas, puertas y más. También puedes invitar a otros jugadores a visitar tu casa o sus hogares. </p>
|
54 |
-
<h2>Conclusión</h2>
|
55 |
-
<p>Blockman GO nueva versión APK es una aplicación gratuita que le permite jugar diferentes juegos, chatear con amigos, y crear sus propios mundos. Tiene muchas características y contenido para jugadores de todas las edades y preferencias. También tiene algunas nuevas características y mejoras en la última versión, como Garena Blockman GO, Frontline y más. Puede descargar e instalar Blockman GO nueva versión APK desde el sitio web oficial o la Google Play Store. También debe tener cuidado con el malware y los virus al descargar e instalar la aplicación. Usted debe jugar Blockman GO nueva versión APK porque es divertido, creativo, y social. </p>
|
56 |
-
<h2>Preguntas frecuentes</h2>
|
57 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Blockman GO nueva versión APK:</p>
|
58 |
-
<ol>
|
59 |
-
<li><b>Es Blockman GO nueva versión APK seguro? </ <b>Es Blockman GO nueva versión APK seguro? </b></li>
|
60 |
-
<p>Blockman GO nueva versión APK es seguro, siempre y cuando se descarga desde el sitio web oficial o la Google Play Store. También debe escanear el archivo APK con un software antivirus confiable antes de abrirlo. También debe evitar conceder permisos innecesarios o acceso a la aplicación. También debe actualizar la aplicación regularmente para obtener los últimos parches de seguridad y correcciones de errores. </p>
|
61 |
-
<li><b> ¿Cómo puedo obtener monedas y diamantes en Blockman GO nueva versión APK? </b></li>
|
62 |
-
|
63 |
-
<li><b>¿Cómo puedo crear mis propios juegos en Blockman GO nueva versión APK? </b></li>
|
64 |
-
<p>Puede crear sus propios juegos en Blockman GO nueva versión APK utilizando el editor incorporado. Puede acceder al editor pulsando en el botón "Crear" en la pantalla principal. Puede usar varias herramientas y recursos para diseñar sus propios mapas, personajes, elementos, scripts y más. También puede probar sus juegos antes de publicarlos. Puede compartir sus juegos con otros jugadores cargándolos en la plataforma Blockman GO. También puedes jugar a juegos de otros jugadores y calificarlos. </p>
|
65 |
-
<li><b> ¿Cómo puedo unirse o crear un club en Blockman GO nueva versión APK? </b></li>
|
66 |
-
<p>Un club es un grupo de jugadores que comparten un interés común o objetivo en Blockman GO nueva versión APK. Puede unirse o crear un club tocando el botón "Club" en la pantalla principal. Puede buscar clubes existentes por nombre, categoría o popularidad. También puedes crear tu propio club eligiendo un nombre, icono, descripción y categoría. Puedes invitar a otros jugadores a unirse a tu club o aceptar sus solicitudes. También puede chatear con los miembros de su club, enviar regalos y participar en actividades del club. </p>
|
67 |
-
<li><b> ¿Cómo puedo contactar con el servicio al cliente de Blockman GO nueva versión APK? </b></li>
|
68 |
-
<p>Si usted tiene alguna pregunta, problemas, o retroalimentación acerca de Blockman GO nueva versión APK, puede ponerse en contacto con el servicio al cliente tocando el botón "Feedback" en la pantalla principal. Puede elegir enviar un correo electrónico, un mensaje o una captura de pantalla al servicio de atención al cliente. También puede consultar la sección de preguntas frecuentes para algunos problemas y soluciones comunes. </p>
|
69 |
-
</ol></p> 64aa2da5cf<br />
|
70 |
-
<br />
|
71 |
-
<br />
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar AcroRip 9.0 3 Grieta completa: Lo que usted necesita saber</h1>
|
3 |
-
<p>Si está buscando un software que le pueda ayudar a imprimir blanco y color juntos en varios sustratos, como telas, plásticos, metales, cerámica y más, es posible que haya oído hablar de AcroRip. AcroRip es un software RIP (procesador de imágenes raster) que puede controlar los canales de tinta de su impresora y optimizar la calidad y velocidad de impresión. Está especialmente diseñado para impresoras planas UV e impresoras directas para prendas de vestir que utilizan cabezales de impresión Epson. </p>
|
4 |
-
<h2>descargar acrorip 9.0 3 completo crack</h2><br /><p><b><b>Download</b> 🗹 <a href="https://bltlly.com/2v6JeM">https://bltlly.com/2v6JeM</a></b></p><br /><br />
|
5 |
-
<p>Sin embargo, AcroRip no es un software libre y requiere un dongle USB especial para ejecutarse. Esto podría hacer que algunas personas busquen una manera de descargar AcroRip 9.0 3 completo crack, que es la última versión del software a partir de ahora. ¿Pero vale la pena? ¿Cuáles son las características, beneficios, inconvenientes, alternativas, pasos de instalación y revisiones de AcroRip 9.0 3? En este artículo, responderemos estas preguntas y le ayudaremos a tomar una decisión informada. </p>
|
6 |
-
<h2>Características de AcroRip 9.0 3</h2>
|
7 |
-
<p>AcroRip 9.0 3 es una versión actualizada del software AcroRip anterior que tiene algunas características nuevas y mejoradas. Aquí están algunas de ellas:</p>
|
8 |
-
<ul>
|
9 |
-
<li><strong>Impresión en blanco y en color de una pasada</strong>: Esta característica le permite imprimir tinta blanca y de color al mismo tiempo, sin necesidad de dos pases para sustratos oscuros. Esto puede ahorrarle tiempo y mejorar la calidad de impresión. </li>
|
10 |
-
<li><strong>Tiempos de carga más rápidos y compatibilidad con Windows 7/8/10</strong>: Esta característica hace que el software sea más sensible y estable, así como compatible con diferentes versiones del sistema operativo Windows. </li>
|
11 |
-
<li><strong>Configuración de canal personalizado y función de onda</strong>: Esta función le permite cambiar los canales a pedido según sus requisitos personalizados. Por ejemplo, si una boquilla de color está obstruida, puede usar un canal blanco y usar tinta de color en ese canal. También puede ajustar la configuración de onda para reducir los problemas de bandas en las impresoras UV. </li>
|
12 |
-
|
13 |
-
<li><strong>Compatibilidad ampliada de controladores y impresoras</strong>: Esta función admite más modelos de impresoras Epson, como Stylus Photo, EcoTank, SureColor, Stylus Pro, Expression, etc.</li>
|
14 |
-
</ul>
|
15 |
-
<h2>Beneficios de AcroRip 9.0 3</h2>
|
16 |
-
<p>AcroRip 9 . 3 tiene muchos beneficios para los usuarios que quieren imprimir blanco y color juntos en varios sustratos. Estos son algunos de ellos:</p>
|
17 |
-
<p></p>
|
18 |
-
<ul>
|
19 |
-
<li><strong>Calidad y velocidad de impresión mejoradas</strong>: AcroRip 9.0 3 puede optimizar la calidad y la velocidad de impresión mediante el control de los canales de tinta, el uso de la impresión de una sola pasada y el uso de la configuración RIP boost. También puede reducir los problemas de bandas mediante el uso de la función de onda. </li>
|
20 |
-
<li><strong>Menor consumo de tinta y costo</strong>: AcroRip 9.0 3 puede ahorrar tinta mediante el uso de ajustes de canal personalizados y el ajuste de la densidad de tinta y el tamaño de la gota. También puede usar tinta de color en canales blancos si es necesario, lo que puede reducir el desperdicio de tinta blanca. </li>
|
21 |
-
<li><strong>Precisión de color mejorada y perfiles ICC</strong>: AcroRip 9.0 3 puede mejorar la precisión y consistencia del color mediante el uso de perfiles ICC y herramientas de gestión de color. También puede soportar CMYK, RGB y colores planos. </li>
|
22 |
-
</ul>
|
23 |
-
<h2>Inconvenientes de AcroRip 9.0 3</h2>
|
24 |
-
<p>AcroRip 9.0 3 no es un software perfecto y tiene algunos inconvenientes que los usuarios deben tener en cuenta. Estos son algunos de ellos:</p>
|
25 |
-
<ul>
|
26 |
-
<li><strong>Necesidad de un dongle USB especial para ejecutar el software</strong>: AcroRip 9.0 3 requiere un dongle USB especial para activar el software y ejecutarlo en su computadora. Esto significa que necesita comprar el dongle desde el sitio web oficial o un distribuidor autorizado, y debe mantenerlo conectado cada vez que use el software. Si pierde o daña el dongle, es posible que ya no pueda usar el software. </li>
|
27 |
-
|
28 |
-
</ul>
|
29 |
-
<h2>Alternativas a AcroRip 9.0 3</h2>
|
30 |
-
<p>Si no está satisfecho con AcroRip 9.0 3 o desea probar otras opciones, hay algunas alternativas que puede considerar. Estos son algunos de ellos:</p>
|
31 |
-
<ul>
|
32 |
-
<li><strong>Cadlink</strong>: Cadlink es un software RIP que admite varios tipos de impresoras, como UV, DTG, solvente, eco-solvente, etc. Tiene características como gestión de tinta blanca, creación de perfiles ICC, corrección de color, impresión de datos variables, etc.</li>
|
33 |
-
<li><strong>EKprint</strong>: EKprint es un software RIP diseñado para impresoras DTG que utilizan cabezales de impresión Epson. Tiene características tales como impresi��n de un paso, cálculo de costo de tinta, verificación de boquilla, limpieza de la cabeza, etc.</li>
|
34 |
-
<li><strong>Otras opciones de software RIP</strong>: Hay muchas otras opciones de software RIP entre las que puede elegir, dependiendo de su modelo de impresora, presupuesto y preferencias. Algunos ejemplos son Wasatch SoftRIP, Onyx RIPCenter, PhotoPrint Server Pro, etc.</li>
|
35 |
-
</ul>
|
36 |
-
<h2>Instalación de AcroRip 9.0 3</h2>
|
37 |
-
<p>Si decide comprar AcroRip 9.0 3 desde el sitio web oficial o un distribuidor autorizado, tendrá que seguir estos pasos para instalar el software y el dongle:</p>
|
38 |
-
<ol>
|
39 |
-
<li><strong>Descargue el archivo de software desde el sitio web o el CD</strong>: Tendrá que descargar el archivo de software desde el sitio web o insertar el CD en su computadora. </li>
|
40 |
-
<li><strong>Extraiga el archivo y ejecute el archivo setup.exe</strong>: Necesitará extraer el archivo usando un programa como WinRAR o WinZip y ejecutar el archivo setup.exe como administrador. </li>
|
41 |
-
<li><strong>Siga las instrucciones del asistente de instalación</strong>: Tendrá que seguir las instrucciones del asistente de instalación y elegir su idioma, carpeta de destino, modelo de impresora, etc.</li>
|
42 |
-
<li><strong>Conecte el dongle USB en su computadora</strong>: Necesitará conectar el dongle USB a su computadora antes de iniciar el software. </li>
|
43 |
-
|
44 |
-
</ol>
|
45 |
-
<h2>Grieta de AcroRip 9.0 3</h2>
|
46 |
-
<p>Si tiene la tentación de descargar AcroRip 9.0 3 grieta completa de una fuente no oficial, como un sitio de torrent o un foro de crack, debe ser consciente de los riesgos y consecuencias de usar una versión agrietada del software. Estos son algunos de ellos:</p>
|
47 |
-
<ul>
|
48 |
-
<li><strong>Cuestiones legales</strong>: Descargar y usar una versión agrietada de AcroRip 9.0 3 es ilegal y viola los derechos de propiedad intelectual del desarrollador de software. Usted podría enfrentar acciones legales, multas o incluso tiempo en la cárcel si lo atrapan usando una versión rota del software. </li>
|
49 |
-
<li><strong>Problemas de seguridad</strong>: Descargar y usar una versión agrietada de AcroRip 9.0 3 es arriesgado y expone su computadora a malware, virus, spyware, ransomware y otros programas maliciosos. Puede perder sus datos, comprometer su privacidad o dañar su sistema si instala una versión rota del software. </li>
|
50 |
-
<li><strong>Problemas de rendimiento</strong>: Descargar y usar una versión agrietada de AcroRip 9.0 3 no es confiable e inestable. Es posible que experimente errores, bloqueos, bloqueos o problemas técnicos al usar una versión rota del software. También puede perderse actualizaciones, correcciones de errores y nuevas características que ofrece la versión oficial del software. </li>
|
51 |
-
</ul>
|
52 |
-
<p>Por lo tanto, le recomendamos encarecidamente que evite descargar y usar una versión agrietada de AcroRip 9.0 3 y en su lugar compre la versión oficial en el sitio web o en un distribuidor autorizado. </p>
|
53 |
-
<h2>Revisión de AcroRip 9.0 3</h2>
|
54 |
-
<p>AcroRip 9.0 3 es un software RIP popular y ampliamente utilizado que tiene muchas críticas positivas de los usuarios que lo han probado. Sin embargo, también tiene algunas críticas negativas de los usuarios que han encontrado algunos problemas con él. Aquí hay algunos pros y contras de AcroRip 9.0 3 basado en la retroalimentación del usuario:</p>
|
55 |
-
<tabla>
|
56 |
-
<tr>
|
57 |
-
<th>Pros</th>
|
58 |
-
<th>Contras</th>
|
59 |
-
</tr>
|
60 |
-
<tr>
|
61 |
-
<td>- Interfaz fácil de usar e intuitiva</td>
|
62 |
-
<td>- Caro y requiere un dongle</td>
|
63 |
-
</tr>
|
64 |
-
<tr>
|
65 |
-
|
66 |
-
<td>- Problemas antivirus y errores de configuración de lado a lado</td>
|
67 |
-
</tr>
|
68 |
-
<tr>
|
69 |
-
<td>- Tiempos de carga más rápidos y compatibilidad con Windows 7/8/10</td>
|
70 |
-
<td>- Atención al cliente limitada y documentación</td>
|
71 |
-
</tr>
|
72 |
-
<tr>
|
73 |
-
<td>- Configuración de canal personalizado y función de onda</td>
|
74 |
-
<td>- No compatible con Mac OS o Linux</td>
|
75 |
-
</tr>
|
76 |
-
<tr>
|
77 |
-
<td>- Configuración de impulso RIP y funcionalidad de alimentación de rollo</td>
|
78 |
-
<td>- No hay versión de prueba gratuita o demo disponible</td>
|
79 |
-
</tr>
|
80 |
-
<tr>
|
81 |
-
<td>- Compatibilidad ampliada de controladores y impresoras</td>
|
82 |
-
<td>- No hay comunidad en línea o foro para los usuarios</td>
|
83 |
-
</tr>
|
84 |
-
<tr>
|
85 |
-
<td>- Calidad y velocidad de impresión mejoradas</td>
|
86 |
-
<td></td>
|
87 |
-
</tr>
|
88 |
-
<tr>
|
89 |
-
<td>- Menor consumo y costo de tinta</td>
|
90 |
-
<td></td>
|
91 |
-
</tr>
|
92 |
-
<tr>
|
93 |
-
<td>- Precisión de color mejorada y perfiles ICC</td>
|
94 |
-
<td></td>
|
95 |
-
</tr>
|
96 |
-
<h1>Conclusión</h1>
|
97 |
-
<p>En conclusión, AcroRip 9.0 3 es un software RIP que puede ayudarlo a imprimir blanco y color juntos en varios sustratos, como telas, plásticos, metales, cerámica y más. Tiene muchas características, beneficios, inconvenientes, alternativas, pasos de instalación y comentarios que necesita saber antes de decidirse a descargarlo. </p>
|
98 |
-
<p>Si desea descargar AcroRip 9.0 3 full crack, debe ser consciente de los riesgos y consecuencias de usar una versión agrietada del software. Es ilegal, arriesgado, poco fiable e inestable. Le recomendamos que compre la versión oficial del sitio web o de un distribuidor autorizado. </p>
|
99 |
-
<p>Esperamos que este artículo haya sido útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejarlos a continuación. </p>
|
100 |
-
<h2>Preguntas frecuentes (preguntas frecuentes)</h2>
|
101 |
-
<p>Aquí hay algunas preguntas frecuentes que puede tener sobre AcroRip 9.0 3:</p>
|
102 |
-
<ol>
|
103 |
-
<li><strong>¿Cuál es el precio de AcroRip 9.0 3?</strong></li>
|
104 |
-
<p>El precio de AcroRip 9.0 3 varía según el vendedor y la región. Sin embargo, según el sitio web oficial, el precio es de $250 USD por un dongle. </p>
|
105 |
-
<li><strong> ¿Dónde puedo comprar AcroRip 9.0 3?</strong></li>
|
106 |
-
|
107 |
-
<li><strong>¿Cómo puedo actualizar AcroRip 9.0 3?</strong></li>
|
108 |
-
<p>Puede actualizar AcroRip 9.0 3 descargando la última versión desde el sitio web o el CD e instalándolo en su computadora. Tendrá que mantener el dongle conectado cuando actualice el software. </p>
|
109 |
-
<li><strong>¿Cuáles son los requisitos del sistema para AcroRip 9.0 3?</strong></li>
|
110 |
-
<p>Los requisitos del sistema para AcroRip 9.0 3 son los siguientes:</p>
|
111 |
-
<ul>
|
112 |
-
<li>Sistema operativo: Windows 7/8/10 (32 bits o 64 bits)</li>
|
113 |
-
<li>Procesador: Intel Core i3 o superior</li>
|
114 |
-
<li>Memoria: 4 GB de RAM o superior</li>
|
115 |
-
<li>Espacio en disco duro: 1 GB o superior</li>
|
116 |
-
<li>Pantalla: 1024 x 768 resolución o superior</li>
|
117 |
-
<li>Impresora: Impresora Epson con cabezal de impresión Epson</li>
|
118 |
-
</ul>
|
119 |
-
<li><strong>¿Cómo puedo contactar al equipo de soporte de AcroRip? </strong></li>
|
120 |
-
<p>Puede ponerse en contacto con el equipo de soporte de AcroRip enviando un correo electrónico a [email protected] o rellenando el formulario de contacto en el sitio web. También puede consultar la sección de preguntas frecuentes y el manual del usuario en el sitio web para obtener más información. </p>
|
121 |
-
<li><strong>¿Cómo puedo aprender más sobre AcroRip 9.0 3?</strong></li>
|
122 |
-
<p>Puede obtener más información sobre AcroRip 9.0 3 visitando el sitio web oficial, viendo los videos tutoriales, leyendo los comentarios de los usuarios y uniéndose al grupo de Facebook para usuarios de AcroRip. </p>
|
123 |
-
</ol></p> 64aa2da5cf<br />
|
124 |
-
<br />
|
125 |
-
<br />
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar Garena Free Fire Mod v1.47.0 APK: Cómo obtener la última versión del popular juego Battle Royale</h1>
|
3 |
-
<p>Si eres un fan de los juegos battle royale, debes haber oído hablar de Garena Free Fire, uno de los juegos más descargados y jugados en dispositivos Android e iOS. En este artículo, le mostraremos cómo descargar e instalar Garena Free Fire Mod v1.47.0 APK, una versión modificada del juego que le da acceso a recursos ilimitados, trucos y más. </p>
|
4 |
-
<h2>¿Qué es el fuego libre de Garena? </h2>
|
5 |
-
<p>Garena Free Fire es un juego multijugador online battle royale desarrollado por 111 Dots Studio y publicado por Garena para dispositivos Android e iOS. El juego fue lanzado en 2017 y desde entonces ha ganado más de 500 millones de descargas solo en Google Play Store. </p>
|
6 |
-
<h2>descargar caso penal la conspiración mod apk estrellas ilimitadas</h2><br /><p><b><b>Download File</b> --->>> <a href="https://bltlly.com/2v6LUr">https://bltlly.com/2v6LUr</a></b></p><br /><br />
|
7 |
-
<p>En Garena Free Fire, puedes elegir entre una gran variedad de personajes, armas, vehículos y objetos para sobrevivir en un mapa reducido con hasta 50 jugadores. Puedes jugar en solitario, dúo o modo escuadrón, y personalizar tu personaje con diferentes pieles, trajes, accesorios y mascotas. También puedes unirte o crear un gremio, chatear con otros jugadores, participar en eventos, misiones y torneos, y posicionarte en la clasificación global. </p>
|
8 |
-
<h2>¿Qué es Garena Free Fire Mod v1.47.0 APK? </h2>
|
9 |
-
<p>Garena Free Fire Mod v1.47.0 APK es una versión modificada del juego original que le da algunas características adicionales y ventajas que no están disponibles en la versión oficial. Por ejemplo, puedes obtener diamantes y monedas ilimitadas, que son las principales monedas en el juego que puedes usar para comprar artículos, actualizar a tu personaje o hacer girar la rueda de la suerte. </p>
|
10 |
-
|
11 |
-
<h2>Cómo descargar e instalar Garena Free Fire Mod v1.47.0 APK? </h2>
|
12 |
-
<p>Si desea probar Garena Free Fire Mod v1.47.0 APK, es necesario seguir estos sencillos pasos:</p>
|
13 |
-
<h4>Paso 1: Descargar los archivos APK y OBB de una fuente de confianza</h4>
|
14 |
-
<p>Lo primero que tienes que hacer es descargar los archivos APK y OBB de Garena Free Fire Mod v1.47.0 de una fuente confiable. Puedes usar este enlace o este enlace para obtenerlos. </p>
|
15 |
-
<p>El archivo APK es de unos 509 MB de tamaño, mientras que el archivo OBB es de unos 600 MB de tamaño. Asegúrate de tener suficiente espacio de almacenamiento en tu dispositivo antes de descargarlo. </p>
|
16 |
-
<h4>Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo</h4>
|
17 |
-
<p>Lo siguiente que debe hacer es habilitar fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store o la App Store. Para hacer esto, vaya a la configuración de su dispositivo, luego a la seguridad, luego cambie la opción de fuentes desconocidas. </p>
|
18 |
-
<p></p>
|
19 |
-
<p>Si tienes Android 8.0 o superior, es posible que tengas que permitir la instalación de aplicaciones desde fuentes específicas. Para hacer esto, vaya a la configuración del dispositivo, luego las aplicaciones y las notificaciones, luego las avanzadas, luego el acceso especial a la aplicación, luego instale aplicaciones desconocidas, luego seleccione el navegador o el administrador de archivos que utilizó para descargar los archivos APK y OBB y luego active la opción permitir de esta fuente. </p>
|
20 |
-
<h4>Paso 3: Instalar el archivo APK y extraer el archivo OBB a la carpeta Android/obb</h4>
|
21 |
-
<p>Después de habilitar fuentes desconocidas, ahora puede instalar el archivo APK de Garena Free Fire Mod v1.47.0. Para hacer esto, busque el archivo APK en su dispositivo usando un administrador de archivos o un navegador, luego toque en él y siga las instrucciones en la pantalla. </p>
|
22 |
-
|
23 |
-
<h4>Paso 4: Iniciar el juego y disfrutar de las características de mod</h4>
|
24 |
-
<p>Ahora que ha instalado el archivo APK y extraído el archivo OBB, puede iniciar el juego y disfrutar de las características de mod. Para hacer esto, vaya a su cajón de aplicaciones o pantalla de inicio y toque en el icono de Garena Free Fire. Debería ver una pantalla de carga con un menú mod que le permite activar o desactivar varias características de la versión modded. </p>
|
25 |
-
<p>También puede acceder al menú mod tocando el icono flotante en la pantalla durante el juego. Puede ajustar la configuración de acuerdo a sus preferencias y jugar con diamantes y monedas ilimitadas, auto-objetivo y wallhack, desbloquear todos los personajes y pieles, sin retroceso y sin niebla, y más. </p>
|
26 |
-
<h2>¿Cuáles son las características de Garena Free Fire Mod v1.47.0 APK? </h2>
|
27 |
-
<p>Garena Free Fire Mod v1.47.0 APK tiene muchas características que lo hacen diferente de la versión original del juego. Estas son algunas de las principales características y beneficios de la versión modded:</p>
|
28 |
-
<h4>Diamantes y monedas ilimitadas</h4>
|
29 |
-
<p>Con Garena Free Fire Mod v1.47.0 APK, puede obtener diamantes y monedas ilimitadas en su cuenta. Los diamantes y las monedas son las principales monedas del juego que puedes usar para comprar objetos, mejorar a tu personaje o hacer girar la rueda de la suerte. Normalmente, tienes que gastar dinero real o completar tareas para conseguirlas, pero con esta versión modificada, puedes conseguirlas gratis y sin límite. </p>
|
30 |
-
<h4>Auto-objetivo y wallhack</h4>
|
31 |
-
<p>Otra característica de Garena Free Fire Mod v1.47.0 APK es el auto-objetivo y wallhack. El objetivo automático es un truco que te permite apuntar automáticamente a tus enemigos sin tener que ajustar manualmente tu punto de mira. Wallhack es un truco que le permite ver a sus enemigos a través de las paredes y otros obstáculos. Estos trucos pueden ayudarte a ganar más partidos y posicionarte más rápido al darte una ventaja injusta sobre tus oponentes. </p>
|
32 |
-
<h4>Desbloquear todos los caracteres y skins</h4>
|
33 |
-
|
34 |
-
<h4>Sin retroceso y sin niebla</h4>
|
35 |
-
<p>Otra característica de Garena Free Fire Mod v1.47.0 APK no hay retroceso y no hay niebla. El retroceso es una característica que hace que tu arma se mueva hacia arriba o hacia los lados cuando la disparas, afectando tu precisión y control. La niebla es una característica que reduce la visibilidad en ciertas áreas del mapa, lo que hace que sea más difícil detectar a tus enemigos u objetivos. Estas características pueden afectar negativamente a su juego por lo que es más difícil y frustrante. Con esta versión modificada, puedes eliminarlos completamente y disfrutar de un juego más suave y claro. </p>
|
36 |
-
<h2>Conclusión</h2>
|
37 |
-
<p>G arena Free Fire Mod v1.47.0 APK es una versión modificada del popular juego de batalla real que le da recursos ilimitados, trucos, y más. Es fácil de descargar e instalar, y funciona en la mayoría de los dispositivos Android. Con esta versión modificada, puedes disfrutar de un juego más divertido y emocionante con características como diamantes y monedas ilimitadas, puntero automático y wallhack, desbloquear todos los personajes y pieles, sin retroceso y sin niebla, y más. </p>
|
38 |
-
<p>Si usted está buscando una manera de darle vida a su experiencia Garena Free Fire, definitivamente debe probar Garena Free Fire Mod v1.47.0 APK. Es gratuito, seguro y actualizado regularmente. Sin embargo, también debes tener cuidado de no abusar de las características del mod o usarlas en partidos clasificados, ya que esto puede resultar en una prohibición o suspensión del juego. También debes respetar a otros jugadores y jugar limpio. </p>
|
39 |
-
<p>Entonces, ¿qué estás esperando? Descargar Garena Free Fire Mod v1.47.0 APK hoy y disfrutar del último juego de batalla real en su dispositivo Android! </p>
|
40 |
-
<h2>Preguntas frecuentes</h2>
|
41 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Garena Free Fire Mod v1.47.0 APK:</p>
|
42 |
-
<h4>Q: ¿Es seguro usar Garena Free Fire Mod v1.47.0 APK? </h4>
|
43 |
-
|
44 |
-
<h4>Q: ¿Garena Free Fire Mod v1.47.0 APK es compatible con mi dispositivo? </h4>
|
45 |
-
<p>A: Garena Free Fire Mod v1.47.0 APK es compatible con la mayoría de los dispositivos Android que tienen Android 4.0.3 o superior y al menos 2 GB de RAM. Sin embargo, es posible que algunos dispositivos no soporten las características de mod o ejecuten el juego sin problemas debido a limitaciones de hardware o conflictos de software. </p>
|
46 |
-
<h4>Q: ¿Cómo puedo actualizar Garena Free Fire Mod v1.47.0 APK? </h4>
|
47 |
-
<p>A: Para actualizar Garena Free Fire Mod v1.47.0 APK, es necesario descargar la última versión de los archivos APK y OBB de una fuente de confianza y seguir los mismos pasos que instalarlo por primera vez. También debes hacer una copia de seguridad de los datos del juego antes de actualizarlo para evitar perder tu progreso o configuración. </p>
|
48 |
-
<h4>Q: ¿Puedo jugar Garena Free Fire Mod v1.47.0 APK con mis amigos? </h4>
|
49 |
-
<p>A: Sí, puedes jugar Garena Free Fire Mod v1.47.0 APK con tus amigos, siempre y cuando también tienen la misma versión modded del juego instalado en sus dispositivos. Puedes unirte o crear un equipo con ellos y jugar juntos en cualquier modo del juego. </p>
|
50 |
-
<h4>Q: ¿Puedo utilizar Garena Free Fire Mod v1.47.0 APK en los partidos clasificados? </h4>
|
51 |
-
<p>A: No, no debe utilizar Garena Free Fire Mod v1.47.0 APK en los partidos clasificados, ya que esto puede resultar en una prohibición o suspensión del juego por violar los términos del servicio o hacer trampa. Solo debes usar las características mod en partidos casuales o habitaciones personalizadas para fines de diversión y entretenimiento. </p> 64aa2da5cf<br />
|
52 |
-
<br />
|
53 |
-
<br />
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py
DELETED
@@ -1,27 +0,0 @@
from contextlib import ExitStack, contextmanager
from typing import ContextManager, Generator, TypeVar

_T = TypeVar("_T", covariant=True)


class CommandContextMixIn:
    def __init__(self) -> None:
        super().__init__()
        self._in_main_context = False
        self._main_context = ExitStack()

    @contextmanager
    def main_context(self) -> Generator[None, None, None]:
        assert not self._in_main_context

        self._in_main_context = True
        try:
            with self._main_context:
                yield
        finally:
            self._in_main_context = False

    def enter_context(self, context_provider: ContextManager[_T]) -> _T:
        assert self._in_main_context

        return self._main_context.enter_context(context_provider)
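
For reference, an illustrative sketch (not part of pip's public API) of how this mix-in is used: resources registered through enter_context() are released when main_context() exits. DemoCommand is a hypothetical subclass added only for the example.

import tempfile

class DemoCommand(CommandContextMixIn):
    def run(self) -> str:
        # The temporary directory is cleaned up when main_context() exits.
        tmp_dir = self.enter_context(tempfile.TemporaryDirectory())
        return tmp_dir

command = DemoCommand()
with command.main_context():
    print("working in", command.run())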
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py
DELETED
@@ -1,225 +0,0 @@
import sys
import time
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union

if sys.version_info >= (3, 8):
    from typing import Final
else:
    from pip._vendor.typing_extensions import Final  # pragma: no cover

from .segment import ControlCode, ControlType, Segment

if TYPE_CHECKING:
    from .console import Console, ConsoleOptions, RenderResult

STRIP_CONTROL_CODES: Final = [
    7,  # Bell
    8,  # Backspace
    11,  # Vertical tab
    12,  # Form feed
    13,  # Carriage return
]
_CONTROL_STRIP_TRANSLATE: Final = {
    _codepoint: None for _codepoint in STRIP_CONTROL_CODES
}

CONTROL_ESCAPE: Final = {
    7: "\\a",
    8: "\\b",
    11: "\\v",
    12: "\\f",
    13: "\\r",
}

CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
    ControlType.BELL: lambda: "\x07",
    ControlType.CARRIAGE_RETURN: lambda: "\r",
    ControlType.HOME: lambda: "\x1b[H",
    ControlType.CLEAR: lambda: "\x1b[2J",
    ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
    ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
    ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
    ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
    ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
    ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
    ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
    ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
    ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
    ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
    ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
    ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
}


class Control:
    """A renderable that inserts a control code (non printable but may move cursor).

    Args:
        *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
            tuple of ControlType and an integer parameter
    """

    __slots__ = ["segment"]

    def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
        control_codes: List[ControlCode] = [
            (code,) if isinstance(code, ControlType) else code for code in codes
        ]
        _format_map = CONTROL_CODES_FORMAT
        rendered_codes = "".join(
            _format_map[code](*parameters) for code, *parameters in control_codes
        )
        self.segment = Segment(rendered_codes, None, control_codes)

    @classmethod
    def bell(cls) -> "Control":
        """Ring the 'bell'."""
        return cls(ControlType.BELL)

    @classmethod
    def home(cls) -> "Control":
        """Move cursor to 'home' position."""
        return cls(ControlType.HOME)

    @classmethod
    def move(cls, x: int = 0, y: int = 0) -> "Control":
        """Move cursor relative to current position.

        Args:
            x (int): X offset.
            y (int): Y offset.

        Returns:
            ~Control: Control object.

        """

        def get_codes() -> Iterable[ControlCode]:
            control = ControlType
            if x:
                yield (
                    control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
                    abs(x),
                )
            if y:
                yield (
                    control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
                    abs(y),
                )

        control = cls(*get_codes())
        return control

    @classmethod
    def move_to_column(cls, x: int, y: int = 0) -> "Control":
        """Move to the given column, optionally add offset to row.

        Returns:
            x (int): absolute x (column)
            y (int): optional y offset (row)

        Returns:
            ~Control: Control object.
        """

        return (
            cls(
                (ControlType.CURSOR_MOVE_TO_COLUMN, x),
                (
                    ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
                    abs(y),
                ),
            )
            if y
            else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
        )

    @classmethod
    def move_to(cls, x: int, y: int) -> "Control":
        """Move cursor to absolute position.

        Args:
            x (int): x offset (column)
            y (int): y offset (row)

        Returns:
            ~Control: Control object.
        """
        return cls((ControlType.CURSOR_MOVE_TO, x, y))

    @classmethod
    def clear(cls) -> "Control":
        """Clear the screen."""
        return cls(ControlType.CLEAR)

    @classmethod
    def show_cursor(cls, show: bool) -> "Control":
        """Show or hide the cursor."""
        return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)

    @classmethod
    def alt_screen(cls, enable: bool) -> "Control":
        """Enable or disable alt screen."""
        if enable:
            return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
        else:
            return cls(ControlType.DISABLE_ALT_SCREEN)

    @classmethod
    def title(cls, title: str) -> "Control":
        """Set the terminal window title

        Args:
            title (str): The new terminal window title
        """
        return cls((ControlType.SET_WINDOW_TITLE, title))

    def __str__(self) -> str:
        return self.segment.text

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        if self.segment.text:
            yield self.segment


def strip_control_codes(
    text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
) -> str:
    """Remove control codes from text.

    Args:
        text (str): A string possibly contain control codes.

    Returns:
        str: String with control codes removed.
    """
    return text.translate(_translate_table)


def escape_control_codes(
    text: str,
    _translate_table: Dict[int, str] = CONTROL_ESCAPE,
) -> str:
    """Replace control codes with their "escaped" equivalent in the given text.
    (e.g. "\b" becomes "\\b")

    Args:
        text (str): A string possibly containing control codes.

    Returns:
        str: String with control codes replaced with their escaped version.
    """
    return text.translate(_translate_table)


if __name__ == "__main__":  # pragma: no cover
    from pip._vendor.rich.console import Console

    console = Console()
    console.print("Look at the title of your terminal window ^")
    # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
    for i in range(10):
        console.set_window_title("🚀 Loading" + "." * i)
        time.sleep(0.5)
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py
DELETED
@@ -1,86 +0,0 @@
-from collections.abc import Mapping
-from typing import TYPE_CHECKING, Any, Optional, Tuple
-
-from .highlighter import ReprHighlighter
-from .panel import Panel
-from .pretty import Pretty
-from .table import Table
-from .text import Text, TextType
-
-if TYPE_CHECKING:
-    from .console import ConsoleRenderable
-
-
-def render_scope(
-    scope: "Mapping[str, Any]",
-    *,
-    title: Optional[TextType] = None,
-    sort_keys: bool = True,
-    indent_guides: bool = False,
-    max_length: Optional[int] = None,
-    max_string: Optional[int] = None,
-) -> "ConsoleRenderable":
-    """Render python variables in a given scope.
-
-    Args:
-        scope (Mapping): A mapping containing variable names and values.
-        title (str, optional): Optional title. Defaults to None.
-        sort_keys (bool, optional): Enable sorting of items. Defaults to True.
-        indent_guides (bool, optional): Enable indentation guides. Defaults to False.
-        max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
-            Defaults to None.
-        max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
-
-    Returns:
-        ConsoleRenderable: A renderable object.
-    """
-    highlighter = ReprHighlighter()
-    items_table = Table.grid(padding=(0, 1), expand=False)
-    items_table.add_column(justify="right")
-
-    def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
-        """Sort special variables first, then alphabetically."""
-        key, _ = item
-        return (not key.startswith("__"), key.lower())
-
-    items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
-    for key, value in items:
-        key_text = Text.assemble(
-            (key, "scope.key.special" if key.startswith("__") else "scope.key"),
-            (" =", "scope.equals"),
-        )
-        items_table.add_row(
-            key_text,
-            Pretty(
-                value,
-                highlighter=highlighter,
-                indent_guides=indent_guides,
-                max_length=max_length,
-                max_string=max_string,
-            ),
-        )
-    return Panel.fit(
-        items_table,
-        title=title,
-        border_style="scope.border",
-        padding=(0, 1),
-    )
-
-
-if __name__ == "__main__":  # pragma: no cover
-    from pip._vendor.rich import print
-
-    print()
-
-    def test(foo: float, bar: float) -> None:
-        list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
-        dict_of_things = {
-            "version": "1.1",
-            "method": "confirmFruitPurchase",
-            "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
-            "id": "194521489",
-        }
-        print(render_scope(locals(), title="[i]locals", sort_keys=False))
-
-    test(20.3423, 3.1427)
-    print()

spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md
DELETED
@@ -1,65 +0,0 @@
-
-
-# Things that might be relevant
-
-## Trained models
-
-ESPnet model for Yoloxochitl Mixtec
-- Huggingface Hub page https://huggingface.co/espnet/ftshijt_espnet2_asr_yolo_mixtec_transformer
-- Model source code https://github.com/espnet/espnet/tree/master/egs/yoloxochitl_mixtec/asr1
-- Colab notebook to set up and apply the model https://colab.research.google.com/drive/1ieoW2b3ERydjaaWuhVPBP_v2QqqWsC1Q?usp=sharing
-
-Coqui model for Yoloxochitl Mixtec
-- Huggingface Hub page
-- Coqui page https://coqui.ai/mixtec/jemeyer/v1.0.0
-- Colab notebook to set up and apply the model https://colab.research.google.com/drive/1b1SujEGC_F3XhvUCuUyZK_tyUkEaFZ7D?usp=sharing#scrollTo=6IvRFke4Ckpz
-
-Spanish ASR models
-- XLS-R model based on CV8 with LM https://huggingface.co/jonatasgrosman/wav2vec2-xls-r-1b-spanish
-- XLSR model based on CV6 with LM https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-spanish
-- XLSR model based on Librispeech https://huggingface.co/IIC/wav2vec2-spanish-multilibrispeech
-
-Speechbrain language identification on Common Language (from Common Voice 6/7?)
-- source code https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonLanguage
-- HF Hub model page https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa
-- HF Hub space https://huggingface.co/spaces/akhaliq/Speechbrain-audio-classification
-
-Speechbrain language identification on VoxLingua
-- source code https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxLingua107/lang_id
-- HF Hub model page https://huggingface.co/speechbrain/lang-id-voxlingua107-ecapa
-
-
-## Corpora
-
-OpenSLR89 https://www.openslr.org/89/
-
-Common Language https://huggingface.co/datasets/common_language
-
-VoxLingua http://bark.phon.ioc.ee/voxlingua107/
-
-Multilibrispeech https://huggingface.co/datasets/multilingual_librispeech
-
-
-# Possible demos
-
-## Simple categorization of utterances
-
-A few example files are provided for each language, and the user can record their own.
-The predicted confidence of each class label is shown.
-
-## Segmentation and identification
-
-Recordings with alternating languages in a single audio file, provided as examples or recorded by the user.
-Voice activity detection splits the audio, then the language of each piece is predicted.
-
-## Identification and transcription
-
-Example files for each language separately.
-The lang-id model predicts which language it is.
-The corresponding ASR model produces a transcript.
-
-## Segmentation, identification and transcription
-
-Recordings with alternating languages in a single audio file.
-Use voice activity detection to split the audio, then predict the language of each piece.
-Use the corresponding ASR model to produce a transcript of each piece to display.

spaces/CVPR/LIVE/thrust/generate_mk.py
DELETED
@@ -1,146 +0,0 @@
-#!/usr/bin/env python
-# Generate set of projects mk files.
-# Usage: python generate_mk.py PROJECTS_MK_DIR THRUST_SOURCE_DIR
-# The program scans through unit tests and examples in THRUST_SOURCE_DIR
-# and generates project mk for each of the tests and examples in PROJECTS_MK_DIR
-# A single example or unit test source file generates its own executable
-# This program is called by a top level Makefile, but can also be used stand-alone for debugging
-# This program also generates testing.mk, examples.mk and dependencies.mk
-from __future__ import print_function
-import sys
-import shutil as sh
-import os
-import glob
-import re
-
-test_template = """
-TEST_SRC := %(TEST_SRC)s
-TEST_NAME := %(TEST_NAME)s
-include $(ROOTDIR)/thrust/internal/build/generic_test.mk
-"""
-example_template = """
-EXAMPLE_SRC := %(EXAMPLE_SRC)s
-EXAMPLE_NAME := %(EXAMPLE_NAME)s
-include $(ROOTDIR)/thrust/internal/build/generic_example.mk
-"""
-
-def Glob(pattern, directory, exclude='\B'):
-    src = glob.glob(os.path.join(directory, pattern))
-    p = re.compile(exclude)
-    src = [s for s in src if not p.match(s)]
-    return src
-
-
-def generate_test_mk(mk_path, test_path, group, TEST_DIR):
-    print('Generating makefiles in "'+mk_path+'" for tests in "'+test_path+'"')
-    src_cu = Glob("*.cu", test_path, ".*testframework.cu$")
-    src_cxx = Glob("*.cpp", test_path)
-    src_cu.sort();
-    src_cxx.sort();
-    src_all = src_cu + src_cxx;
-    tests_all = []
-    dependencies_all = []
-    for s in src_all:
-        fn = os.path.splitext(os.path.basename(s));
-        t = "thrust."+group+"."+fn[0]
-        e = fn[1]
-        mkfile = test_template % {"TEST_SRC" : s, "TEST_NAME" : t}
-        f = open(os.path.join(mk_path, t+".mk"), 'w')
-        f.write(mkfile)
-        f.close()
-        tests_all.append(os.path.join(mk_path, t))
-        dependencies_all.append(t+": testframework")
-    return [tests_all, dependencies_all]
-
-def generate_example_mk(mk_path, example_path, group, EXAMPLE_DIR):
-    print('Generating makefiles in "'+mk_path+'" for examples in "'+example_path+'"')
-    src_cu = Glob("*.cu", example_path)
-    src_cxx = Glob("*.cpp", example_path)
-    src_cu.sort();
-    src_cxx.sort();
-    src_all = src_cu + src_cxx;
-    examples_all = []
-    for s in src_all:
-        fn = os.path.splitext(os.path.basename(s));
-        t = "thrust."+group+"."+fn[0]
-        e = fn[1]
-        mkfile = example_template % {"EXAMPLE_SRC" : s, "EXAMPLE_NAME" : t}
-        f = open(os.path.join(mk_path, t+".mk"), 'w')
-        f.write(mkfile)
-        f.close()
-        examples_all.append(os.path.join(mk_path, t))
-    return examples_all
-
-
-## relpath : backported from os.relpath from python 2.6+
-def relpath(path, start):
-    """Return a relative version of a path"""
-
-    import posixpath
-    if not path:
-        raise ValueError("no path specified")
-    start_list = posixpath.abspath(start).split(posixpath.sep)
-    path_list = posixpath.abspath(path).split(posixpath.sep)
-    # Work out how much of the filepath is shared by start and path.
-    i = len(posixpath.commonprefix([start_list, path_list]))
-    rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
-    if not rel_list:
-        return posixpath.curdir
-    return posixpath.join(*rel_list)
-
-mk_path = sys.argv[1]
-REL_DIR = "../../"
-if (len(sys.argv) > 2):
-    root_path = sys.argv[2];
-    mk_path = relpath(mk_path, root_path)
-    REL_DIR = relpath(root_path, mk_path)
-
-try:
-    sh.rmtree(mk_path)
-except:
-    pass
-os.makedirs(mk_path)
-
-tests_all, dependencies_all = generate_test_mk(mk_path, "testing/", "test", REL_DIR)
-tests_cu, dependencies_cu = generate_test_mk(mk_path, "testing/cuda/", "test.cuda", REL_DIR)
-tests_all.extend(tests_cu)
-dependencies_all.extend(dependencies_cu)
-
-testing_mk = ""
-
-for t in tests_all:
-    testing_mk += "PROJECTS += "+t+"\n"
-testing_mk += "PROJECTS += internal/build/testframework\n"
-
-
-f = open(os.path.join(mk_path, "testing.mk"), 'w')
-f.write(testing_mk)
-f.close()
-
-dependencies_mk = ""
-for d in dependencies_all:
-    dependencies_mk += d + "\n"
-
-f = open(os.path.join(mk_path, "dependencies.mk"), 'w')
-f.write(dependencies_mk)
-f.close()
-
-
-examples_mk = ""
-examples_all = generate_example_mk(mk_path, "examples/", "example", REL_DIR)
-examples_cuda = generate_example_mk(mk_path, "examples/cuda/", "example.cuda", REL_DIR)
-examples_all.extend(examples_cuda)
-for e in examples_all:
-    examples_mk += "PROJECTS += "+e+"\n"
-
-f = open(os.path.join(mk_path, "examples.mk"), 'w')
-f.write(examples_mk)
-f.close()

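For illustration, this is how the test template above expands; the source file name used here is hypothetical, chosen only to show the substitution.

# Illustration only: filling the test template for a made-up testing/scan.cu.
test_template = """
TEST_SRC := %(TEST_SRC)s
TEST_NAME := %(TEST_NAME)s
include $(ROOTDIR)/thrust/internal/build/generic_test.mk
"""
print(test_template % {"TEST_SRC": "testing/scan.cu", "TEST_NAME": "thrust.test.scan"})
# -> TEST_SRC := testing/scan.cu
#    TEST_NAME := thrust.test.scan
#    include $(ROOTDIR)/thrust/internal/build/generic_test.mk
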
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h
DELETED
@@ -1,23 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system inherits transform_reduce
-#include <thrust/system/cpp/detail/transform_reduce.h>

spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
DELETED
@@ -1,83 +0,0 @@
-from mmcv.cnn.bricks import build_plugin_layer
-from mmcv.runner import force_fp32
-
-from mmdet.models.builder import ROI_EXTRACTORS
-from .base_roi_extractor import BaseRoIExtractor
-
-
-@ROI_EXTRACTORS.register_module()
-class GenericRoIExtractor(BaseRoIExtractor):
-    """Extract RoI features from all feature map levels.
-
-    This is the implementation of `A novel Region of Interest Extraction Layer
-    for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
-
-    Args:
-        aggregation (str): The method to aggregate multiple feature maps.
-            Options are 'sum', 'concat'. Default: 'sum'.
-        pre_cfg (dict | None): Specify pre-processing modules. Default: None.
-        post_cfg (dict | None): Specify post-processing modules. Default: None.
-        kwargs (keyword arguments): Arguments that are the same
-            as :class:`BaseRoIExtractor`.
-    """
-
-    def __init__(self,
-                 aggregation='sum',
-                 pre_cfg=None,
-                 post_cfg=None,
-                 **kwargs):
-        super(GenericRoIExtractor, self).__init__(**kwargs)
-
-        assert aggregation in ['sum', 'concat']
-
-        self.aggregation = aggregation
-        self.with_post = post_cfg is not None
-        self.with_pre = pre_cfg is not None
-        # build pre/post processing modules
-        if self.with_post:
-            self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
-        if self.with_pre:
-            self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
-
-    @force_fp32(apply_to=('feats', ), out_fp16=True)
-    def forward(self, feats, rois, roi_scale_factor=None):
-        """Forward function."""
-        if len(feats) == 1:
-            return self.roi_layers[0](feats[0], rois)
-
-        out_size = self.roi_layers[0].output_size
-        num_levels = len(feats)
-        roi_feats = feats[0].new_zeros(
-            rois.size(0), self.out_channels, *out_size)
-
-        # sometimes rois is an empty tensor
-        if roi_feats.shape[0] == 0:
-            return roi_feats
-
-        if roi_scale_factor is not None:
-            rois = self.roi_rescale(rois, roi_scale_factor)
-
-        # mark the starting channels for concat mode
-        start_channels = 0
-        for i in range(num_levels):
-            roi_feats_t = self.roi_layers[i](feats[i], rois)
-            end_channels = start_channels + roi_feats_t.size(1)
-            if self.with_pre:
-                # apply pre-processing to a RoI extracted from each layer
-                roi_feats_t = self.pre_module(roi_feats_t)
-            if self.aggregation == 'sum':
-                # and sum them all
-                roi_feats += roi_feats_t
-            else:
-                # and concat them along channel dimension
-                roi_feats[:, start_channels:end_channels] = roi_feats_t
-            # update channels starting position
-            start_channels = end_channels
-        # check if concat channels match at the end
-        if self.aggregation == 'concat':
-            assert start_channels == self.out_channels
-
-        if self.with_post:
-            # apply post-processing before returning the result
-            roi_feats = self.post_module(roi_feats)
-        return roi_feats

spaces/ChristopherMarais/Andrew_Alpha/README.md
DELETED
@@ -1,19 +0,0 @@
----
-title: Andrew Alpha
-emoji: 👁
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
-license: other
----
-
-This is a proof-of-concept version of our Artificial Intelligence model to classify images of bark and ambrosia beetles. As an input, please use an image of a specimen, or a group of specimens, ideally in ethanol with a white background.
-
-This proof-of-concept model has been trained on a preliminary sample of 12 species: Coccotypes dactyliperda, Hylesinus varius, Monarthrum fasciatum, Phloeosinus dentatus, Pityophthorus juglandis, Platypus cylindrus, Pycnarthrum hispidium, Scolytodes schwarzi, Xyleborinus saxesenii, Xyleborus affinis, Xylosandrus compactus, and Xylosandrus crassiusculus.
-
-For correct interpretation of the results, it is important to consider not just the suggested name, but also the associated probability. Identification of other species is coming soon, as soon as they are added to the training set.
-
-You can find example photos [here](https://ambrosiasymbiosis.org/automated_identification/examples.html)

spaces/CofAI/chat/g4f/Provider/Providers/Phind.py
DELETED
@@ -1,36 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://phind.com'
-model = ['gpt-4']
-supports_stream = True
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
-    path = os.path.dirname(os.path.realpath(__file__))
-    config = json.dumps({
-        'model': model,
-        'messages': messages}, separators=(',', ':'))
-
-    cmd = ['python', f'{path}/helpers/phind.py', config]
-
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-    for line in iter(p.stdout.readline, b''):
-        if b'<title>Just a moment...</title>' in line:
-            os.system('clear' if os.name == 'posix' else 'cls')
-            yield 'Cloudflare error, please try again...'
-            os._exit(0)
-
-        else:
-            if b'ping - 2023-' in line:
-                continue
-
-            yield line.decode('cp1251')  # [:-1]
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

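A minimal usage sketch of the generator-based provider interface above. Hypothetical: it assumes the g4f package layout shown in the path and that the helpers/phind.py script it shells out to is present.

# Hypothetical call into the provider's streaming generator.
from g4f.Provider.Providers import Phind

messages = [{"role": "user", "content": "Explain binary search briefly."}]
for chunk in Phind._create_completion(model="gpt-4", messages=messages, stream=True):
    print(chunk, end="", flush=True)
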
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py
DELETED
@@ -1,89 +0,0 @@
-import numpy as np
-from scipy.spatial import Delaunay
-from .area_of_polygon import area_of_polygon_crd
-import networkx as nx
-
-def sqrt_sum(a, b):
-    x = (a[0]-b[0])
-    y = (a[1]-b[1])
-    return np.sqrt(x*x+y*y)
-
-def shapeToSomePolygons(shape):
-    G = nx.Graph()
-    allnodes = set()
-    for line in shape:
-        G.add_nodes_from(line)
-        G.add_edge(line[0], line[1])
-        allnodes.add(line[0])
-        allnodes.add(line[1])
-
-    result = []
-
-    while allnodes:
-        node = allnodes.pop()
-        new_node = next(iter(G[node]), None)
-        if not new_node: continue
-
-        G.remove_edge(node, new_node)
-        temp = nx.shortest_path(G, node, new_node)
-        for j, t in enumerate(temp):
-            if t in allnodes:
-                allnodes.remove(t)
-        result.append(temp)
-    return result
-
-def getAlfaShapes(pts, alfas=1):
-    tri_ind = [(0,1),(1,2),(2,0)]
-    tri = Delaunay(pts)
-    lenghts = {}
-    for s in tri.simplices:
-        for ind in tri_ind:
-            a = pts[s[ind[0]]]
-            b = pts[s[ind[1]]]
-            # print('a---', a)
-            # print('b---', b)
-            line = (a, b)
-            # line = ((a[0], a[1]), (b[0], b[1]))
-            lenghts[line] = sqrt_sum(a, b)
-
-    ls = sorted(lenghts.values())
-
-    mean_length = np.mean(ls)
-    mean_length_index = ls.index(next(filter(lambda x: x>=mean_length, ls)))
-    magic_numbers = [ls[i] for i in range(mean_length_index, len(ls))]
-    magic_numbers[0] = 0
-    sum_magic = np.sum(magic_numbers)
-    for i in range(2, len(magic_numbers)):
-        magic_numbers[i] += magic_numbers[i-1]
-    magic_numbers = [m/sum_magic for m in magic_numbers]
-
-    rez = []
-    for alfa in alfas:
-        i = magic_numbers.index(next(filter(lambda z: z > alfa, magic_numbers), magic_numbers[-1]))
-        av_length = ls[mean_length_index+i]
-
-        lines = {}
-
-        for s in tri.simplices:
-            used = True
-            for ind in tri_ind:
-                if lenghts[(pts[s[ind[0]]], pts[s[ind[1]]])] > av_length:
-                    used = False
-                    break
-            if used == False: continue
-
-            for ind in tri_ind:
-                i, j = s[ind[0]], s[ind[1]]
-                line = (pts[min(i,j)], pts[max(i,j)])
-                lines[line] = line in lines
-
-        good_lines = []
-        for v in lines:
-            if not lines[v]:
-                good_lines.append(v)
-
-        result = shapeToSomePolygons(good_lines)
-        result.sort(key=area_of_polygon_crd, reverse=True)
-        rez.append(result)
-    return rez

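A minimal usage sketch for getAlfaShapes. Hypothetical: it assumes the maskrcnn_benchmark package (including the sibling area_of_polygon module it imports) is importable, and that points are passed as hashable (x, y) tuples as the edge-dictionary keys require.

# Hypothetical usage: outline a small point cloud at a single alpha value.
from maskrcnn_benchmark.data.datasets.evaluation.word.alfashape import getAlfaShapes

pts = [(0.0, 0.0), (2.0, 0.0), (2.0, 1.0), (0.0, 1.0), (1.0, 0.5), (1.0, 2.5)]
shapes_per_alfa = getAlfaShapes(pts, alfas=[0.9])   # one list of polygons per alpha value
print(shapes_per_alfa[0])                           # polygons sorted by area, largest first
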
spaces/Dagfinn1962/stablediffusion-models/main.css
DELETED
@@ -1,57 +0,0 @@
-body {
-  background-color: #214d09;
-  width: 100%;
-  color: #FFFFFF;
-}
-
-h3 {
-  color: #FFFFFF;
-  text-align: center;
-  font-family: verdana;
-  font-size: 24px;
-  border: 1px solid #FFFFFF;
-  border-radius: 10px;
-}
-
-p {
-  font-family: verdana;
-  font-size: 14px;
-}
-
-label {
-  font-family: verdana;
-  color: #000000;
-  font-weight: 700;
-  font-size: 14px;
-  border: 1px solid #000000;
-}
-
-gr.Textbox {
-  font-family: verdana;
-  background-color: #279700;
-  color: #000000;
-  font-weight: 700;
-  font-size: 14px;
-  border: 1px solid #FFFFFF;
-  border-radius: 6px;
-}
-
-gr.Botton {
-  font-family: verdana;
-  background-color: #279700;
-  color: #FFFFFF;
-  font-weight: 700;
-  font-size: 14px;
-  border: 1px solid #000000;
-  border-radius: 6px;
-}
-
-a, a:active, a:hover {
-  font-family: verdana;
-  color: #572430;
-  text-decoration: none;
-  font-weight: 700;
-  font-size: 14px;
-}