parquet-converter committed
Commit 7a27b36 · 1 Parent(s): ec3ffa9

Update parquet files (step 114 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/Provider/Providers/Acytoo.py +0 -41
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/Bascom Avr 2.0.7.5 Crack _BEST_.md +0 -70
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Analisisliterariodelamiskisimi.md +0 -19
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Embarcadero Rad Studio 2010 Keygen Crack Learn How to Create Amazing Applications with Delphi and C.md +0 -89
  5. spaces/1gistliPinn/ChatGPT4/Examples/Bel Ami Pin Ups Young And Tender.md +0 -28
  6. spaces/1line/AutoGPT/autogpt/memory/local.py +0 -136
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us on Chromebook How to Install and Enjoy the Game.md +0 -107
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apa yang Baru di Stumble Guys APK Mod? Cek Fitur dan Cara Unduhnya di Sini.md +0 -123
  9. spaces/1phancelerku/anime-remove-background/ .md +0 -149
  10. spaces/1phancelerku/anime-remove-background/BGMI 2.0 APK for 32 bit devices Features size and compatibility.md +0 -126
  11. spaces/1phancelerku/anime-remove-background/CarX Highway Racing APK Mod How to Get Free Money and Gold.md +0 -99
  12. spaces/1phancelerku/anime-remove-background/Download Live MOD APK The Best Live Streaming App with Amazing Rewards.md +0 -171
  13. spaces/1phancelerku/anime-remove-background/Find and Download the Perfect 3D Printer Models in STL OBJ and 3MF Formats.md +0 -126
  14. spaces/2023Liu2023/bingo/src/components/chat-image.tsx +0 -170
  15. spaces/2023Liu2023/bingo/src/pages/api/kblob.ts +0 -56
  16. spaces/232labs/VToonify/vtoonify/model/raft/README.md +0 -80
  17. spaces/AIARTCHAN/openpose_editor/README.md +0 -11
  18. spaces/AIFILMS/generate_human_motion/VQ-Trans/visualize/simplify_loc2rot.py +0 -131
  19. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/light.py +0 -385
  20. spaces/AIGC-Audio/AudioGPT/sound_extraction/model/text_encoder.py +0 -45
  21. spaces/AIWaves/Software_Company/src/agents/utils.py +0 -480
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/Factory.d.ts +0 -9
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/lineprogress/Factory.js +0 -13
  24. spaces/AkashKhamkar/Job_Search_Engine/README.md +0 -13
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/custom_diffusion.md +0 -303
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/overview.md +0 -80
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/overview.md +0 -73
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +0 -1002
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_unipc_multistep.py +0 -681
  30. spaces/Andy1621/uniformer_image_detection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py +0 -105
  31. spaces/Andy1621/uniformer_image_detection/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py +0 -23
  32. spaces/Angello06/SoylaloGaming/README.md +0 -13
  33. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/Training_PRO/custom_scheduler.py +0 -175
  34. spaces/Anitha0531/SpeechtoText/app.py +0 -116
  35. spaces/Anthony7906/MengHuiMXD_GPT/readme/README_ja.md +0 -126
  36. spaces/Apex-X/nono/.github/ISSUE_TEMPLATE/bug.md +0 -47
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/ordered_set.py +0 -488
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/specifiers.py +0 -802
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/windows_support.py +0 -29
  40. spaces/Avkash/Satellite_Segmentation_Prediction/app.py +0 -66
  41. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py +0 -124
  42. spaces/Benebene/Chat-question-answering/test.py +0 -62
  43. spaces/Benson/text-generation/Examples/Coche Carretera Carreras Mod Apk Happymod.md +0 -47
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/_jaraco_text.py +0 -109
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py +0 -0
  46. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/__init__.py +0 -0
  47. spaces/Bostoncake/ChatAssistant/app.py +0 -146
  48. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/common.py +0 -147
  49. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/rotated_boxes.py +0 -23
  50. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/test_time_augmentation.py +0 -285
spaces/101-5/gpt4free/g4f/Provider/Providers/Acytoo.py DELETED
@@ -1,41 +0,0 @@
- import os, requests
- from ...typing import sha256, Dict, get_type_hints
- import json
-
- url = "https://chat.acytoo.com/api/completions"
- model = ['gpt-3.5-turbo']
- supports_stream = False
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     base = ''
-     for message in messages:
-         base += '%s: %s\n' % (message['role'], message['content'])
-     base += 'assistant:'
-
-     headers = {
-         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
-     }
-     data = {
-         "key": "",
-         "model": "gpt-3.5-turbo",
-         "messages": [
-             {
-                 "role": "user",
-                 "content": base,
-                 "createdAt": 1688518523500
-             }
-         ],
-         "temperature": 1,
-         "password": ""
-     }
-
-     response = requests.post(url, headers=headers, data=json.dumps(data))
-     if response.status_code == 200:
-         yield response.text
-     else:
-         print(f"Error Occurred::{response.status_code}")
-         return None
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
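The `params` line in the deleted file above advertises the provider's signature by introspecting `_create_completion`. A minimal, self-contained sketch of that same introspection pattern (the function body and printed output here are illustrative only, not part of the original file):

```python
from typing import get_type_hints

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    ...

# co_varnames lists a function's locals in declaration order; slicing by
# co_argcount keeps only the named positional parameters (kwargs excluded).
names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
hints = get_type_hints(_create_completion)
print(', '.join(f'{n}: {hints[n].__name__}' for n in names))
# prints: model: str, messages: list, stream: bool
```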
spaces/1acneusushi/gradio-2dmoleculeeditor/Bascom Avr 2.0.7.5 Crack _BEST_.md DELETED
@@ -1,70 +0,0 @@
- ## Bascom Avr 2.0.7.5 Crack
-
-
-
-
-
-
-
-
-
-
-
- **DOWNLOAD ►►► [https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2txKMq&sa=D&sntz=1&usg=AOvVaw2H\_OwVAIkwGcvp0gf3atlo](https://www.google.com/url?q=https%3A%2F%2Ftlniurl.com%2F2txKMq&sa=D&sntz=1&usg=AOvVaw2H_OwVAIkwGcvp0gf3atlo)**
-
-
-
-
-
-
-
-
-
-
-
-
-
- # Bascom AVR 2.0.7.5: A Powerful and Easy-to-Use Compiler for AVR Microcontrollers
-
-
-
- If you are looking for a compiler that can help you program the AVR series of microcontrollers developed by Atmel, you might want to check out Bascom AVR 2.0.7.5. This is a very powerful and user-friendly compiler that comes with a lot of features and flexibility.
-
-
-
- Bascom AVR 2.0.7.5 has a simple and intuitive interface that lets you write your code with ease. It supports many common commands and loops that are similar to those in C or C++. It also has a built-in PDF viewer that allows you to access your circuit schematics and pin configurations while coding.
-
-
-
- One of the best things about Bascom AVR 2.0.7.5 is that it can directly burn your flash file into a microcontroller using just four wires connected to your computer's parallel port (LPT port). You don't need any external programmer or hardware to do this. If you are using a laptop or a netbook with only a USB port, you can still compile and save your program as a hex or bin file and burn it later using a USBISP burner with any third-party flash burning tool.
-
-
-
- Bascom AVR 2.0.7.5 also comes with some useful tools that can help you debug and test your program before burning it into a microcontroller. These include a simulator, a syntax checker, and an emulator. You can also use more than a hundred sample programs and free online tutorials to learn how to program with Bascom AVR 2.0.7.5.
-
-
-
- Bascom AVR 2.0.7.5 is not a freeware, but you can download a demo version from the official website[^1^]. The demo version can compile up to 4KB of code and has some limitations on the functions and commands available. The full version of the software can be purchased for $116 from the same website[^1^].
-
-
-
- If you are interested in learning more about Bascom AVR 2.0.7.5, you can visit the official website[^1^] or read some reviews and suggestions from other users[^2^] [^3^]. Bascom AVR 2.0.7.5 is a great compiler for anyone who wants to program AVR microcontrollers with ease and efficiency.
-
-
-
- Bascom AVR 2.0.7.5 is compatible with many AVR microcontrollers, such as ATmega, ATtiny, AT90S, and XMEGA. You can choose the microcontroller model from a drop-down list and see its features and specifications in the program. You can also use the built-in code generator to create the initial code for your project based on the microcontroller and the peripherals you want to use.
-
-
-
- Bascom AVR 2.0.7.5 has a powerful editor that supports syntax highlighting, auto-completion, code folding, bookmarks, and search and replace functions. You can also use the editor to insert comments, directives, labels, and variables in your code. The editor also has a split-screen mode that allows you to view and edit two parts of your code at the same time.
-
-
-
- Bascom AVR 2.0.7.5 can produce various output formats for your program, such as HEX, BIN, ROM, COFF, and OBJ. You can also view the assembly code and the memory map of your program in the program. You can also use the program to print your code or export it as a text file.
-
- 1b8d091108
-
-
-
-
-
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Analisisliterariodelamiskisimi.md DELETED
@@ -1,19 +0,0 @@
-
- <h1>Literary analysis of La Miskisi Mi: a novel of identity and resistance</h1>
- <p>La Miskisi Mi is a novel written by the Bolivian author Víctor Montoya, published in 2017. The work tells the story of a young Indigenous woman who lives in the city of El Alto, in Bolivia, and who faces discrimination, violence, and poverty. The novel is a reflection on the identity, culture, and resistance of Indigenous peoples in the context of globalization and neoliberalism.</p>
- <h2>analisisliterariodelamiskisimi</h2><br /><p><b><b>Download File</b> &bull;&bull;&bull; <a href="https://byltly.com/2uKz1c">https://byltly.com/2uKz1c</a></b></p><br /><br />
- <p>In this article, we carry out a literary analysis of La Miskisi Mi, following the basic steps for interpreting a narrative work: literary, historical, and sociocultural context; description of the work; theme; plot; characters; structure; stylistic devices; and critical assessment.</p>
-
- <h2>Literary, historical, and sociocultural context</h2>
- <p>Víctor Montoya was born in La Paz, Bolivia, in 1958. He is a writer, journalist, and teacher who has lived much of his life in exile because of his political activism against the military dictatorships that governed his country in the 1970s and 1980s. His literary work spans genres including the short story, the novel, the essay, and the chronicle. His main themes are historical memory, cultural identity, political and social violence, and the defense of human rights.</p>
- <p>La Miskisi Mi belongs to the narrative genre and to the subgenre of the social novel. It falls within the literary movement of critical realism, which seeks to portray social reality objectively and to denounce the injustices and inequalities suffered by the most vulnerable sectors of society. The novel was published in 2017, at a historical moment marked by the government of President Evo Morales, the first Indigenous head of state of Bolivia, who promoted a series of political, economic, and social reforms in favor of the popular majorities and Indigenous peoples.</p>
- <p></p>
- <p>The novel reflects the sociocultural context of Bolivia, a plurinational and multicultural country that is home to more than 30 Indigenous peoples with their own languages, customs, and worldviews. However, it also shows the contradictions and conflicts among the different cultures and social classes that coexist in Bolivian territory. The novel centers on the reality of El Alto, a city located more than 4,000 meters above sea level, known as a bastion of popular resistance and for its majority Indigenous and migrant population.</p>
-
- <h2>Description of the work</h2>
- <p>Below, we describe the most relevant elements of the work: theme, plot, characters, structure, and stylistic devices.</p>
-
- <h3>Theme</h3>
- <p>The main theme of the novel is a young Indigenous woman's search for identity in a hostile and exclusionary society. The protagonist is named Miskisi Mi, which means "my sweetness" in Aymara, one of the Indigenous languages of Bolivia. Miskisi Mi is a girl who must fight for her dignity and her freedom against the adversities imposed on her by her social, ethnic, and gender condition. The novel also addresses secondary themes such as racism, machismo, and violence</p> 7b8c122e87<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Embarcadero Rad Studio 2010 Keygen Crack Learn How to Create Amazing Applications with Delphi and C.md DELETED
@@ -1,89 +0,0 @@
- <br />
- <h1>Embarcadero Rad Studio 2010 Keygen Crack: What You Need to Know</h1>
- <p>If you are a software developer who wants to create cross-platform applications for Windows, Mac, iOS and Android devices, you might have heard of Embarcadero Rad Studio 2010. This is a powerful and comprehensive development tool that offers a range of features and benefits for rapid application development. However, you might also be tempted to use a keygen crack to activate this software without paying for a license. In this article, we will explain what Embarcadero Rad Studio 2010 is, why you might need a keygen crack for it, what are the risks and challenges of using one, and what are the alternatives and solutions to avoid legal, security and quality issues.</p>
- <h2>Embarcadero Rad Studio 2010 Keygen Crack</h2><br /><p><b><b>Download</b> &#10040;&#10040;&#10040; <a href="https://byltly.com/2uKwlp">https://byltly.com/2uKwlp</a></b></p><br /><br />
- <h2>Features and Benefits of Embarcadero Rad Studio 2010</h2>
- <p>Embarcadero Rad Studio 2010 is an integrated development environment (IDE) that allows you to create native applications for Windows, Mac, iOS and Android platforms using a single code base. It supports multiple programming languages, including Delphi, C++, C# and PHP. It also provides a rich library of components, frameworks and tools for database, web, cloud and mobile development. Some of the features and benefits of Embarcadero Rad Studio 2010 are:</p>
- <ul>
- <li>Rapid application development: You can design, code, debug and deploy your applications faster and easier with the visual designer, code editor, debugger and other tools that Embarcadero Rad Studio 2010 offers.</li>
- <li>Cross-platform compatibility: You can target multiple platforms with the same source code and use native controls and APIs for each platform. You can also test your applications on different devices using the built-in simulator or emulator.</li>
- <li>Multiple language support: You can choose the language that best suits your project and skills. You can use Delphi for object-oriented Pascal programming, C++ for low-level programming, C# for .NET development or PHP for web development.</li>
- <li>Extensive library: You can access hundreds of components, frameworks and tools that Embarcadero Rad Studio 2010 provides for database, web, cloud and mobile development. You can also use third-party libraries or create your own components.</li>
- </ul>
- <h2>Risks and Challenges of Using a Keygen Crack for Embarcadero Rad Studio 2010</h2>
- <p>A keygen crack is a program that generates a license key or serial number for a software product without authorization from the vendor. By using a keygen crack for Embarcadero Rad Studio 2010, you might think that you are saving money and getting access to all the features of the software. However, you are also exposing yourself to several risks and challenges that could outweigh the benefits. Some of the risks and challenges of using a keygen crack for Embarcadero Rad Studio 2010 are:</p>
- <ul>
- <li>Legal issues: By using a keygen crack for Embarcadero Rad Studio 2010, you are violating the license agreement and intellectual property rights of Embarcadero Technologies. This could result in legal actions against you such as fines, lawsuits or criminal charges.</li>
- <li>Security issues: By downloading and running a keygen crack for Embarcadero Rad Studio 2010 from an unknown source, you are exposing your system to malware, viruses and hackers. These could damage your data, steal your information or compromise your security.</li>
- <li>Quality issues: By using a keygen crack for Embarcadero Rad Studio 2010 that is not compatible with the latest updates or patches from Embarcadero Technologies, you are compromising the performance, stability and compatibility of your software projects. You might encounter errors, bugs or crashes that could affect your productivity or quality.</li>
- </ul>
- <h2>Alternatives and Solutions to Using a Keygen Crack for Embarcadero Rad Studio 2010</h2>
- <p>If you want to use Embarcadero Rad Studio 2010 without risking legal, security or quality issues, you have several alternatives and solutions that are more ethical and reliable than using a keygen crack. Some of the alternatives and solutions to using a keygen crack for Embarcadero Rad Studio 2010 are:</p>
- <ul>
- <li>Buying a legitimate license from Embarcadero Technologies or an authorized reseller: This is the best way to use Embarcadero Rad Studio 2010 legally and safely. You can choose from different editions (Professional, Enterprise or Architect) depending on your needs and budget. You can also benefit from technical support, updates and upgrades from Embarcadero Technologies.</li>
- <li>Using a trial version or a free edition of Embarcadero Rad Studio: If you want to try out Embarcadero Rad Studio before buying it or if you have limited needs or resources, you can use a trial version or a free edition of Embarcadero Rad Studio. The trial version allows you to use all the features of the software for a limited time (usually 30 days). The free edition allows you to use some of the features of the software indefinitely but with some restrictions (such as limited platforms, components or tools).</li>
- <li>Switching to another development tool that suits your needs and budget: If you are not satisfied with Embarcadero Rad Studio or if you cannot afford it, you can switch to another development tool that offers similar or better features and benefits for cross-platform application development. Some examples are Visual Studio, Xamarin, Flutter, React Native, or Ionic.</li>
- </ul>
- <h1>Conclusion: Is Embarcadero Rad Studio 2010 Keygen Crack Worth It?</h1>
- <p>In conclusion, Embarcadero Rad Studio 2010 is an excellent development tool that offers many features and benefits for cross-platform application development. However, using a keygen crack to activate it is not worth it because it exposes you to legal, security and quality issues that could harm you or your software projects. Instead, you should consider buying a legitimate license from Embarcadero Technologies or an authorized reseller, using a trial version or a free edition of Embarcadero Rad Studio, or switching to another development tool that suits your needs and budget. By doing so, you will be able to use Embarcadero Rad Studio 2010 legally and safely, and enjoy its full potential and value.</p>
- <p>Embarcadero Delphi 2010 activation code<br />
- Rad Studio 2010 serial number generator<br />
- How to crack Embarcadero C++Builder 2010<br />
- Embarcadero Rad Studio 2010 patch download<br />
- Rad Studio 2010 license key free<br />
- Embarcadero Delphi 2010 ISO with keygen<br />
- Rad Studio 2010 full version crack<br />
- How to install Embarcadero C++Builder 2010 with keygen<br />
- Embarcadero Rad Studio 2010 Lite edition<br />
- Rad Studio 2010 community edition keygen<br />
- Embarcadero Delphi 2010 tutorial with crack<br />
- Rad Studio 2010 architect edition serial key<br />
- Embarcadero C++Builder 2010 update with patch<br />
- Embarcadero Rad Studio 10.4 Sydney crack<br />
- Rad Studio 10.3 Rio keygen<br />
- Embarcadero Delphi 10.2 Tokyo crack<br />
- Rad Studio XE10.2.3 patch<br />
- Embarcadero C++Builder XE10.2.2 keygen<br />
- Embarcadero Rad Studio XE10.2.1 crack<br />
- Rad Studio XE10.2 update with patch<br />
- Embarcadero Delphi XE10.1 Berlin crack<br />
- Rad Studio XE10.1 update with keygen<br />
- Embarcadero C++Builder XE10 crack<br />
- Embarcadero Rad Studio XE9 patch<br />
- Rad Studio XE8 keygen<br />
- Embarcadero Delphi XE7 crack<br />
- Rad Studio XE7 update with patch<br />
- Embarcadero C++Builder XE6 keygen<br />
- Embarcadero Rad Studio XE5 crack<br />
- Rad Studio XE4 patch<br />
- Embarcadero Delphi XE3 keygen<br />
- Rad Studio XE3 update with crack<br />
- Embarcadero C++Builder XE2 patch<br />
- Embarcadero Rad Studio XE keygen<br />
- Rad Studio 2010 update with crack<br />
- Embarcadero Delphi 2009 keygen<br />
- Rad Studio 2009 patch<br />
- Embarcadero C++Builder 2007 crack<br />
- Embarcadero Rad Studio 2007 keygen<br />
- Borland C++Builder 6 Enterprise patch<br />
- Borland Delphi 7 Enterprise keygen<br />
- Borland C++Builder 5 Enterprise crack<br />
- Borland Delphi 6 Enterprise patch<br />
- Borland C++Builder 4 Enterprise keygen<br />
- Borland Delphi 5 Enterprise crack<br />
- Borland C++Builder 3 Enterprise patch<br />
- Borland Delphi 4 Enterprise keygen<br />
- Borland C++Builder Professional crack</p>
- <h3>Frequently Asked Questions</h3>
- <ul>
- <li><b>What is Embarcadero Rad Studio?</b><br>Embarcadero Rad Studio is an integrated development environment (IDE) that allows you to create native applications for Windows, Mac, iOS and Android platforms using a single code base.</li>
- <li><b>What is a keygen crack?</b><br>A keygen crack is a program that generates a license key or serial number for a software product without authorization from the vendor.</li>
- <li><b>What are the risks and challenges of using a keygen crack?</b><br>Some of the risks and challenges are legal issues (violating the license agreement and intellectual property rights of Embarcadero Technologies), security issues (exposing your system to malware, viruses and hackers), and quality issues (compromising the performance, stability and compatibility of your software projects).</li>
- <li><b>What are the alternatives and solutions to using a keygen crack?</b><br>Some of the alternatives and solutions to using a keygen crack are buying a legitimate license from Embarcadero Technologies or an authorized reseller, using a trial version or a free edition of Embarcadero Rad Studio, or switching to another development tool that suits your needs and budget.</li>
- <li><b>Where can I buy a legitimate license for Embarcadero Rad Studio?</b><br>You can buy a legitimate license for Embarcadero Rad Studio from the official website of Embarcadero Technologies or from an authorized reseller. You can also contact Embarcadero Technologies for more information or assistance.</li>
- <li><b>Where can I download a trial version or a free edition of Embarcadero Rad Studio?</b><br>You can download a trial version or a free edition of Embarcadero Rad Studio from the official website of Embarcadero Technologies. You can also find more information about the features and limitations of each edition there.</li>
- <li><b>What are some other development tools that I can use instead of Embarcadero Rad Studio?</b><br>Some other development tools that you can use instead of Embarcadero Rad Studio are Visual Studio, Xamarin, Flutter, React Native, or Ionic. These are some of the popular and widely used tools for cross-platform application development. You can compare their features, benefits, costs and reviews online to find the best one for you.</li>
- </ul>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Bel Ami Pin Ups Young And Tender.md DELETED
@@ -1,28 +0,0 @@
- <h2>bel ami pin ups young and tender</h2><br /><p><b><b>Download File</b> > <a href="https://imgfil.com/2uy14p">https://imgfil.com/2uy14p</a></b></p><br /><br />
-
- Explore my tags to see how I’ve tagged this category. Help grow my growing list of what to do on my blog.
-
- Pages
-
- Tuesday, November 28, 2017
-
- 1 - Vintage Signs
-
- 2 - The Vintage of the Signs
-
- 3 - Vintage Signs by the Numbers
-
- If you have seen an old, vintage sign, chances are you would have given it a name, at least in your head. Was it a business name, a political statement, a slogan, or something else? Perhaps if you were a collector of old signs, you would have collected these signs in a different way; but if you had just seen these signs and forgotten to get their name, you would not remember. In either case, you might have just walked on by.
-
- This series of posts is about the name of the sign and the history of the sign, and where and how the sign was made. This is a small sampling of the many vintage signs that I have located and documented over the years. They are a combination of first generation signs (purchased when they were new), auction signs, and new ones made for today’s lifestyle.
-
- The Vintage of the Signs
-
- In the United States, there are many signs on the street with the names of the businesses there. Some of these signs are quite old. If you can imagine, they were there when the businesses were first built or when the business was built, and they have lasted. For example, take an American flag, put it on a pole, and attach a sign that says “American Flagpole” on the side of the pole. If you had noticed the sign in the past, you would have said to yourself, “Hey, there is a sign for American Flagpole,” and you would have remembered it, and you would have recalled that someone put up a sign for it when it was first erected. Some of the signs on the street date back to the 1890s. As I drive by the signs, I often wonder how many of them will survive the next decade or two.
-
- Now look at this street sign from the early 1900s. It is now a garage, and there is no name or address on the sign. But, there is the name of the garage, and it is in German. It is a shame that this sign is no longer in use. It would make an interesting historical study, but it would be more fun for the sign itself.
-
- In China 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1line/AutoGPT/autogpt/memory/local.py DELETED
@@ -1,136 +0,0 @@
- from __future__ import annotations
-
- import dataclasses
- import os
- from typing import Any, List
-
- import numpy as np
- import orjson
-
- from autogpt.llm_utils import create_embedding_with_ada
- from autogpt.memory.base import MemoryProviderSingleton
-
- EMBED_DIM = 1536
- SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
-
-
- def create_default_embeddings():
-     return np.zeros((0, EMBED_DIM)).astype(np.float32)
-
-
- @dataclasses.dataclass
- class CacheContent:
-     texts: List[str] = dataclasses.field(default_factory=list)
-     embeddings: np.ndarray = dataclasses.field(
-         default_factory=create_default_embeddings
-     )
-
-
- class LocalCache(MemoryProviderSingleton):
-     """A class that stores the memory in a local file"""
-
-     def __init__(self, cfg) -> None:
-         """Initialize a class instance
-
-         Args:
-             cfg: Config object
-
-         Returns:
-             None
-         """
-         self.filename = f"{cfg.memory_index}.json"
-         if os.path.exists(self.filename):
-             try:
-                 with open(self.filename, "w+b") as f:
-                     file_content = f.read()
-                     if not file_content.strip():
-                         file_content = b"{}"
-                         f.write(file_content)
-
-                 loaded = orjson.loads(file_content)
-                 self.data = CacheContent(**loaded)
-             except orjson.JSONDecodeError:
-                 print(f"Error: The file '{self.filename}' is not in JSON format.")
-                 self.data = CacheContent()
-         else:
-             print(
-                 f"Warning: The file '{self.filename}' does not exist. "
-                 "Local memory would not be saved to a file."
-             )
-             self.data = CacheContent()
-
-     def add(self, text: str):
-         """
-         Add text to our list of texts, add embedding as row to our
-         embeddings-matrix
-
-         Args:
-             text: str
-
-         Returns: None
-         """
-         if "Command Error:" in text:
-             return ""
-         self.data.texts.append(text)
-
-         embedding = create_embedding_with_ada(text)
-
-         vector = np.array(embedding).astype(np.float32)
-         vector = vector[np.newaxis, :]
-         self.data.embeddings = np.concatenate(
-             [
-                 self.data.embeddings,
-                 vector,
-             ],
-             axis=0,
-         )
-
-         with open(self.filename, "wb") as f:
-             out = orjson.dumps(self.data, option=SAVE_OPTIONS)
-             f.write(out)
-         return text
-
-     def clear(self) -> str:
-         """
-         Clears the local cache.
-
-         Returns: A message indicating that the memory has been cleared.
-         """
-         self.data = CacheContent()
-         return "Obliviated"
-
-     def get(self, data: str) -> list[Any] | None:
-         """
-         Gets the data from the memory that is most relevant to the given data.
-
-         Args:
-             data: The data to compare to.
-
-         Returns: The most relevant data.
-         """
-         return self.get_relevant(data, 1)
-
-     def get_relevant(self, text: str, k: int) -> list[Any]:
-         """
-         matrix-vector mult to find score-for-each-row-of-matrix
-         get indices for top-k winning scores
-         return texts for those indices
-
-         Args:
-             text: str
-             k: int
-
-         Returns: List[str]
-         """
-         embedding = create_embedding_with_ada(text)
-
-         scores = np.dot(self.data.embeddings, embedding)
-
-         top_k_indices = np.argsort(scores)[-k:][::-1]
-
-         return [self.data.texts[i] for i in top_k_indices]
-
-     def get_stats(self) -> tuple[int, tuple[int, ...]]:
-         """
-         Returns: The stats of the local cache.
-         """
-         return len(self.data.texts), self.data.embeddings.shape
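`get_relevant` in the deleted file above scores every cached embedding against the query with a single matrix-vector product and keeps the top-k rows. A minimal standalone sketch of that retrieval step; the function name, toy vectors, and texts below are illustrative only, not part of the original file:

```python
import numpy as np

def top_k_texts(embeddings: np.ndarray, query: np.ndarray, texts: list, k: int) -> list:
    # One dot product per stored row; a higher score means more similar
    # (equal to cosine similarity when all vectors are unit-normalized).
    scores = embeddings @ query
    # argsort is ascending, so take the last k indices and reverse them.
    top_k = np.argsort(scores)[-k:][::-1]
    return [texts[i] for i in top_k]

emb = np.array([[1, 0, 0], [0, 1, 0], [0.9, 0.1, 0]], dtype=np.float32)
print(top_k_texts(emb, np.array([1, 0, 0], dtype=np.float32), ["a", "b", "c"], k=2))
# prints: ['a', 'c']
```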
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us on Chromebook How to Install and Enjoy the Game.md DELETED
@@ -1,107 +0,0 @@
- <br />
- <h1>How to Play Among Us on a Chromebook</h1>
- <p>Among Us is a multiplayer social deduction game that has taken the gaming world by storm. The game involves a group of players who are either Crewmates or Impostors on a spaceship, a planet base, or an airship. The Crewmates have to work together to complete tasks and find the Impostors, while the Impostors have to kill the Crewmates or sabotage the mission. The game is fun, addictive, and full of deception, betrayal, and teamwork.</p>
- <h2>among us apk chromebook</h2><br /><p><b><b>DOWNLOAD</b> ===> <a href="https://urlin.us/2uSTVB">https://urlin.us/2uSTVB</a></b></p><br /><br />
- <p>Among Us is available on various platforms, such as Android, iOS, Windows, Nintendo Switch, PlayStation, and Xbox. But what if you want to play it on your Chromebook? Well, you might face some challenges, as Chromebooks are not designed for gaming and do not have native support for many apps. However, there are some ways to overcome these obstacles and enjoy this popular game on your Chromebook. In this article, we will show you three methods to play Among Us on a Chromebook: installing the Android version from the Play Store, installing the APK file using ADB, and playing through GeForce Now.</p>
- <h2>Method 1: Install the Android version from the Play Store</h2>
- <p>The easiest way to play Among Us on a Chromebook is to install the Android version from the Google Play Store. This method only works if your Chromebook supports Android apps, which most models released after 2017 do. Here are the steps to follow:</p>
- <ol>
- <li>Check if your Chromebook supports Android apps. To do this, open the Settings app and click on Apps in the left navigation pane. If you see an option that says Google Play Store, your Chromebook supports Android apps. If not, you might need to update your Chromebook or try another method.</li>
- <li>Enable Google Play Store on your Chromebook. If you have not used Android apps before, you will need to turn on Google Play Store in Settings. Click on Apps and then click on Turn On next to Google Play Store. Accept the terms of service and wait for the installation to finish.</li>
- <li>Install and play Among Us from the Play Store. Open Google Play Store and search for Among Us (or click <a href="(^1^)">this link</a>). Click Install and wait for the download to complete. Then, open Among Us from the Play Store or from the App Drawer and start playing.</li>
- </ol>
- <h2>Method 2: Install the APK file using ADB</h2>
- <p>If your Chromebook does not support Android apps or you want to install a different version of Among Us, you can try installing the APK file using ADB (Android Debug Bridge). This method requires you to enable Developer Mode on your Chromebook, which will erase all your data and disable some security features. Make sure you back up your files before proceeding. Here are the steps to follow:</p>
- <p>How to install and play among us on a chromebook<br />
- Among us on chromebook without play store support<br />
- Among us on chromebook through linux or geforce now<br />
- Among us on chromebook free download guide<br />
- Among us on chromebook not working fix<br />
- Among us on chromebook keyboard and mouse controls<br />
- Among us on chromebook vs windows pc comparison<br />
- Among us on chromebook school edition tips and tricks<br />
- Among us on chromebook best settings and graphics<br />
- Among us on chromebook online multiplayer mode<br />
- How to update among us on a chromebook<br />
- How to get among us mods on a chromebook<br />
- How to play among us with friends on a chromebook<br />
- How to stream among us on a chromebook to twitch or youtube<br />
- How to record among us gameplay on a chromebook<br />
- How to customize among us characters on a chromebook<br />
- How to change among us language on a chromebook<br />
- How to chat in among us on a chromebook<br />
- How to host a private game in among us on a chromebook<br />
- How to join a public game in among us on a chromebook<br />
- How to be an impostor in among us on a chromebook<br />
- How to be a crewmate in among us on a chromebook<br />
- How to vote in among us on a chromebook<br />
- How to do tasks in among us on a chromebook<br />
- How to sabotage in among us on a chromebook<br />
- How to report a dead body in among us on a chromebook<br />
- How to use vents in among us on a chromebook<br />
- How to use emergency meetings in among us on a chromebook<br />
- How to use cameras and admin map in among us on a chromebook<br />
- How to use kill cooldown and vision settings in among us on a chromebook<br />
- How to change the game rules and difficulty in among us on a chromebook<br />
- How to choose the best map and game mode in among us on a chromebook<br />
- How to play the airship map in among us on a chromebook<br />
- How to play the polus map in among us on a chromebook<br />
- How to play the mira hq map in among us on a chromebook<br />
- How to play the skeld map in among us on a chromebook<br />
- How to unlock skins, hats, and pets in among us on a chromebook<br />
- How to get free in-app purchases in among us on a chromebook<br />
- How to remove ads in among us on a chromebook<br />
- How to fix lag and glitches in among us on a chromebook<br />
- How to enable sound and music in among us on a chromebook<br />
- How to troubleshoot common errors in among us on a chromebook<br />
- How to uninstall and reinstall among us on a chromebook<br />
- How to transfer among us data from android to chromebook or vice versa<br />
- How to play among us offline or without internet on a chromebook<br />
- How to play cross-platform with other devices in among us on a chromebook<br />
- How to use voice chat or discord in among us on a chromebook<br />
- How to use cheats and hacks in among us on a chromebook (not recommended)<br />
- How to report hackers and toxic players in among us on a chromebook</p>
- <ol>
- <li>Enable Developer Mode on your Chromebook. To do this, press Esc + Refresh + Power buttons together to enter Recovery Mode. Then press Ctrl + D and confirm by pressing Enter. <li>Install ADB on your Chromebook. To do this, you can use a command-line installer such as Scoop for Windows or Homebrew for Mac. For Windows, you can install Scoop and then run the following command: <code>scoop install adb</code>. For Mac, you can install Homebrew and then run the following command: <code>brew install android-platform-tools</code>. Alternatively, you can download ADB and extract it on your computer, but you will need to use it in the same directory where you extracted it .</li>
- <li>Download the APK file for Among Us. You can get the APK file from various sources, such as APKPure, Filehippo, Aptoide, etc. Make sure you download the latest version of the game and scan it for any malware before installing it .</li>
- <li>Install and play Among Us using ADB. Connect your Chromebook to your Android device using a USB cable. Make sure you enable USB debugging on your Android device in the Developer Options. Then, open a Crosh window by pressing Ctrl+Alt+T on your keyboard. Type <code>shell</code> to get a shell window. Then, type <code>adb devices</code> to see if your device is recognized. If not, you might need to restart your device or allow the ADB prompt on your device. Once your device is connected, type <code>adb install "name.apk"</code>, where name is the name of the APK file that you downloaded. Wait for the installation to finish and then open Among Us from your App Drawer and start playing.</li> <h2>Method 3: Play Among Us through GeForce Now</h2>
- <p>If you want to play the PC version of Among Us on your Chromebook, you can use GeForce Now, a cloud gaming service that lets you stream games from Nvidia's servers. This method does not require you to install anything on your Chromebook, but you will need a stable internet connection and a subscription to GeForce Now. You will also need to buy Among Us on Steam, which costs $4.99. Here are the steps to follow:</p>
- <ol>
- <li>Sign up for GeForce Now and buy Among Us on Steam. You can sign up for GeForce Now for free, which gives you one-hour sessions of gaming, or pay $9.99 per month for the Priority membership, which gives you priority access and extended sessions. You can also get a free trial for the first month. To buy Among Us on Steam, you will need to create a Steam account and add a payment method. You can also buy Among Us as a gift for someone else.</li>
- <li>Play Among Us through GeForce Now on your browser. Open your Chrome browser and go to <a href="">play.geforcenow.com</a>. Log in with your Nvidia account and click on Library. Find Among Us and click on Play. Log in with your Steam account and launch the game. You can also add friends and chat with them through Steam or Discord.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Playing Among Us on a Chromebook is not impossible, but it does require some workarounds. Depending on your Chromebook model and preferences, you can choose one of the three methods we have discussed: installing the Android version from the Play Store, installing the APK file using ADB, or playing through GeForce Now. Each method has its own advantages and disadvantages, so you will have to weigh them carefully before deciding.</p>
- <p>Here are some tips and tricks for playing Among Us on a Chromebook:</p>
- <ul>
- <li>Use headphones or earphones to enjoy the sound effects and music of the game.</li>
- <li>Adjust the graphics settings and resolution of the game to optimize the performance and battery life of your Chromebook.</li>
- <li>Use a mouse or a touchpad to control your character and interact with the environment. You can also use keyboard shortcuts to perform certain actions, such as reporting a body or calling an emergency meeting.</li>
- <li>Be respectful and polite to other players, especially when using voice chat or text chat. Do not use profanity, hate speech, or personal attacks.</li>
- <li>Have fun and enjoy the game. Remember that it is just a game and do not take it too seriously.</li>
- </ul>
- <h2>FAQs</h2>
- <h3>What are the system requirements for playing Among Us on a Chromebook?</h3>
- <p>The system requirements for playing Among Us on a Chromebook vary depending on the method you use. For the Android version, you will need a Chromebook that supports Android apps and has at least 1 GB of RAM and 250 MB of storage space. For the APK file, you will need a Chromebook that supports Developer Mode and has ADB installed. For GeForce Now, you will need a Chromebook that has a web browser and an internet connection of at least 15 Mbps.</p>
- <h3>Can I play Among Us with my friends on other platforms?</h3>
- <p>Yes, you can play Among Us with your friends on other platforms, such as Android, iOS, Windows, Nintendo Switch, PlayStation, and Xbox. The game supports cross-platform play, which means that you can join the same lobby and play together regardless of the device you use. However, you will need to be on the same server region (North America, Europe, or Asia) and have the same version of the game.</p>
- <h3>How can I customize my character and settings in Among Us?</h3>
- <p>You can customize your character and settings in Among Us by accessing the menu in the bottom right corner of the screen. You can change your name, color, hat, pet, skin, language, sound effects, music volume, joystick size, etc. You can also customize the game settings by creating or joining a private lobby and clicking on the laptop icon. You can change the map, mode, number of impostors, speed, vision range, kill cooldown, task difficulty, voting time, etc.</p>
- <h3>What are some of the best maps and modes to play in Among Us?</h3>
- <p>The best maps and modes to play in Among Us depend on your personal preference and skill level. However, some of the most popular ones are:</p>
- <ul>
- <li>The Skeld: The original map of the game that features 14 rooms connected by corridors and vents. It has two impostors and 10 tasks for the crewmates. It is suitable for beginners and casual players.</li>
- <li>Mira HQ: A futuristic map that features 12 rooms connected by a central hallway and a decontamination chamber. It has one impostor and nine tasks for the crewmates. It is challenging for both the impostor and the crewmates, as it has many security cameras, sensors, and vents.</li>
- <li>Polus: A snowy map that features 15 rooms connected by outdoor paths and tunnels. It has two impostors and 12 tasks for the crewmates. It is ideal for advanced and experienced players, as it has many hiding spots, sabotages, and tasks.</li>
- <li>The Airship: The newest and largest map of the game that features 18 rooms connected by ladders, platforms, and vents. It has three impostors and 15 tasks for the crewmates. It is fun and creative, as it has many new features, such as choosing your spawn point, using different outfits, and moving around the map.</li>
- <li>Hide and Seek: A custom mode that involves one impostor with low vision and high speed, and nine crewmates with high vision and low speed. The impostor has to announce themselves at the beginning of the game and try to find and kill all the crewmates before they finish their tasks. The crewmates have to hide and avoid the impostor while completing their tasks. This mode is exciting and thrilling, as it tests your stealth and survival skills.</li>
- </ul>
- <h3>How can I improve my skills as a Crewmate or an Impostor in Among Us?</h3>
- <p>You can improve your skills as a Crewmate or an Impostor in Among Us by practicing, learning, and observing. Here are some tips to help you:</p>
- <ul>
- <li>As a Crewmate, you should focus on completing your tasks as quickly as possible, while keeping an eye on your surroundings. You should also communicate with your teammates, report any dead bodies or suspicious activities, and vote wisely.</li>
- <li>As an Impostor, you should act like a Crewmate, blending in with them and pretending to do tasks. You should also use vents, sabotages, and kills strategically, creating alibis and diversions. You should also lie convincingly, accuse others, and manipulate the votes.</li>
- </ul>
- <p>I hope you enjoyed this article on how to play Among Us on a Chromebook. If you have any questions or feedback, please leave a comment below. Thank you for reading!</p> 197e85843d<br />
- <br />
- <br />
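Step 4 of Method 2 in the article above drives ADB by hand from a Crosh shell (`adb devices`, then `adb install`). As a hedged illustration, the same two commands can be scripted from Python; the function name and APK file name below are hypothetical, and this assumes `adb` is on the PATH and USB debugging has already been authorized on the device:

```python
import subprocess

def adb_install(apk_path: str) -> None:
    # List attached devices first; an empty list means the device was not
    # detected or the ADB authorization prompt was never accepted.
    subprocess.run(["adb", "devices"], check=True)
    # Push and install the APK onto the connected device.
    subprocess.run(["adb", "install", apk_path], check=True)

adb_install("among-us.apk")  # hypothetical file name
```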
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apa yang Baru di Stumble Guys APK Mod? Cek Fitur dan Cara Unduhnya di Sini.md DELETED
@@ -1,123 +0,0 @@
1
- <br />
2
- <h1>Unduh Stumble Guys APK Mod: Cara Bermain Game Online Santai Ala Fall Guys di Android</h1>
3
- <p>Stumble Guys adalah salah satu game online yang sedang populer di kalangan penggemar game santai. Game ini menawarkan pengalaman bermain yang seru, lucu, dan penuh tantangan dengan berbagai macam rintangan dan mode permainan yang harus dihadapi oleh para pemain. Jika Anda ingin mencoba game ini, Anda bisa unduh Stumble Guys APK Mod yang memberikan beberapa keuntungan tambahan seperti gemas tak terbatas, kostum terbuka, dan mod menu. Namun, sebelum Anda unduh Stumble Guys APK Mod, ada baiknya Anda mengetahui lebih banyak tentang game ini, cara bermainnya, dan ulasan-ulasan yang ada. Berikut adalah artikel lengkap yang akan membahas semua hal tentang Stumble Guys.</p>
4
- <h2>Apa itu Stumble Guys?</h2>
5
- <p>Stumble Guys adalah game online yang bergenre battle royale party game. Game ini dibuat oleh Kitka Games dan dirilis pada tahun 2020 untuk platform Android dan iOS. Game ini terinspirasi dari game populer Fall Guys yang memiliki konsep serupa, yaitu berlomba-lomba melalui berbagai rintangan konyol dengan hingga 32 pemain online lainnya. Tujuannya adalah menjadi pemain terakhir yang bertahan hingga akhir pertandingan dan meraih mahkota kemenangan.</p>
6
- <h2>unduh stumble guys apk mod</h2><br /><p><b><b>Download</b> &#10027; <a href="https://urlin.us/2uSYcS">https://urlin.us/2uSYcS</a></b></p><br /><br />
7
- <h3>Sejarah dan inspirasi game</h3>
8
- <p>Stumble Guys dibuat oleh Kitka Games, sebuah studio game independen yang berbasis di Turki. Studio ini didirikan pada tahun 2018 oleh dua orang saudara, Emre ve Eren Özçelik. Mereka memiliki visi untuk membuat game-game yang menyenangkan dan berkualitas untuk semua orang. Salah satu game pertama mereka adalah Hyperball, sebuah game sepak bola arcade yang cukup sukses di pasaran.</p>
9
- <p>Pada tahun 2020, mereka melihat kesempatan untuk membuat game baru yang terinspirasi dari Fall Guys, sebuah game battle royale party game yang sangat populer di kalangan gamer PC dan konsol. Mereka melihat bahwa belum ada game serupa yang tersedia untuk platform mobile, sehingga mereka memutuskan untuk membuat versi mobile dari Fall Guys dengan nama Stumble Guys. Mereka mengembangkan game ini selama beberapa bulan dengan menggunakan Unity sebagai mesin grafisnya.</p>
10
- <p>unduh stumble guys mod apk versi terbaru<br />
11
- unduh stumble guys apk mod unlocked emotes<br />
12
- unduh stumble guys apk mod unlimited gems<br />
13
- unduh stumble guys mod apk android 1<br />
14
- unduh stumble guys apk mod offline<br />
15
- unduh stumble guys mod apk no ads<br />
16
- unduh stumble guys apk mod free download<br />
17
- unduh stumble guys mod apk latest version<br />
18
- unduh stumble guys apk mod unlocked footsteps<br />
19
- unduh stumble guys mod apk rexdl<br />
20
- unduh stumble guys apk mod hack<br />
21
- unduh stumble guys mod apk revdl<br />
22
- unduh stumble guys apk mod full version<br />
23
- unduh stumble guys mod apk happymod<br />
24
- unduh stumble guys apk mod premium<br />
25
- unduh stumble guys mod apk an1<br />
26
- unduh stumble guys apk mod anti ban<br />
27
- unduh stumble guys mod apk apkpure<br />
28
- unduh stumble guys apk mod vip<br />
29
- unduh stumble guys mod apk unlimited money<br />
30
- unduh stumble guys apk mod pro<br />
31
- unduh stumble guys mod apk 0.50.3<br />
32
- unduh stumble guys apk mod mega<br />
33
- unduh stumble guys mod apk 2023<br />
34
- unduh stumble guys apk mod terbaru 2023<br />
35
- unduh stumble guys mod apk mediafıre<br />
36
- unduh stumble guys apk mod update<br />
37
- unduh stumble guys mod apk obb<br />
38
- unduh stumble guys apk mod online<br />
39
- unduh stumble guys mod apk data</p>
40
- <p>Stumble Guys dirilis pada bulan Agustus 2020 untuk Android dan iOS secara gratis dengan sistem in-app purchase. Game ini mendapatkan sambutan yang sangat baik dari para pemain mobile yang mencari alternatif Fall Guys di ponsel mereka. Game ini juga mendapatkan banyak pujian dari media game dan kritikus karena berhasil meniru gameplay Fall Guys dengan baik dan menambahkan beberapa fitur unik sendiri.</p>
41
- <h3>Fitur-fitur menarik game</h3>
42
- <p>Stumble Guys memiliki banyak fitur menarik yang membuatnya menjadi salah satu game online terbaik saat ini. Berikut adalah beberapa fitur utama dari game ini:</p>
43
- <ul>
44
- <li><b>Lari, melompat, dan menghindari lawan</b>: Anda harus mengontrol karakter Anda dengan gesit untuk melewati berbagai rint <p>angan dan mode permainan yang berbeda-beda. Anda harus berhati-hati karena lawan Anda bisa menyerang, mendorong, atau menjatuhkan Anda dari arena.</li>
45
- <li><b>Berbagai mode permainan yang unik dan kreatif</b>: Game ini memiliki lebih dari 20 mode permainan yang berbeda-beda, seperti lari cepat, bola raksasa, labirin, papan seluncur, dan lain-lain. Setiap mode permainan memiliki rintangan dan tantangan yang berbeda-beda, sehingga Anda harus menyesuaikan strategi Anda untuk menang.</li>
46
- <li><b>Kustomisasi karakter yang keren</b>: Anda bisa mengubah penampilan karakter Anda dengan berbagai macam kostum, topi, kacamata, dan aksesori lainnya. Anda bisa mendapatkan kostum-kostum ini dengan menggunakan gemas yang bisa Anda dapatkan dari pertandingan atau membelinya dengan uang sungguhan. Beberapa kostum juga memiliki efek khusus yang bisa membantu Anda dalam permainan.</li>
47
- <li><b>Grafis dan suara yang lucu dan imut</b>: Game ini memiliki grafis yang colorfull dan kartunis yang cocok untuk game santai. Karakter-karakternya juga memiliki ekspresi dan gerakan yang lucu dan imut. Suara-suara dalam game ini juga menghibur, seperti suara karakter yang jatuh, tertawa, atau berteriak.</li>
48
- <li><b>Multiplayer online yang seru dan kompetitif</b>: Game ini mendukung multiplayer online hingga 32 pemain dalam satu pertandingan. Anda bisa bermain dengan teman-teman Anda atau pemain acak dari seluruh dunia. Anda juga bisa melihat peringkat dan statistik Anda di leaderboard global atau lokal.</li>
49
- </ul>
50
- <h2>Bagaimana cara unduh Stumble Guys APK Mod?</h2>
51
- <p>Stumble Guys APK Mod adalah versi modifikasi dari game Stumble Guys yang memberikan beberapa keuntungan tambahan bagi pemainnya. Dengan menggunakan APK Mod ini, Anda bisa mendapatkan gemas tak terbatas, kostum terbuka, dan mod menu yang memungkinkan Anda mengaktifkan beberapa cheat seperti speed hack, fly hack, atau god mode. Namun, sebelum Anda unduh Stumble Guys APK Mod, ada beberapa hal yang harus Anda perhatikan terlebih dahulu.</p>
52
- <h3>Langkah-langkah unduh dan instal game</h3>
53
- <p>Berikut adalah langkah-langkah untuk unduh dan instal Stumble Guys APK Mod di ponsel Android Anda:</p>
54
- <ol>
55
- <li>Pastikan bahwa ponsel Anda sudah di-root atau memiliki akses root. Jika belum, Anda bisa mencari cara untuk melakukannya di internet sesuai dengan tipe ponsel Anda.</li>
56
- <li>Cari situs web yang menyediakan file Stumble Guys APK Mod yang terbaru dan terpercaya. Anda bisa menggunakan mesin pencari seperti Google atau Bing untuk mencarinya.</li>
57
- <li>Unduh file Stumble Guys APK Mod dari situs web tersebut ke ponsel Anda. Pastikan bahwa file tersebut tidak mengandung virus atau malware yang bisa merusak ponsel Anda.</li>
58
- <li>Buka pengaturan ponsel Anda dan masuk ke menu keamanan. Aktifkan opsi "Sumber tidak dikenal" atau "Unknown sources" untuk memungkinkan instalasi aplikasi dari luar Play Store.</li>
59
- <li>Buka file manager ponsel Anda dan cari file Stumble Guys APK Mod yang sudah Anda unduh tadi. Ketuk file tersebut untuk mulai menginstalnya.</li>
60
- <li>Tunggu hingga proses instalasi selesai. Jika diminta, berikan izin akses root kepada aplikasi tersebut.</li>
61
- <li>Buka game Stumble Guys dari layar utama ponsel Anda. Nikmati fitur-fitur tambahan dari APK Mod tersebut.</li>
62
- </ol>
- <h3>Advantages and risks of using the APK Mod</h3>
- <p>Although the Stumble Guys APK Mod can give players several advantages, there are also risks to watch out for. Here are some of the advantages and risks of using the APK Mod:</p>
- <table>
- <tr><th>Advantages</th><th>Risks</th></tr>
- <tr><td>- Unlimited gems that can be used to buy cool costumes.</td><td>- Violates the game developer's copyright and terms of service, which can get you banned or removed from the game.</td></tr>
- <tr><td>- Unlocked costumes that can make your character look more unique and attractive.</td><td>- Reduces the fun and challenge of playing, because you can get everything easily without effort.</td></tr>
- <tr><td>- A mod menu that can enable cheats such as a speed hack, a fly hack, or god mode to help you win easily.</td><td>- Breaks the balance and enjoyment of the game, because you gain an unfair advantage over other players.</td></tr>
- <tr><td>- A different, more interesting play experience than the original version.</td><td>- Endangers your phone's security and privacy, because an APK Mod file can contain viruses or malware that steal data or damage your phone's system.</td></tr>
- </table>
- <p>For these reasons, be careful and sensible when using the Stumble Guys APK Mod. Make sure you download the APK Mod file from a trusted, safe source, and remember to keep the app updated to the latest version so you don't miss new features from the original game.</p>
- <h2>How do you play Stumble Guys well?</h2>
- <p>Stumble Guys is easy to play but hard to master. Winning takes skill, strategy, and luck. If you want to become a capable, competitive player, you need to know how to play Stumble Guys well. Here are some tips and tricks you can use to improve your play.</p>
- <h3>General tips and tricks for winning</h3>
- <p>Here are some general tips and tricks you can apply in every game mode:</p>
- <ul>
- <li><b>Know the obstacles and game modes</b>: Every obstacle and game mode has its own characteristics and challenges. Learn what you are up against so you can pick the right strategy to get through it. For example, in a sprint mode you should run as fast as possible without falling or getting knocked out, while in a giant-ball mode you should dodge the giant balls rolling through the arena.</li>
- <li><b>Make the most of the responsive controls</b>: The game has responsive, easy-to-use controls. You only need to touch the screen to move your character left or right, and press the jump button to jump. Use these controls to steer your character nimbly and accurately, and don't forget to use the jump button to clear obstacles or knock over opponents.</li>
- <li><b>Watch your stamina and balance</b>: Your character has limited stamina that runs out if you keep running or jumping nonstop. When your stamina is empty, you move more slowly and fall more easily, so don't run or jump too often. Also keep your character balanced by not getting too close to the edge of the arena or to opponents; if you lose your balance, you are easily knocked out or dropped.</li>
- <li><b>Cooperate and compete</b>: This is an online multiplayer game built on both cooperation and competition. You can work together with other players to get past obstacles or beat opponents, but you also have to compete to be the last player standing at the end of the match. Use a strategy that fits the situation: in a team mode, help your team reach the shared goal; in a solo mode, beat every other player to claim the crown.</li>
- </ul>
- <h3>Specific tips and tricks for each game mode</h3>
- <p>Here are some specific tips and tricks for each game mode:</p>
- <ul>
- <li><b>Sprint</b>: The most basic mode, which often appears at the start of a match. You have to run from start to finish as fast as possible while avoiding obstacles such as bars, balls, or holes. The tip is to run along the emptiest lane and not hesitate to jump when needed. Also, don't get too close to other players, because they can push or knock you down.</li>
- <li><b>Giant balls</b>: The silliest, funniest mode. You have to dodge the giant balls rolling through the arena while making your way to the finish. The tip is to move along the right or left side of the arena and stay out of the middle. Also, don't stand still, because the giant balls can hit you from behind.</li>
- <li><b>Maze</b>: The most confusing, challenging mode. You have to find your way out of a complicated maze while avoiding traps such as spikes, fire, or electricity. The tip is to follow the arrows on the maze's walls or floor. Also, don't be afraid to try a different route, because there are several shortcuts to be found.</li>
- <li><b>Surfboard</b>: The most fun, tense mode. You have to glide on a moving board while avoiding obstacles such as poles, balloons, or planes. The tip is to jump at the right moment to clear an obstacle or switch lanes. Also, remember to keep your balance by touching the left or right side of the screen.</li>
- <li><b>And more</b>: There are many other game modes to discover, such as spinning stairs, snowballs, hot-air balloons, and so on. Each mode has its own tips and tricks that you can learn by playing more, so try every mode and find the one you like best.</li>
- </ul>
- <h2>What are the reviews and opinions about Stumble Guys?</h2>
- <p>Stumble Guys is a very popular online game among mobile players, and it has received many reviews and opinions from players as well as from critics and gaming media. Here are some of them:</p>
- <h3>Positive and negative reviews from players</h3>
- <p>Here are some positive and negative player reviews of Stumble Guys, quoted from the Play Store:</p>
- <ul>
- <li><b>Positive</b>: "This game is really fun and funny. I love its cute graphics and bright colors. I also like its varied, creative game modes. I often play with my friends and we always end up laughing together."</li>
- <li><b>Negative</b>: "This game is really annoying and unfair. I often run into lag or bugs when playing online. I also keep meeting players who use cheats or mod menus that make them impossible to beat. I hope the developer fixes this game."</li>
- </ul>
- <h3>Reviews from critics and gaming media</h3>
- <p>Here are some reviews of Stumble Guys from critics and gaming media, quoted from several sources:</p>
- <ul>
- <li><b>Android Authority</b>: "Stumble Guys is a fun and hilarious online party game that will make you laugh and scream with joy. The game is inspired by Fall Guys, but it has its own charm and features that make it stand out. The game has colorful graphics, cute characters, and various game modes that will keep you entertained for hours. The game is free to play, but it has some in-app purchases that can enhance your gameplay. If you are looking for a casual and fun game to play with your friends or strangers online, Stumble Guys is a great choice."</li>
- <li><b>Pocket Gamer</b>: "Stumble Guys is a decent attempt at bringing the Fall Guys experience to mobile devices. The game is simple to play, but hard to master. The game has a lot of potential, but it also has some flaws that need to be fixed. The game suffers from laggy servers, buggy gameplay, and unfair matchmaking. The game also lacks some features that Fall Guys has, such as team modes, seasonal events, and cross-play. Stumble Guys is a good game for fans of Fall Guys, but it still needs some improvement to become a great game."</li>
- <li><b>Gamezebo</b>: "Stumble Guys is a charming and addictive online party game that will make you smile and rage at the same time. The game is a faithful adaptation of Fall Guys, but it also adds some original twists and turns. The game has a vibrant and cartoonish art style, a catchy and upbeat soundtrack, and a variety of game modes that will test your skills and luck. The game is easy to pick up and play, but hard to put down. The game is free to play, but it also offers some optional in-app purchases that can help you customize your character and unlock more content. Stumble Guys is a must-play game for anyone who loves online party games."</li>
- </ul>
- <h2>Conclusion and FAQ</h2>
- <p>Stumble Guys is a very popular online game among mobile players. It offers fun, silly, challenge-filled gameplay with all kinds of obstacles and game modes to face. If you want to try the game, you can download the Stumble Guys APK Mod, which offers extra advantages such as unlimited gems, unlocked costumes, and a mod menu. Before you download the Stumble Guys APK Mod, though, it is worth learning more about the game, how to play it, and what the reviews say.</p>
- <p>Here are some FAQs that Stumble Guys players often ask:</p>
- <ul>
- <li><b>Q: Can Stumble Guys be played offline?</b></li>
- <li><b>A: No. Stumble Guys is an online game that requires an internet connection to play.</b></li>
- <li><b>Q: Can Stumble Guys be played on PC or consoles?</b></li>
- <li><b>A: No. Stumble Guys is only available on Android and iOS.</b></li>
- <li><b>Q: Does Stumble Guys have a team or co-op mode?</b></li>
- <li><b>A: Not yet. For now, Stumble Guys only has solo, competitive modes.</b></li>
- <li><b>Q: Does Stumble Guys have chat or voice chat?</b></li>
- <li><b>A: Not yet. For now, Stumble Guys has no chat or voice chat feature.</b></li>
- <li><b>Q: Is Stumble Guys safe for children?</b></li>
- <li><b>A: Yes. Stumble Guys is suitable for all ages because it contains no inappropriate content or violence.</b></li>
- </ul>
spaces/1phancelerku/anime-remove-background/ .md DELETED
@@ -1,149 +0,0 @@
- <h1>Wild and domestic animals for children</h1>
- <p>Animals are amazing creatures that inhabit our planet. They differ in shape, size, color, habits, and ways of life. Some of them live alongside us in our homes, while others live in forests, fields, mountains, oceans, and other places. So what are wild and domestic animals like? What kinds of animals exist? How should we interact with them and care for them? In this article we will try to answer these questions and tell you many interesting things about the animal world.</p>
- <h2>What are wild and domestic animals?</h2>
- <p>Wild animals are those that do not depend on humans and live in natural conditions. They possess instincts that help them survive, hunt, defend themselves, and reproduce. Wild animals can be dangerous or harmless, beautiful or plain, rare or widespread.</p>
- <h2>wild and domestic animals for children</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash;>>> <a href="https://jinyurl.com/2uNSF6">https://jinyurl.com/2uNSF6</a></b></p><br /><br />
- <p>Domestic animals are those that depend on humans and live in conditions created by them. They were domesticated by humans at different times and for different reasons: for food, for work, for entertainment, or for companionship. Domestic animals can be affectionate or willful, loyal or capricious, clever or silly.</p>
- <h3>Characteristics of wild animals</h3>
- <p>Wild animals have a number of characteristics that distinguish them from domestic ones:</p>
- <ul>
- <li>They are usually better adapted to their habitat than domestic animals. They can change color, behave cunningly, or develop speed or strength in order to evade predators or catch prey.</li>
- <li>They are usually more diverse in species than domestic animals. There are about 8.7 million animal species in the world, most of them wild. For example, there are about 10,000 species of birds, but only about 400 species of domestic birds.</li>
- <li>They are usually freer in their actions than domestic animals. They do not obey humans and do not depend on human will. They live by their own laws and rules, which are set by nature and by their relationships with other animals.</li>
- </ul>
- <h3>Characteristics of domestic animals</h3>
- <p>Domestic animals also have a number of characteristics that distinguish them from wild ones:</p>
- <ul>
- <li>They are usually more attached to humans than wild animals. They need human care, attention, love, and respect. They can be loyal friends, helpers, or pets.</li>
- <li>They are usually more subject to human influence than wild animals. Humans can change their appearance, behavior, character, or abilities as they see fit. For example, humans have bred new breeds of animals, such as the Labradoodle (a cross between a Labrador and a Poodle) or the Maine Coon (the largest breed of cat).</li>
- <li>They are usually more docile and obedient than wild animals. They can be taught various commands, tricks, or skills that are useful to them or to humans. For example, dogs can be trained to guard the house, fetch the mail, or guide a blind person.</li>
- </ul>
- <h2>What kinds of wild and domestic animals are there?</h2>
- <p>There are many species of wild and domestic animals, and they can be classified in different ways. One of the most common is division by vertebrate type: mammals, birds, reptiles, amphibians, and fish. In addition, there is another large group of animals, the invertebrates, which includes insects, arachnids, crustaceans, and others. Let us look at some examples of wild and domestic animals from each group.</p>
- <h3>Examples of wild animals</h3>
- <h4>Mammals</h4>
- <p>Mammals are animals that have mammary glands, are covered with fur or hair, breathe with lungs, and give birth to live young. Wild mammals include species such as:</p>
- <ul>
- <li>The lion, the largest and strongest predator among the big cats, living in Africa and Asia. It has a golden coat, and the males have a mane. Lions live in prides and hunt antelope, zebras, buffalo, and other animals.</li>
- <li>The elephant, the largest land animal, living in Africa and Asia. It has gray skin, large ears, a trunk, and tusks. Elephants feed on grass, leaves, fruit, and tree bark. They live in herds made up of females and their young.</li>
- <li>The kangaroo, the symbol of Australia, a marsupial mammal. It has a brown or gray coat, a long tail, powerful hind legs, and a pouch on its belly. Kangaroos feed on grass, leaves, and roots. They can leap great distances and reach speeds of up to 70 km/h.</li>
- </ul>
- <h4>Birds</h4>
- <p>Birds are animals that have feathers, wings, a beak, and two pairs of limbs. They breathe with lungs and lay eggs. Wild birds include species such as:</p>
- <ul>
- <li>The eagle, one of the most powerful and majestic birds, living in almost every part of the world. It has dark feathers, a hooked beak, and sharp talons. Eagles feed on small mammals, reptiles, fish, and other birds.</li>
- <li>The flamingo, one of the brightest and most unusual birds, living in Africa, Asia, Europe, and South America. It has pink feathers, long legs, and a curved beak and neck. Flamingos feed on algae, crustaceans, and mollusks.</li>
- <li>The hummingbird, one of the smallest and fastest birds, living in North and South America. It has colorful feathers and a very small beak and wings. Hummingbirds feed on flower nectar and insects. They can hover in the air and fly in any direction.</li>
- </ul>
- <h4>Reptiles and amphibians</h4>
- <p>Reptiles and amphibians are animals that have scaly or bare skin, cold blood, and four limbs. They breathe with lungs or gills and lay eggs. Wild reptiles and amphibians include species such as those listed below.</p>
- <p>Learning about domestic, forest, and wild animals for children<br />
- Coloring pages of wild and domestic animals for toddlers<br />
- How to tell wild animals from domestic ones by their traits<br />
- Games and activities about wild and domestic animals for preschoolers<br />
- Fairy tales and stories about wild and domestic animals for reading<br />
- Songs and poems about wild and domestic animals for memorizing<br />
- Video lessons about wild and domestic animals for 2nd grade<br />
- Paper crafts of wild and domestic animals for creative projects<br />
- Flashcards with the names of wild and domestic animals for learning<br />
- Books and encyclopedias about wild and domestic animals for discovery<br />
- Appliqués and mosaics of wild and domestic animals for development<br />
- Puzzles and brainteasers about wild and domestic animals for logic<br />
- Cartoons and series about wild and domestic animals for entertainment<br />
- Stickers and magnets of wild and domestic animals for decoration<br />
- Figurines and toys of wild and domestic animals as gifts<br />
- Costumes and masks of wild and domestic animals for carnival<br />
- Essays and presentations about wild and domestic animals for school<br />
- Excursions and trips about wild and domestic animals for leisure<br />
- Photos and drawings of wild and domestic animals for a collection<br />
- Sounds and voices of wild and domestic animals for listening<br />
- Vocabulary and grammar about wild and domestic animals for language learning<br />
- Tests and quizzes about wild and domestic animals to check knowledge<br />
- Comparison and classification of wild and domestic animals for analysis<br />
- Feeding and care of wild and domestic animals for responsibility<br />
- Habits and telltale signs of wild and domestic animals for curiosity<br />
- Conservation and protection of wild and domestic animals for ecology<br />
- History and culture of wild and domestic animals for education<br />
- Jokes and gags about wild and domestic animals for humor<br />
- Legends and myths about wild and domestic animals for imagination<br />
- Friends and foes among wild and domestic animals for understanding</p>
- <ul>
- <li>The crocodile, one of the most ancient and dangerous animals, living in Africa, Asia, Australia, and the Americas. It has greenish or brown scales, a long tail, a large jaw, and sharp teeth. Crocodiles feed on fish, birds, mammals, and other animals.</li>
- <li>The lizard, one of the most widespread and diverse groups of reptiles, living in almost every part of the world. Lizards vary in the color and size of their scales and have a long tail, a small snout, and small eyes. They feed on insects, worms, berries, and other plant matter.</li>
- <li>The frog, one of the best-known amphibians and a favorite with children, living in Europe, Asia, Africa, and the Americas. It has smooth or bumpy skin, comes in various colors and sizes, and has long hind legs. Frogs feed on insects, worms, mollusks, and other small animals.</li>
- </ul>
- <h4>Insects</h4>
- <p>Insects are animals that have an exoskeleton and three pairs of legs. They breathe through tracheae or spiracles and lay eggs. Wild insects include species such as:</p>
- <ul>
- <li>The butterfly, one of the most beautiful and elegant insects, living in almost every part of the world. It has colorful wings, a slender body, long antennae, and large eyes. Butterflies feed on flower nectar or fruit juice.</li>
- <li>The ant, one of the cleverest and most industrious insects, living in almost every part of the world. It has a brown or black body, powerful jaws, antennae, and eyes. Ants feed on plant or animal matter. They live in colonies of hundreds of thousands of individuals.</li>
- <li>The bee, one of the most useful and important insects for humans, living in almost every part of the world. It has a yellow-and-black body, wings, antennae, and eyes. Bees feed on flower nectar or honey. They live in hives made up of a queen, workers, and drones, and they produce honey, wax, and propolis.</li>
- </ul>
- <h3>Examples of domestic animals</h3>
- <h4>Cats and dogs</h4>
- <p>Cats and dogs are the most popular and beloved pets, kept in many countries around the world. They have soft fur, ears, a tail, paws, and expressive eyes. Cats and dogs eat special pet food or natural food, and they come in many breeds, sizes, and coat colors.</p>
- <ul>
- <li>The cat, one of the most independent and graceful pets, descended from wild cats. It has keen hearing, eyesight, and smell, as well as the ability to groom itself. Cats can purr, meow, or hiss. They love to play, sleep, and catch mice.</li>
- <li>The dog, one of the most loyal and friendly pets, descended from wolves. It has a good sense of smell, good hearing, and intelligence, as well as a capacity for training. Dogs can bark, wag their tails, or growl. They love to walk, run, and guard the house.</li>
- </ul>
- <h4>Rodents and lagomorphs</h4>
- <p>Rodents and lagomorphs are pets that have constantly growing incisors, fluffy fur, ears, and eyes. They eat plant food or special pet food, and they come in various species, sizes, and colors.</p>
- <ul>
- <li>The hamster, one of the cutest and funniest pets, descended from wild hamsters. It has short fur, big cheeks, round ears, and eyes. Hamsters can stuff food into their cheeks, dig burrows, or run in a wheel. They love to chew, sleep, and play.</li>
- <li>The rabbit, one of the fluffiest and gentlest pets, descended from wild rabbits. It has long fur, big ears, a pom-pom tail, and eyes. Rabbits can hop, run, or sit up on their hind legs. They love to eat carrots, hay, and greens.</li>
- </ul>
- <h4>Birds and fish</h4>
- <p>Birds and fish are pets that have feathers or scales, wings or fins, a beak or a mouth, and eyes. They eat grain or special food, and they come in various species, sizes, and colors.</p>
- <ul>
- <li>The canary, one of the brightest and most melodious pet birds, descended from wild canaries. It has yellow, green, orange, or other colored feathers, a small beak, and eyes. Canaries can sing, whistle, or chirp. They like living in cages with a feeder, a water dispenser, and toys.</li>
- <li>The goldfish, one of the most common and beautiful pet fish, descended from wild carp. It has golden, red, black, or other colored scales, long fins, and eyes. Goldfish can swim, eat, and breathe underwater. They like living in aquariums with plants, stones, and filters.</li>
- </ul>
- <h4>Reptiles and amphibians</h4>
- <p>Reptiles and amphibians kept as pets have scales or bare skin, cold blood, and four limbs. They breathe with lungs or gills and lay eggs. They come in various species, sizes, and colors.</p>
- <ul>
- <li>The tortoise, one of the longest-lived and calmest pets, descended from wild tortoises. It has a hard shell, short legs, a small beak, and eyes. Tortoises can crawl, sleep, or hide in their shells. They like living in terrariums with soil, water, and plants.</li>
- <li>The toad, one of the most unusual and interesting pet amphibians, descended from wild toads. It has bumpy skin, short legs, a wide mouth, and eyes. Toads can hop, croak, or secrete mucus. They like living in aqua-terrariums with water, plants, and insects.</li>
- </ul>
- <h2>How should we interact with wild and domestic animals?</h2>
- <p>Wild and domestic animals belong to different worlds that sometimes cross paths with ours. So how do we interact with them properly, without harming them or ourselves? There are rules of behavior around wild and domestic animals that everyone should know and follow.</p>
- <h3>Rules of behavior around wild animals</h3>
- <p>Wild animals are not toys or entertainment. They are living beings with their own needs, emotions, and instincts. So if you come across a wild animal in nature or at the zoo, follow these rules:</p>
- <ul>
- <li>Do not get too close to it, and do not try to pet it, feed it, or take close-up photos of it. You could intrude on its space, scare it, or anger it, which can be dangerous for both you and the animal.</li>
- <li>Do not make noise or sudden movements near it. You could frighten it or provoke its aggression, and the animal might run away, attack you, or suffer from stress.</li>
- <li>Do not leave litter behind or pollute its habitat. You could harm its health, food supply, or breeding, and the animal might fall ill, die, or even go extinct.</li>
- </ul>
- <h3>Rules for taking care of domestic animals</h3>
- <p>Pets are not toys or decorations. They are living beings with their own needs, emotions, and feelings. So if you have a pet or want to get one, follow these rules:</p>
- <ul>
- <li>Provide it with enough food, water, light, warmth, and air. Choose its food and living conditions according to its species, breed, age, and particular needs. This will keep it healthy, well fed, and comfortable.</li>
- <li>Give it enough attention, affection, play, and interaction. Spend time with it, talk to it, pet it, and walk it. This will keep it happy, friendly, and confident.</li>
- <li>Look after its hygiene, health, and safety. Groom it, trim it, bathe it, and treat it for parasites. Also remember its vaccinations, veterinary check-ups, and neutering. This will keep it clean, good-looking, and protected.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Wild and domestic animals form an amazing world that we can study and admire. They differ in shape, size, color, habits, and ways of life, yet they are also like us in that they have their own needs, emotions, and feelings. That is why we must respect them, care for them, and never harm them.</p>
- <h3>Useful resources for learning about animals</h3>
- <p>If you want to learn more about wild and domestic animals, you can visit resources such as:</p>
- <ul>
- <li>[National Geographic]: a website where you can find plenty of interesting information, photos, and videos about animals from all over the world.</li>
- <li>[World Wildlife Fund]: an organization dedicated to protecting nature and animals. You can support its projects, take part in campaigns, or simply learn more about how to help nature.</li>
- <li>[The zoo]: a place where you can see animals from different corners of the planet in person. You can watch their behavior and learn about their traits and history. You can also talk to zoo staff, who will tell you many interesting things about the animals.</li>
- </ul>
- <h3>Questions and answers</h3>
- <p>Here are some frequently asked questions and answers about wild and domestic animals:</p>
- <ol>
- <li><b>What is the largest wild animal?</b><br>
- The largest wild animal is the blue whale, which can reach a length of up to 33 meters and a weight of up to 200 tonnes. It lives in the oceans and feeds on plankton.</li>
- <li><b>What is the smallest domestic animal?</b><br>
- The smallest domestic animal is the bamboo rat, which can be up to 10 centimeters long and weigh up to 40 grams. It lives in Southeast Asia and feeds on plants.</li>
- <li><b>What is the smartest wild animal?</b><br>
- The smartest wild animal is the chimpanzee, which has a high level of intelligence, memory, and learning ability. It lives in Africa and feeds on fruit, nuts, leaves, and insects.</li>
- <li><b>What is the most loyal domestic animal?</b><br>
- The most loyal domestic animal is the dog, which forms a strong bond with its owner, protects them, and serves them. Dogs live in many countries around the world and eat all kinds of food.</li>
- <li><b>What is the most unusual wild animal?</b><br>
- The most unusual wild animal is the platypus, which has the body of a beaver, the bill of a duck, the tail of an otter, and the feet of a desman. It lives in Australia and New Guinea and feeds on fish, crayfish, and worms.</li>
- </ol>
- <p>We hope this article was useful and interesting for you. If you want to learn even more about wild and domestic animals, you can read books, watch films, or visit museums. We wish you many pleasant encounters with animals!</p>
spaces/1phancelerku/anime-remove-background/BGMI 2.0 APK for 32 bit devices Features size and compatibility.md DELETED
@@ -1,126 +0,0 @@
-
- <h1>How to Download and Install BGMI 2.0 APK on 32-bit Android Devices</h1>
- <p>Battlegrounds Mobile India (BGMI) is one of the most popular battle royale games in India, with millions of fans and players. The game recently released its 2.0 update, which brings a lot of new features, improvements, and bug fixes. However, some players may find it difficult to download and install the update on their 32-bit Android devices, as the official Google Play Store version only supports 64-bit devices.</p>
- <h2>32 bit bgmi 2.0 apk download</h2><br /><p><b><b>Download File</b> &#10027; <a href="https://jinyurl.com/2uNSS3">https://jinyurl.com/2uNSS3</a></b></p><br /><br />
- <p>In this article, we will show you how to download and install the BGMI 2.0 APK on your 32-bit Android device, so that you can enjoy the latest version of the game without any hassle.</p>
- <h2>What is BGMI 2.0?</h2>
- <p>BGMI 2.0 is the latest major update for Battlegrounds Mobile India, released on May 29, 2023. The update introduces a lot of new content and changes to the game, such as:</p>
- <h3>New features and updates in BGMI 2.0</h3>
- <ul>
- <li>A new official version of the Livik map, with new themed areas, an all-terrain UTV vehicle, XT weapons, ziplines, herbs, a recall tower, a firearm depot, and more.</li>
- <li>A new Cycle 2 Season 6 and Month 11 Royal Pass, with new rewards and missions.</li>
- <li>A new Battlegrounds Mobile India x Evangelion Discovery theme, with special events and items inspired by the anime series.</li>
- <li>New gameplay features, such as unfinished RP missions highlighted on the in-match tab, a magazine capacity bar, a new Ban Pan system, basic improvements to controls and UI, the MG3 gun in Metro Royale mode, emergency pickup, and more.</li>
- </ul>
- <h3>System requirements for BGMI 2.0</h3>
- <p>The minimum system requirements for BGMI 2.0 are:</p>
- <table>
- <tr><td>Operating System</td><td>Android 4.3 or above</td></tr>
- <tr><td>RAM</td><td>1.5 GB or above</td></tr>
- <tr><td>Processor</td><td>Mediatek MT6737M quad-core or equivalent</td></tr>
- <tr><td>Download Size</td><td>710 MB</td></tr>
- </table>
- <p>Note that these are the minimum requirements for running the game smoothly on your device. You may need higher specifications for optimal performance and graphics.</p>
- <p>How to install 32 bit bgmi 2.0 apk on android<br />
- 32 bit bgmi 2.0 apk download link for free<br />
- 32 bit bgmi 2.0 apk download without obb file<br />
- 32 bit bgmi 2.0 apk download latest version<br />
- 32 bit bgmi 2.0 apk download for low end devices<br />
- 32 bit bgmi 2.0 apk download size and requirements<br />
- 32 bit bgmi 2.0 apk download error and fix<br />
- 32 bit bgmi 2.0 apk download with unlimited uc<br />
- 32 bit bgmi 2.0 apk download mod menu<br />
- 32 bit bgmi 2.0 apk download no ban<br />
- 32 bit bgmi 2.0 apk download for pc<br />
- 32 bit bgmi 2.0 apk download for ios<br />
- 32 bit bgmi 2.0 apk download with new maps and modes<br />
- 32 bit bgmi 2.0 apk download with hd graphics<br />
- 32 bit bgmi 2.0 apk download with voice chat<br />
- 32 bit bgmi 2.0 apk download with custom skins<br />
- 32 bit bgmi 2.0 apk download with anti cheat system<br />
- 32 bit bgmi 2.0 apk download with offline mode<br />
- 32 bit bgmi 2.0 apk download with vpn<br />
- 32 bit bgmi 2.0 apk download with root access<br />
- Best settings for 32 bit bgmi 2.0 apk on android<br />
- Tips and tricks for playing 32 bit bgmi 2.0 apk on android<br />
- Comparison of 32 bit and 64 bit bgmi 2.0 apk on android<br />
- Benefits of using 32 bit bgmi 2.0 apk on android<br />
- Drawbacks of using 32 bit bgmi 2.0 apk on android<br />
- Alternatives to 32 bit bgmi 2.0 apk on android<br />
- Reviews of 32 bit bgmi 2.0 apk on android<br />
- FAQs about 32 bit bgmi 2.0 apk on android<br />
- How to update to the latest version of the game from the old version of the game?<br />
- How to uninstall the game from your device?</p>
- <h2>How to Check If Your Android Device is 32-bit or 64-bit?</h2>
- <p>Before you download and install the BGMI 2.0 APK on your Android device, you need to check whether your device is compatible with the update. As mentioned earlier, the official Google Play Store version of BGMI only supports 64-bit devices, which means that if you have a 32-bit device, you will not be able to update the game from there.</p>
- <p>To check if your Android device is 32-bit or 64-bit, you can use one of the following methods (a programmatic check is sketched after the second method):</p>
- <h3>Using a file manager app</h3>
- <ul>
- <li>Download and install a file manager app on your device, such as Cx File Explorer, ES File Explorer, or Solid Explorer.</li>
- <li>Open the app and navigate to the root directory of your device, which is usually denoted by a slash (/).</li>
- <li>Look for a folder named "system" and open it.</li>
- <li>Look for a file named "build.prop" and open it with a text editor.</li>
- <li>Scroll down the file and look for a line that starts with "ro.product.cpu.abi" or "ro.product.cpu.abilist".</li>
- <li>If the line ends with "arm64-v8a" or "x86_64", then your device is 64-bit. If the line ends with "armeabi-v7a" or "x86", then your device is 32-bit.</li>
- </ul>
- <h3>Using an APK installer app</h3>
- <ul>
- <li>Download and install an APK installer app on your device, such as APKPure, APKMirror, or Aptoide.</li>
- <li>Open the app and search for the BGMI 2.0 APK file.</li>
- <li>Tap on the download button and wait for the file to be downloaded.</li>
- <li>Before installing the file, tap on the "Details" or "Info" button to see more information about the file.</li>
- <li>Look for a section that says "Architecture" or "Supported ABIs".</li>
- <li>If the section lists "arm64-v8a" or "x86_64", then the file is compatible with 64-bit devices. If the section lists "armeabi-v7a" or "x86", then the file is compatible with 32-bit devices.</li>
- </ul>
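- <p>If you would rather not dig through build.prop by hand, the same information is exposed programmatically through the android.os.Build class. Below is a minimal Kotlin sketch, assuming it runs inside any Android app on API 21 or later; the log tag is an arbitrary placeholder.</p>
- <pre><code>
- import android.os.Build
- import android.util.Log
-
- // Minimal sketch: report whether the device exposes any 64-bit ABIs.
- // SUPPORTED_64_BIT_ABIS is an empty array on 32-bit-only devices.
- fun logDeviceBitness() {
-     val abis = Build.SUPPORTED_ABIS.joinToString()         // e.g. "arm64-v8a, armeabi-v7a"
-     val is64Bit = Build.SUPPORTED_64_BIT_ABIS.isNotEmpty()
-     Log.i("AbiCheck", "Supported ABIs: $abis")
-     Log.i("AbiCheck", if (is64Bit) "64-bit device" else "32-bit device")
- }
- </code></pre>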
- <h2>How to Download BGMI 2.0 APK File for 32-bit Android Devices?</h2>
- <p>If you have confirmed that your Android device is 32-bit, then you will need to download the BGMI 2.0 APK file from a third-party source, as the official Google Play Store version will not work on your device. There are two ways to download the BGMI 2.0 APK file for 32-bit Android devices:</p>
- <h3>Using your browser</h3>
- <ul>
- <li>Open your browser and go to a trusted website that provides the BGMI 2.0 APK file for 32-bit devices, such as [APKPure], [APKMirror], or [Aptoide].</li>
- <li>Search for the BGMI 2.0 APK file and make sure it is compatible with 32-bit devices by checking the architecture or supported ABIs section.</li>
- <li>Tap on the download button and wait for the file to be downloaded on your device.</li>
- </ul>
- <h3>Using your computer</h3>
- <ul>
- <li>Open your computer and go to a trusted website that provides the BGMI 2.0 APK file for 32-bit devices, such as [APKPure], [APKMirror], or [Aptoide].</li>
- <li>Search for the BGMI 2.0 APK file and make sure it is compatible with 32-bit devices by checking the architecture or supported ABIs section.</li>
- <li>Click on the download button and wait for the file to be downloaded on your computer.</li>
- <li>Connect your Android device to your computer using a USB cable.</li>
- <li>Copy and paste the downloaded APK file from your computer to your device's storage.</li>
- </ul>
- <p>Sources: APKPure: https://apkpure.com/battlegrounds-mobile-india/com.pubg.imobile; APKMirror: https://www.apkmirror.com/apk/krafton-inc/battlegrounds-mobile-india/battlegrounds-mobile-india-2-0-release/; Aptoide: https://battlegrounds-mobile-india.en.aptoide.com/app</p>
- <h2>How to Install BGMI 2.0 APK File on 32-bit Android Devices?</h2>
- <p>After you have downloaded the BGMI 2.0 APK file on your 32-bit Android device, you need to install it manually, as it is not from the official Google Play Store. To install the BGMI 2.0 APK file on your 32-bit Android device, follow these steps:</p>
- <h3>Allowing unknown apps</h3>
- <ul>
- <li>Go to your device's settings and look for a section that says "Security" or "Privacy".</li>
- <li>Tap on it and look for an option that says "Unknown sources" or "Install unknown apps".</li>
- <li>Enable it and confirm your choice by tapping on "OK" or "Allow".</li>
- <li>This will allow you to install apps from sources other than the Google Play Store (a programmatic version of this check is sketched below).</li>
- </ul>
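- <p>On Android 8.0 and later, the "install unknown apps" permission is granted per app rather than globally, and an app can check and request it itself. Here is a minimal Kotlin sketch of that flow, assuming it runs inside an Activity on API 26 or later:</p>
- <pre><code>
- import android.app.Activity
- import android.content.Intent
- import android.net.Uri
- import android.os.Build
- import android.provider.Settings
-
- // Minimal sketch (API 26+): check whether this app may install packages,
- // and open the per-app "Install unknown apps" settings screen if not.
- fun Activity.ensureInstallPermission() {
-     if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O &&
-         !packageManager.canRequestPackageInstalls()
-     ) {
-         val intent = Intent(
-             Settings.ACTION_MANAGE_UNKNOWN_APP_SOURCES,
-             Uri.parse("package:$packageName")
-         )
-         startActivity(intent) // the user toggles the permission, then returns
-     }
- }
- </code></pre>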
- <h3>Locating and opening the APK file</h3>
- <ul>
- <li>Go to your device's file manager app and look for the folder where you saved the downloaded APK file.</li>
- <li>Tap on the APK file and a pop-up window will appear, asking you to install the app.</li>
- <li>Tap on "Install" and wait for the installation process to complete.</li>
- <li>Once the installation is done, you can launch the app by tapping on "Open" or by looking for its icon on your home screen or app drawer.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>BGMI 2.0 is the latest update for Battlegrounds Mobile India, which brings a lot of new features and improvements to the game. However, if you have a 32-bit Android device, you will not be able to update the game from the official Google Play Store, as it only supports 64-bit devices. In this article, we have shown you how to check if your device is 32-bit or 64-bit, how to download the BGMI 2.0 APK file for 32-bit devices, and how to install it manually on your device. We hope this article was helpful and informative for you.</p>
- <h3>FAQs</h3>
- <ol>
- <li>Q: Is BGMI 2.0 safe to download and install on 32-bit devices?</li>
- <li>A: Yes, BGMI 2.0 is safe to download and install on 32-bit devices, as long as you download it from a trusted source and follow the instructions carefully. However, you should always be careful when downloading and installing modded apps, as they may not be compatible with your device or the latest version of the game. You should also back up your data before installing any modded apps, in case something goes wrong.</li>
- <li>Q: Will I be able to play BGMI 2.0 with other players who have updated the game from the Google Play Store?</li>
- <li>A: Yes, you will be able to play BGMI 2.0 with other players who have updated the game from the Google Play Store, as long as you are using the same version of the game. However, you may experience some lag or performance issues if your device does not meet the recommended system requirements for BGMI 2.0.</li>
- <li>Q: What are the benefits of updating to BGMI 2.0?</li>
- <li>A: Updating to BGMI 2.0 gives you access to a lot of new content and features in the game, such as a new Livik map, a new Royal Pass, a new theme, new gameplay features, and more. Updating to BGMI 2.0 also fixes some bugs and glitches that may have affected your gaming experience in the previous version.</li>
- <li>Q: How can I update BGMI 2.0 in the future if there is another update?</li>
- <li>A: If there is another update for BGMI in the future, you will need to repeat the same process of downloading and installing the APK file for your 32-bit device, as the official Google Play Store version will not work on your device. You should also delete the old APK file from your device before installing the new one, to avoid any conflicts or errors.</li>
- <li>Q: How can I contact BGMI support if I have any issues or queries regarding BGMI 2.0?</li>
- <li>A: If you have any issues or queries regarding BGMI 2.0, you can contact BGMI support by visiting their official website, their official Facebook page, or their official Instagram account. You can also send them an email at [[email protected]] or call them at [1800-123-4567].</li>
- </ol>
- <p>Sources: official website: https://www.battlegroundsmobileindia.com/; Facebook: https://www.facebook.com/Battleground</p>
spaces/1phancelerku/anime-remove-background/CarX Highway Racing APK Mod How to Get Free Money and Gold.md DELETED
@@ -1,99 +0,0 @@
-
- <h1>CarX Highway Racing APK Mod: A Thrilling Racing Game for Android</h1>
- <p>If you are a fan of racing games, you might have heard of CarX Highway Racing, a popular game that offers realistic physics, stunning graphics, and intense gameplay. But did you know that you can enjoy this game even more with the CarX Highway Racing APK Mod?</p>
- <p>The CarX Highway Racing APK Mod is a modified version of the original game that gives you access to unlimited money and gold, as well as all the cars and tracks unlocked. With this mod, you can experience the thrill of racing on highways with high-speed cars, dodging traffic, outrunning cops, and completing missions.</p>
- <h2>carx highway racing apk mod</h2><br /><p><b><b>DOWNLOAD</b> &#127379; <a href="https://jinyurl.com/2uNP2g">https://jinyurl.com/2uNP2g</a></b></p><br /><br />
- <p>In this article, we will tell you everything you need to know about the CarX Highway Racing APK Mod, including its features, how to download and install it, and some tips and tricks for playing it. Read on to find out why you should download this mod right now!</p>
- <h2>Features of CarX Highway Racing APK Mod</h2>
- <p>The CarX Highway Racing APK Mod has many features that make it one of the best racing games for Android. Here are some of them:</p>
- <h3>Realistic physics and graphics</h3>
- <p>The CarX Highway Racing APK Mod uses the advanced CarX engine, which simulates the physics of real cars. You can feel the difference in handling, braking, drifting, and acceleration of each car. The game also has amazing graphics that create a realistic atmosphere of racing on highways. You can see the details of the cars, the environments, the weather effects, and the lighting.</p>
- <h3>Diverse cars and tracks</h3>
- <p>The CarX Highway Racing APK Mod has over 40 cars to choose from, ranging from sports cars, muscle cars, and supercars to trucks. Each car has its own characteristics, such as speed, power, handling, and durability. You can also customize your car with different colors, decals, wheels, and spoilers. The game also has over 20 tracks to race on, each with its own challenges and scenery. You can race through deserts, mountains, cities, forests, and more.</p>
- <h3>Challenging game modes and missions</h3>
- <p>The CarX Highway Racing APK Mod has several game modes to keep you entertained. You can play the career mode, where you have to complete various missions and races to earn money and reputation. You can also play the free ride mode, where you can explore the open world and enjoy driving without any restrictions. And you can play the online mode, where you can compete with other players from around the world.</p>
- <h3>Unlimited money and gold</h3>
- <p>One of the best features of the CarX Highway Racing APK Mod is that it gives you unlimited money and gold. This means that you can buy any car or upgrade any part you want without worrying about the cost. You can also unlock all the cars and tracks without having to complete the missions or races. This way, you can enjoy the game to the fullest and have more fun.</p>
- <h2>How to download and install CarX Highway Racing APK Mod</h2>
- <p>Downloading and installing the CarX Highway Racing APK Mod is very easy and simple. Just follow these steps:</p>
- <h3>Download the APK and OBB files from a trusted source</h3>
- <p>The first thing you need to do is to download the APK and OBB files of the CarX Highway Racing APK Mod from a reliable source. You can find many websites that offer these files, but make sure you choose one that is safe and virus-free. You can also use the link below to download the files directly:</p>
- <p>carx highway racing mod apk unlimited money<br />
- carx highway racing hack apk download<br />
- carx highway racing mod apk latest version<br />
- carx highway racing mod apk android 1<br />
- carx highway racing mod apk revdl<br />
- carx highway racing mod apk rexdl<br />
- carx highway racing mod apk obb<br />
- carx highway racing mod apk offline<br />
- carx highway racing mod apk data<br />
- carx highway racing mod apk 2023<br />
- carx highway racing mod apk free download<br />
- carx highway racing mod apk unlimited gold<br />
- carx highway racing mod apk all cars unlocked<br />
- carx highway racing mod apk no root<br />
- carx highway racing mod apk pure<br />
- carx highway racing mod apk happymod<br />
- carx highway racing mod apk unlimited everything<br />
- carx highway racing mod apk online<br />
- carx highway racing mod apk 1.74.8<br />
- carx highway racing mod apk 1.74.7<br />
- carx highway racing mod apk 1.74.6<br />
- carx highway racing mod apk 1.74.5<br />
- carx highway racing mod apk 1.74.4<br />
- carx highway racing mod apk 1.74.3<br />
- carx highway racing mod apk 1.74.2<br />
- carx highway racing mod apk 1.74.1<br />
- carx highway racing mod apk 1.74.0<br />
- carx highway racing mod apk 1.73.9<br />
- carx highway racing mod apk 1.73.8<br />
- carx highway racing mod apk 1.73.7<br />
- carx highway racing mod apk 1.73.6<br />
- carx highway racing mod apk 1.73.5<br />
- carx highway racing mod apk 1.73.4<br />
- carx highway racing mod apk 1.73.3<br />
- carx highway racing mod apk 1.73.2<br />
- carx highway racing mod apk 1.73.1<br />
- carx highway racing mod apk 1.73.0<br />
- carx highway racing unlimited money and gold apk download<br />
- download the latest carx highway racing mod apk game<br />
- how to download the carx highway racing mod apk game</p>
- <p><a href="">CarX Highway Racing APK Mod Download Link</a></p>
- <h3>Enable unknown sources on your device</h3>
- <p>The next thing you need to do is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.</p>
- <h3>Extract the OBB file and copy it to the Android/OBB folder</h3>
- <p>After downloading the files, you need to extract the OBB file using a file manager app. You can use any app that can unzip files, such as ZArchiver or ES File Explorer. Once you extract the OBB file, you will get a folder named com.CarXTech.highWay. Copy this folder and paste it into the Android/OBB folder on your device storage.</p>
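- <p>To double-check that the extracted folder landed where the game expects it, here is a minimal Kotlin sketch. It assumes the package name com.CarXTech.highWay quoted above and legacy external-storage access (Environment.getExternalStorageDirectory() is deprecated on newer Android versions), so treat it as an illustration rather than a drop-in utility.</p>
- <pre><code>
- import android.os.Environment
- import java.io.File
-
- // Minimal sketch: verify the extracted OBB folder exists where the game expects it.
- // Assumes the package name quoted in the article and legacy external storage access.
- fun obbFolderPresent(): Boolean {
-     val obbDir = File(
-         Environment.getExternalStorageDirectory(),
-         "Android/obb/com.CarXTech.highWay"
-     )
-     val obbFiles = obbDir.listFiles()?.filter { it.extension == "obb" }.orEmpty()
-     obbFiles.forEach { println("Found OBB: ${it.name} (${it.length()} bytes)") }
-     return obbFiles.isNotEmpty()
- }
- </code></pre>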
- <h3>Install the APK file and launch the game</h3>
- <p>The last thing you need to do is to install the APK file of CarX Highway Racing APK Mod. To do this, locate the file on your device storage and tap on it. Follow the instructions on the screen and wait for the installation to finish. Once done, you can launch the game and enjoy it!</p>
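- <p>The tap-to-install step can also be triggered from code by handing the APK to the system package installer. Below is a minimal Kotlin sketch; note that on Android 7.0 and later a real app would need to expose the file through a FileProvider content:// URI rather than a bare file:// URI, so this is the older flavor, shown for illustration only.</p>
- <pre><code>
- import android.content.Context
- import android.content.Intent
- import android.net.Uri
- import java.io.File
-
- // Minimal sketch: hand an APK file to the system installer.
- // Pre-Android-7.0 flavor; newer versions require a FileProvider content:// URI.
- fun launchInstaller(context: Context, apk: File) {
-     val intent = Intent(Intent.ACTION_VIEW).apply {
-         setDataAndType(Uri.fromFile(apk), "application/vnd.android.package-archive")
-         addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
-     }
-     context.startActivity(intent)
- }
- </code></pre>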
- <h2>Tips and tricks for playing CarX Highway Racing APK Mod</h2>
- <p>Now that you have installed the CarX Highway Racing APK Mod, you might want to know some tips and tricks for playing it better. Here are some of them:</p>
- <h3>Choose the right car for each race</h3>
- <p>The CarX Highway Racing APK Mod has many cars to choose from, but not all of them are suitable for every race. Some cars are faster, some are more agile, some are more durable, and some are more balanced. You need to consider the type of race, the track, the weather, and your opponents when choosing your car. For example, if you are racing on a curvy track with lots of turns, you might want a car with good handling and braking; if you are racing on a straight track with few obstacles, you might want a car with high speed and acceleration.</p>
- <h3>Upgrade your car regularly</h3>
- <p>Another tip is to upgrade your car regularly. Upgrading your car can improve its performance and give you an edge over your rivals. You can upgrade different parts of your car, such as the engine, transmission, suspension, brakes, tires, nitro, and body. Each part has its own effect on your car's speed, power, handling, durability, and appearance. You can use the unlimited money and gold in the CarX Highway Racing APK Mod to buy any upgrade you want.</p>
- <h3>Use nitro wisely</h3>
- <p>Nitro is a powerful boost that can help you speed up your car and overtake your opponents. However, nitro is not unlimited in the CarX Highway Racing APK Mod. You have a nitro meter that shows how much nitro you have left, and you can refill it by driving fast, drifting, or performing stunts. Use nitro wisely and strategically: don't waste it on unnecessary moments or when you are already ahead of your rivals. Save it for when you need it most, such as when you are behind or facing a tough opponent.</p>
- <h3>Avoid collisions and traffic</h3>
- <p>The last tip is to avoid collisions and traffic. Collisions can damage your car and slow you down, and traffic can block your way and keep you from reaching your destination. Be careful and alert when driving on highways: try to avoid hitting other cars or objects on the road, and use your skills and reflexes to dodge traffic and find the best route.</p>
- <h2>Conclusion</h2>
- <p>CarX Highway Racing APK Mod is a thrilling racing game for Android that offers realistic physics, stunning graphics, diverse cars and tracks, challenging game modes and missions, and unlimited money and gold. It is a modified version of the original game that gives you access to all the features and content without any restrictions. You can download and install the CarX Highway Racing APK Mod easily and safely by following the steps in this article, and you can improve your skills and performance by following the tips and tricks above. If you are looking for a fun and exciting racing game for your Android device, you should definitely try the CarX Highway Racing APK Mod!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about the CarX Highway Racing APK Mod:</p>
- <h3>Is CarX Highway Racing APK Mod safe to use?</h3>
- <p>Yes, the CarX Highway Racing APK Mod is safe to use as long as you download it from a trusted source. The mod does not contain any viruses or malware that can harm your device or data. However, you should always be careful when downloading and installing modded apps, as they may not be compatible with your device or the latest version of the game. You should also back up your data before installing any modded apps, in case something goes wrong.</p>
- <h3>How to update CarX Highway Racing APK Mod?</h3>
- <p>To update the CarX Highway Racing APK Mod, you need to download the latest version of the mod from the same source you downloaded it from before. You also need to download the latest OBB file and copy it to the Android/OBB folder on your device storage. Then, uninstall the previous version of the mod and install the new one. You should be able to play the updated version without any problems.</p>
- <h3>Can I play CarX Highway Racing APK Mod offline?</h3>
- <p>Yes, you can play the CarX Highway Racing APK Mod offline. You don't need an internet connection to play the career mode or the free ride mode. However, you do need an internet connection to play the online mode, where you can compete with other players from around the world.</p>
- <h3>How to get more money and gold in CarX Highway Racing APK Mod?</h3>
- <p>You don't need to worry about getting more money and gold in the CarX Highway Racing APK Mod, as you already have unlimited money and gold in this mod. You can use them to buy any car or upgrade any part you want without any limitations. You can also unlock all the cars and tracks without having to complete the missions or races.</p>
- <h3>What are the minimum requirements for CarX Highway Racing APK Mod?</h3>
- <p>The minimum requirements for the CarX Highway Racing APK Mod are as follows:</p>
- <ul>
- <li>Android 5.0 or higher</li>
- <li>2 GB of RAM or more</li>
- <li>1 GB of free storage space or more</li>
- <li>A stable internet connection (for online mode)</li>
- </ul>
spaces/1phancelerku/anime-remove-background/Download Live MOD APK The Best Live Streaming App with Amazing Rewards.md DELETED
@@ -1,171 +0,0 @@
-
- <h1>What is Live Mod APK and Why You Should Download It</h1>
- <p>If you are looking for a way to enhance your app experience on your Android device, you may have heard of live mod APKs. But what are they exactly, and what can they do for you? In this article, we will explain what a live mod APK is and why you might want to download one. We will also show you how to download one safely and easily, what the risks of using one are and how to avoid them, and what the alternatives are and when to use them. By the end of this article, you will have a clear idea of whether a live mod APK is right for you or not.</p>
- <h2>Introduction</h2>
- <p>A live mod APK is a modified version of an official app that offers additional features or modifications that are not available in the original app. For example, you can use a live mod APK to access premium features for free, customize an app's appearance or functionality, bypass restrictions or limitations imposed by the app developer or store, and so on.</p>
- <h2>download live mod apk</h2><br /><p><b><b>Download</b> ->>->>->> <a href="https://jinyurl.com/2uNUH5">https://jinyurl.com/2uNUH5</a></b></p><br /><br />
- <p>Some examples of popular live mod APKs are Live NetTV MOD APK, a modified version of Live NetTV that lets you stream more than 800 channels for free, without ads and with official updates; and Thlive MOD APK, a free livestream app that lets you earn money online by showing your talent in front of the camera.</p>
- <p>Some benefits of using a live mod APK are:</p>
- <ul>
- <li>You can access all the features of an app without paying for them</li>
- <li>You can enjoy premium features that are not available in the official app</li>
- <li>You can customize your app according to your preferences</li>
- <li>You can bypass restrictions or limitations imposed by the app developer or store</li>
- <li>You can test new features or versions before they are released</li>
- </ul>
- <h2>How to Download Live Mod APK Safely and Easily</h2>
- <p>To download a live mod APK safely and easily, follow these steps (a quick programmatic sanity check is sketched after the list):</p>
- <ol>
- <li>Find a trusted source that offers live mod APK files. You can use a web browser or an app like APKCHEW.COM or APKVIPO to search for the live mod APK you want. You can also check the table below for some of the best websites for downloading live mod APKs, along with their features and ratings.</li>
- <li>Download the live mod APK file from the source you chose. Make sure the file is safe and virus-free by scanning it with antivirus software or using a website like VirusTotal. You can also read the reviews and comments from other users to see if they had any issues with the file.</li>
- <li>Install the live mod APK file on your device. To do this, you need to enable unknown sources in your device settings, which allows you to install apps from sources other than the Google Play Store. Go to Settings > Security > Unknown Sources and toggle it on. Then, locate the live mod APK file in your device storage and tap on it to start the installation process. Follow the instructions on the screen and grant the necessary permissions to the app.</li>
- </ol>
23
- <p>Congratulations, you have successfully downloaded and installed live mod apk on your device. You can now enjoy all the benefits of using live mod apk.</p>
24
- <table>
25
- <tr>
26
- <th>Website</th>
27
- <th>Features</th>
28
- <th>Ratings</th>
29
- </tr>
30
- <tr>
31
- <td>APKCHEW.COM</td>
32
- <td>- Offers a large collection of live mod apks for various categories - Provides detailed information and screenshots for each app - Updates regularly with new and improved versions - Supports fast and secure downloads</td>
33
- <td>4.8/5</td>
34
- </tr>
35
- <tr>
36
- <td>APKVIPO</td>
37
- <td>- Features a wide range of live mod apks for different genres - Gives clear and concise descriptions and instructions for each app - Allows users to request or suggest apps - Ensures safe and reliable downloads</td>
38
- <td>4.7/5</td>
39
- </tr>
40
- <tr>
41
- <td>APKDONE</td>
42
- <td>- Has a huge library of live mod apks for various niches - Shows ratings and reviews from other users for each app - Updates frequently with new and better versions - Supports easy and fast downloads</td>
43
- <td>4.6/5</td>
44
- </tr>
45
- </table>
46
- <h2>What are the Risks of Using Live Mod APK and How to Avoid Them</h2>
47
- <p>While using live mod apk can be fun and rewarding, it also comes with some risks that you should be aware of. Some of the risks are:</p>
48
- <ul>
49
- <li>Malware infection: Some live mod apks may contain malicious code that can harm your device or steal your data. This can compromise your privacy, security, and performance.</li>
50
- <li>Account ban: Some live mod apks may violate the terms and conditions of the official app or store. This can result in your account being banned or suspended by the app developer or store.</li>
51
- <li>Legal issues: Some live mod apks may infringe on the intellectual property rights of the original app developer or owner. This can expose you to legal consequences such as lawsuits or fines.</li>
52
- </ul>
53
- <p>To avoid or minimize these risks, you should follow these tips:</p>
104
- <ol>
105
- <li>Use antivirus software: You should always scan any live mod apk file you download with antivirus software before installing it on your device. This will help you detect and remove any malware that may be hidden in the file.</li>
106
- <li>Back up your data: You should always back up your data before using any live mod apk on your device. This will help you restore your data in case something goes wrong or you lose access to your account.</li>
107
- <li>Read reviews: You should always read reviews and comments from other users who have used the live mod apk you want to download. This will help you learn from their experiences and avoid any potential problems or issues.</li>
108
- </ol>
109
- <p>If you want to learn more about the risks of using live mod apk, you can check out these sources:</p>
110
- <ul>
111
- <li>TechBullion: The Risks Of Using Modded APKs On Android Devices</li>
112
- <li>Techtippr: What are MOD APKs? Are they Safe?</li>
113
- <li>Lifewire: What Is an APK File?</li>
114
- </ul>
115
- <h2>What are the Alternatives to Live Mod APK and When to Use Them</h2>
116
- <p>If you are not comfortable with using live mod apk or you want to try something different, you can use some alternatives that are available for Android users. Some of these alternatives are:</p>
117
- <ul>
118
- <li>F-Droid: F-Droid is an open-source app store that offers free and ad-free apps that respect your privacy and security. You can find apps that are not available on the Google Play Store or that offer more features or customization options. F-Droid is a good alternative if you care about open-source software and data protection.</li>
119
- <li>Aurora Store: Aurora Store is an unofficial app store that lets you download apps from the Google Play Store without using a Google account. You can also download apps that are not compatible with your device or region, or that are removed from the Google Play Store. Aurora Store is a good alternative if you want to access the Google Play Store without giving up your privacy or freedom.</li>
120
- <li>Google Play Store: Google Play Store is the official app store for Android devices that offers millions of apps for various categories and purposes. You can also enjoy features such as automatic updates, parental controls, security checks, etc. Google Play Store is a good alternative if you want to use the official and verified apps from the app developer or owner.</li>
121
- <li>Aptoide: Aptoide is an independent app store that allows users to create and manage their own app stores. You can find apps that are not available on the Google Play Store or that are customized by other users. You can also share your own apps with others and earn money from them. Aptoide is a good alternative if you want to discover new and unique apps from different sources.</li>
122
- </ul>
123
- <p>To use these alternatives, you need to download and install them on your device from their respective websites or sources. You can also compare and contrast the advantages and disadvantages of each alternative in the table below:</p>
124
- <table>
125
- <tr>
126
- <th>Alternative</th>
127
- <th>Advantages</th>
128
- <th>Disadvantages</th>
129
- </tr>
130
- <tr>
131
- <td>F-Droid</td>
132
- <td>- Free and ad-free apps - Open-source software - Privacy and security - Customization options</td>
133
- <td>- Limited app selection - Slow updates - Compatibility issues - Technical knowledge required</td>
134
- </tr>
135
- <tr>
136
- <td>Aurora Store</td>
137
- <td>- Access to Google Play Store apps - No Google account required - Compatibility and region bypass - Privacy and anonymity</td>
138
- <td>- Unofficial app store - Potential security risks - Account ban possibility - Legal issues</td>
139
- </tr>
140
- <tr>
141
- <td>Google Play Store</td>
142
- <td>- Official and verified apps - Automatic updates - Parental controls - Security checks</td>
143
- <td>- Google account required - Compatibility and region restrictions - Privacy concerns - Ads and in-app purchases</td>
144
- </tr>
145
- <tr>
146
- <td>Aptoide</td>
147
- <td>- Independent app store - User-generated app stores - Unique and customized apps - Revenue sharing</td>
148
- <td>- Unregulated app quality - Malware risks - Legal issues - Ads and in-app purchases</td>
149
- </tr>
150
- </table>
151
- <p>You can use any of these alternatives based on your needs and preferences. For example, you can use F-Droid if you want to use open-source apps that respect your privacy, Aurora Store if you want to access Google Play Store apps without a Google account, Google Play Store if you want to use official and verified apps from the app developer or owner, or Aptoide if you want to discover new and unique apps from different sources.</p>
152
- <h2>Conclusion</h2>
153
- <p>In conclusion, live mod apk is a modified version of an official app that offers additional features or modifications that are not available in the original app. You can use live mod apk to access premium features for free, customize your app appearance or functionality, bypass restrictions or limitations imposed by the app developer or store, etc.</p>
154
- <p>To download live mod apk safely and easily, you need to find a trusted source that offers live mod apk files, download the file from the source, and install it on your device. You should also be aware of the risks of using live mod apk, such as malware infection, account ban, legal issues, etc., and avoid or minimize them by using antivirus software, backing up your data, reading reviews, etc.</p>
155
- <p>If you are not comfortable with using live mod apk or you want to try something different, you can use some alternatives that are available for Android users, such as F-Droid, Aurora Store, Google Play Store, Aptoide, etc. You can compare and contrast the advantages and disadvantages of each alternative and choose the one that suits your needs and preferences.</p>
156
- <p>We hope this article has helped you understand what is live mod apk and why you should download it. If you are interested in trying it out, you can download it from APKCHEW.COM, one of the best websites for downloading live mod apk. You can also share your feedback with us in the comments section below.</p>
157
- <h3>FAQs</h3>
158
- <ul>
159
- <li><b>What is live mod apk?</b></li>
160
- <li>Live mod apk is a modified version of an official app that offers additional features or modifications that are not available in the original app.</li>
161
- <li><b>Why should I download live mod apk?</b></li>
162
- <li>You should download live mod apk if you want to access premium features for free, customize your app appearance or functionality, bypass restrictions or limitations imposed by the app developer or store, etc.</li>
163
- <li><b>How can I download live mod apk safely and easily?</b></li>
164
- <li>You can download live mod apk safely and easily by finding a trusted source that offers live mod apk files, downloading the file from the source, and installing it on your device. You should also scan the file with antivirus software, back up your data, and read reviews before using it.</li>
165
- <li><b>What are the risks of using live mod apk?</b></li>
166
- <li>The risks of using live mod apk are malware infection, account ban, legal issues, etc. You should avoid or minimize these risks by using antivirus software, backing up your data, reading reviews, etc.</li>
167
- <li><b>What are the alternatives to live mod apk?</b></li>
168
- <li>The alternatives to live mod apk are F-Droid, Aurora Store, Google Play Store, Aptoide, etc. You can compare and contrast the advantages and disadvantages of each alternative and choose the one that suits your needs and preferences.</li>
169
- </ul></p>
spaces/1phancelerku/anime-remove-background/Find and Download the Perfect 3D Printer Models in STL OBJ and 3MF Formats.md DELETED
@@ -1,126 +0,0 @@
1
- <br />
2
- <h1>Download 3D OBJ Free: A Guide to Finding and Using Free 3D Models</h1>
3
- <p>If you are interested in 3D modeling, animation, or printing, you may have come across the term OBJ file. OBJ files are one of the most common and versatile file formats for storing and exchanging 3D models. They can be used for various purposes, such as creating realistic renderings, adding details to your designs, or printing them in full color.</p>
4
- <p>In this article, we will explain what an OBJ file is, why it is useful, how to download free 3D OBJ models from various websites, and how to use them in different 3D software. By the end of this article, you will have a better understanding of the OBJ file format and how to take advantage of it.</p>
5
- <h2>download 3d obj free</h2><br /><p><b><b>Download File</b> &raquo;&raquo;&raquo; <a href="https://jinyurl.com/2uNLhM">https://jinyurl.com/2uNLhM</a></b></p><br /><br />
6
- <h2>What is an OBJ File and Why is it Useful?</h2>
7
- <h3>The Basics of OBJ File Format</h3>
8
- <p>An OBJ file is a text file format that stores a description of the surface of a 3D object, composed of polygons, curves, vertices, texture maps, and other object information. It is a vector file that can be scaled and has no maximum file size. It is often used as an exchange format for 3D graphics and multi-color 3D printing. It can be edited in a text editor or various 3D image editing programs.</p>
9
- <p>The OBJ file format was developed by Wavefront Technologies (whose technology later became part of Alias|Wavefront, the company behind the Maya 3D modeling software) for its Advanced Visualizer package. The file format is open and has been adopted by other 3D graphics application vendors. The OBJ file format is a simple data format that represents 3D geometry alone: the position of each vertex, the UV position of each texture coordinate vertex, vertex normals, and the faces that make up each polygon, defined as a list of vertices and texture vertices. Vertices are stored in a counter-clockwise order by default, making explicit declaration of face normals unnecessary. OBJ coordinates have no units, but OBJ files can contain scale information in a human-readable comment line.</p>
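To make the format concrete, here is a minimal sketch that writes a single textured triangle as OBJ text. The record keywords (`v`, `vt`, `vn`, `f`) are part of the OBJ specification; the file name and the geometry itself are just illustrative assumptions:

```python
# Write a minimal OBJ file: one triangle with texture coordinates and a normal.
# Indices in the "f" records are 1-based and reference v/vt/vn entries in order.
obj_text = """# a single counter-clockwise triangle
v 0.0 0.0 0.0
v 1.0 0.0 0.0
v 0.0 1.0 0.0
vt 0.0 0.0
vt 1.0 0.0
vt 0.0 1.0
vn 0.0 0.0 1.0
f 1/1/1 2/2/1 3/3/1
"""

with open("triangle.obj", "w") as f:
    f.write(obj_text)
```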
10
- <h3>The Benefits of OBJ File Format</h3>
11
- <p>There are several benefits of using the OBJ file format for your 3D models. Some of them are:</p>
61
- <ul>
62
- <li>It enables you to represent complex or irregularly shaped objects by dividing their surface into small, triangular "tiles". This tessellation process makes it easier to manipulate and render the design since you can modify each tile separately from the rest.</li>
63
- <li>It allows you to specify the geometry of 3D objects and their surface properties, including texture mapping and shading. This versatility makes the OBJ file format robust for creating realistic renderings of complex three-dimensional scenes.</li>
64
- <li>It supports high-resolution data compared to similar file formats like STL files. It can store textures and multiple colors in the same object, unlike STL files, which only support one color per object.</li>
65
- <li>It is compatible with many software programs and platforms, making it easy to share your files between different applications. You can also convert your OBJ files into other formats using online tools or software programs.</li>
66
- </ul>
67
- <h2>How to Download Free 3D OBJ Models from Various Websites</h2>
68
- <p>If you are looking for free 3D OBJ models to use for your projects, you are in luck. There are many websites that offer free downloads of high-quality 3D models in various categories and styles. Here are some of the best websites to download free 3D OBJ models:</p>
69
- <h3>TurboSquid</h3>
70
- <p>TurboSquid is one of the largest online marketplaces for 3D models. It offers both free and paid 3D models in various formats, including OBJ. You can browse by category, keyword, or popularity. You can also filter by price, license, rating, and poly count. TurboSquid has a large collection of free 3D models that you can download and use for personal or commercial projects. Some of the free 3D models include animals, vehicles, furniture, characters, and more. You can also find some free 3D models that are part of the StemCell initiative, which ensures that the models are compatible with multiple software and renderers.</p>
71
- <h3>Clara.io</h3>
72
- <p>Clara.io is a cloud-based 3D modeling, animation, and rendering platform that lets you create and share 3D content online. It also has a library of over 100,000 free 3D models that you can download and use for your projects. You can search by category, tag, or keyword. You can also filter by format, license, poly count, and rating. Clara.io supports various file formats, including OBJ, FBX, STL, DAE, and more. Some of the free 3D models include architecture, furniture, vehicles, characters, and more. You can also view and edit the 3D models online using the Clara.io editor.</p>
73
- <h3>CGTrader</h3>
74
- <p>CGTrader is another online marketplace for 3D models that offers both free and paid 3D models in various formats, including OBJ. You can browse by category, keyword, or popularity. You can also filter by price, license, rating, and poly count. CGTrader has a large collection of free 3D models that you can download and use for your projects. Some of the free 3D models include animals, vehicles, furniture, characters, and more. You can also find some free 3D models that are part of the AR/VR Ready collection, which ensures that the models are optimized for augmented reality and virtual reality applications.</p>
75
- <h3>Sketchfab</h3>
76
- <p>Sketchfab is a platform that lets you upload, view, and share 3D content online. It also has a library of over 4 million free and paid 3D models that you can download and use for your projects. You can search by category, tag, or keyword. You can also filter by format, license, poly count, and rating. Sketchfab supports various file formats, including OBJ, FBX, STL, DAE, and more. Some of the free 3D models include architecture, furniture, vehicles, characters, and more. You can also view and interact with the 3D models online using the Sketchfab viewer.</p>
77
- <h2>How to Use 3D OBJ Models in Different 3D Software</h2>
78
- <p>Once you have downloaded your free 3D OBJ models, you may want to use them in different 3D software for editing, rendering, or printing. Here are some of the most popular 3D software that support the OBJ file format and how to use them:</p>
79
- <h3>Blender</h3>
80
- <p>Blender is a free and open-source 3D creation suite that supports modeling, animation, rendering, sculpting, simulation, and more. It also supports various file formats, including OBJ. To import an OBJ file into Blender, follow these steps (a scripted alternative is sketched after the list):</p>
81
- <ol>
82
- <li>Open Blender and create a new project or open an existing one.</li>
83
- <li>Go to File > Import > Wavefront (.obj) and navigate to the location of your OBJ file.</li>
84
- <li>Select your OBJ file and click Import OBJ. You can adjust the import settings in the panel on the left side of the screen.</li>
85
- <li>Your OBJ model will appear in the 3D viewport. You can use the tools in Blender to edit, transform, or animate your model as you wish.</li>
86
- </ol>
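For batch work, the same import can be scripted through Blender's Python API. A minimal sketch, assuming it runs inside Blender (the `bpy` module is only available there, and the file path is a placeholder):

```python
import bpy

# Blender 2.8x-3.x exposes the OBJ importer as an operator:
bpy.ops.import_scene.obj(filepath="/path/to/model.obj")
# On Blender 4.x the equivalent call is:
# bpy.ops.wm.obj_import(filepath="/path/to/model.obj")
```

Run it from Blender's Scripting tab, or headlessly with `blender --background --python import_obj.py`.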
87
- <h3>3DS Max</h3>
88
- <p>3DS Max is a professional 3D modeling, animation, and rendering software that is widely used in the gaming, film, and design industries. It also supports various file formats, including OBJ. To import an OBJ file into 3DS Max, follow these steps:</p>
89
- <ol>
90
- <li>Open 3DS Max and create a new project or open an existing one.</li>
91
- <li>Go to File > Import and navigate to the location of your OBJ file.</li>
92
- <li>Select your OBJ file and click Open. You can adjust the import settings in the dialog box that appears.</li>
93
- <li>Your OBJ model will appear in the scene. You can use the tools in 3DS Max to edit, transform, or animate your model as you wish.</li>
94
- </ol>
95
- <h3>Cinema 4D</h3>
96
- <p>Cinema 4D is a powerful 3D modeling, animation, and rendering software that is used for motion graphics, visual effects, and design. It also supports various file formats, including OBJ. To import an OBJ file into Cinema 4D, follow these steps:</p>
97
- <ol>
98
- <li>Open Cinema 4D and create a new project or open an existing one.</li>
99
- <li>Go to File > Merge and navigate to the location of your OBJ file.</li>
100
- <li>Select your OBJ file and click Open. You can adjust the import settings in the dialog box that appears.</li>
101
- <li>Your OBJ model will appear in the object manager and the viewport. You can use the tools in Cinema 4D to edit, transform, or animate your model as you wish.</li>
102
- </ol>
103
- <h3>3D Builder for Windows</h3>
104
- <p>3D Builder is a free app for Windows that lets you view, create, edit, and print 3D models. It also supports various file formats, including OBJ. To import an OBJ file into 3D Builder, follow these steps:</p>
105
- <ol>
106
- <li>Open 3D Builder and create a new project or open an existing one.</li>
107
- <li>Go to Menu > Insert > Load Object and navigate to the location of your OBJ file.</li>
108
- <li>Select your OBJ file and click Open. Your OBJ model will appear in the scene.</li>
109
- <li>You can use the tools in 3D Builder to edit, transform, or print your model as you wish.</li>
110
- </ol>
111
- <h2>Conclusion</h2>
112
- <p>In this article, we have explained what an OBJ file is, why it is useful, how to download free 3D OBJ models from various websites, and how to use them in different 3D software. We hope that this article has helped you learn more about the OBJ file format and how to take advantage of it for your 3D projects.</p>
113
- <h2>FAQs</h2>
114
- <p>Here are some frequently asked questions about downloading free 3D OBJ models:</p>
115
- <h4>Q: What are some other websites that offer free 3D OBJ models?</h4>
116
- <p>A: Some other websites that offer free 3D OBJ models are Free3D.com, Archive3D.net, CadNav.com, and NASA.gov. You can also search for free 3D models on Google or Bing using keywords like "free obj models" or "free obj files".</p>
117
- <h4>Q: How can I convert an OBJ file into another file format?</h4>
118
- <p>A: You can use online tools or software programs to convert your OBJ files into other file formats. Some of the online tools are Online 3D Converter, Convertio, and AnyConv. Some of the software programs are MeshLab, Autodesk Fusion 360, and Adobe Photoshop.</p>
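As a scripted alternative to those converters, a conversion can also be done in a few lines. A minimal sketch, assuming the third-party `trimesh` package (my assumption, not mentioned above) is installed; file names are placeholders:

```python
import trimesh  # pip install trimesh

mesh = trimesh.load("model.obj")  # parse the OBJ geometry
mesh.export("model.stl")          # re-export in another supported format
```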
119
- <h4>Q: How can I optimize an OBJ file for faster loading or printing?</h4>
120
- <p>A: You can optimize your OBJ file by reducing the number of polygons, vertices, and textures in your model. This will make your file size smaller and improve the performance of your 3D software or printer. You can use online tools or software programs to optimize your OBJ files. Some of the online tools are Meshmixer, MeshOptimizer, and RapidCompact. Some of the software programs are Blender, 3DS Max, and Cinema 4D.</p>
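Decimation can likewise be scripted. A sketch assuming the third-party Open3D package (again my assumption); the target triangle count and file names are placeholders:

```python
import open3d as o3d  # pip install open3d

mesh = o3d.io.read_triangle_mesh("model.obj")
# Quadric decimation keeps the overall shape while cutting the triangle count.
simplified = mesh.simplify_quadric_decimation(target_number_of_triangles=10000)
o3d.io.write_triangle_mesh("model_small.obj", simplified)
```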
121
- <h4>Q: How can I edit an OBJ file in a text editor?</h4>
122
- <p>A: You can edit an OBJ file in a text editor by opening it with a program like Notepad, WordPad, or Sublime Text. You can then modify the lines of code that define the geometry and properties of your model. However, editing an OBJ file in a text editor is not recommended unless you are familiar with the syntax and structure of the file format. It is easier and safer to edit your OBJ file in a 3D image editing program.</p>
123
- <h4>Q: How can I view an OBJ file without downloading any software?</h4>
124
- <p>A: You can view an OBJ file without downloading any software by using online viewers or browsers that support the OBJ file format. Some of the online viewers are 3D Viewer Online, Viewstl.com, and 3D Model Viewer. Some of the browsers that support the OBJ file format are Chrome, Firefox, and Edge.</p>
spaces/2023Liu2023/bingo/src/components/chat-image.tsx DELETED
@@ -1,170 +0,0 @@
1
- import {
2
- useEffect,
3
- useState,
4
- useCallback,
5
- ChangeEvent,
6
- ClipboardEvent,
7
- MouseEventHandler,
8
- FormEvent,
9
- useRef
10
- } from "react"
11
- import Image from 'next/image'
12
- import PasteIcon from '@/assets/images/paste.svg'
13
- import UploadIcon from '@/assets/images/upload.svg'
14
- import CameraIcon from '@/assets/images/camera.svg'
15
- import { useBing } from '@/lib/hooks/use-bing'
16
- import { cn } from '@/lib/utils'
17
-
18
- interface ChatImageProps extends Pick<ReturnType<typeof useBing>, 'uploadImage'> {}
19
-
20
- const preventDefault: MouseEventHandler<HTMLDivElement> = (event) => {
21
- event.nativeEvent.stopImmediatePropagation()
22
- }
23
-
24
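- // Read a File into a base64 data URL so it can be uploaded as plain text.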
- const toBase64 = (file: File): Promise<string> => new Promise((resolve, reject) => {
25
- const reader = new FileReader()
26
- reader.readAsDataURL(file)
27
- reader.onload = () => resolve(reader.result as string)
28
- reader.onerror = reject
29
- })
30
-
31
- export function ChatImage({ children, uploadImage }: React.PropsWithChildren<ChatImageProps>) {
32
- const videoRef = useRef<HTMLVideoElement>(null)
33
- const canvasRef = useRef<HTMLCanvasElement>(null)
34
- const mediaStream = useRef<MediaStream>()
35
- const [panel, setPanel] = useState('none')
36
-
37
- const upload = useCallback((url: string) => {
38
- if (url) {
39
- uploadImage(url)
40
- }
41
- setPanel('none')
42
- }, [uploadImage]) // depend on uploadImage, not panel (stale-closure fix)
43
-
44
- const onUpload = useCallback(async (event: ChangeEvent<HTMLInputElement>) => {
45
- const file = event.target.files?.[0]
46
- if (file) {
47
- const fileDataUrl = await toBase64(file)
48
- if (fileDataUrl) {
49
- upload(fileDataUrl)
50
- }
51
- }
52
- }, [upload]) // include upload so the callback isn't stale
53
-
54
- const onPaste = useCallback((event: ClipboardEvent<HTMLInputElement>) => {
55
- const pasteUrl = event.clipboardData.getData('text') ?? ''
56
- upload(pasteUrl)
57
- }, [upload])
58
-
59
- const onEnter = useCallback((event: FormEvent<HTMLFormElement>) => {
60
- event.preventDefault()
61
- event.stopPropagation()
62
- // @ts-ignore
63
- const inputUrl = event.target.elements.image.value
64
- if (inputUrl) {
65
- upload(inputUrl)
66
- }
67
- }, [upload])
68
-
69
- const openVideo: MouseEventHandler<HTMLButtonElement> = async (event) => {
70
- event.stopPropagation()
71
- setPanel('camera-mode')
72
- }
73
-
74
- const onCapture = () => {
75
- if (canvasRef.current && videoRef.current) {
76
- const canvas = canvasRef.current
77
- canvas.width = videoRef.current!.videoWidth
78
- canvas.height = videoRef.current!.videoHeight
79
- canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height)
80
- const cameraUrl = canvas.toDataURL('image/jpeg')
81
- upload(cameraUrl)
82
- }
83
- }
84
-
85
- useEffect(() => {
86
- const handleBlur = () => {
87
- if (panel !== 'none') {
88
- setPanel('none')
89
- }
90
- }
91
- document.addEventListener('click', handleBlur)
92
- return () => {
93
- document.removeEventListener('click', handleBlur)
94
- }
95
- }, [panel])
96
-
97
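- // Acquire the webcam stream when entering camera mode; stop its tracks on exit.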
- useEffect(() => {
98
- if (panel === 'camera-mode') {
99
- navigator.mediaDevices.getUserMedia({ video: true, audio: false })
100
- .then(videoStream => {
101
- mediaStream.current = videoStream
102
- if (videoRef.current) {
103
- videoRef.current.srcObject = videoStream
104
- }
105
- })
106
- } else {
107
- if (mediaStream.current) {
108
- mediaStream.current.getTracks().forEach(function(track) {
109
- track.stop()
110
- })
111
- mediaStream.current = undefined
112
- }
113
- }
114
- }, [panel])
115
-
116
- return (
117
- <div className="visual-search-container">
118
- <div onClick={() => panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}</div>
119
- <div className={cn('visual-search', panel)} onClick={preventDefault}>
120
- <div className="normal-content">
121
- <div className="header">
122
- <h4>添加图像</h4>
123
- </div>
124
- <div className="paste">
125
- <Image alt="paste" src={PasteIcon} width={24} />
126
- <form onSubmitCapture={onEnter}>
127
- <input
128
- className="paste-input"
129
- id="sb_imgpst"
130
- type="text"
131
- name="image"
132
- placeholder="粘贴图像 URL"
133
- aria-label="粘贴图像 URL"
134
- onPaste={onPaste}
135
- onClickCapture={(e) => e.stopPropagation()}
136
- />
137
- </form>
138
- </div>
139
- <div className="buttons">
140
- <button type="button" aria-label="从此设备上传">
141
- <input
142
- id="vs_fileinput"
143
- className="fileinput"
144
- type="file"
145
- accept="image/gif, image/jpeg, image/png, image/webp"
146
- onChange={onUpload}
147
- />
148
- <Image alt="uplaod" src={UploadIcon} width={20} />
149
- 从此设备上传
150
- </button>
151
- <button type="button" aria-label="拍照" onClick={openVideo}>
152
- <Image alt="camera" src={CameraIcon} width={20} />
153
- 拍照
154
- </button>
155
- </div>
156
- </div>
157
- {panel === 'camera-mode' && <div className="cam-content">
158
- <div className="webvideo-container">
159
- <video className="webvideo" autoPlay muted playsInline ref={videoRef} />
160
- <canvas className="webcanvas" ref={canvasRef} />
161
- </div>
162
- <div className="cambtn" role="button" aria-label="拍照" onClick={onCapture}>
163
- <div className="cam-btn-circle-large"></div>
164
- <div className="cam-btn-circle-small"></div>
165
- </div>
166
- </div>}
167
- </div>
168
- </div>
169
- )
170
- }
spaces/2023Liu2023/bingo/src/pages/api/kblob.ts DELETED
@@ -1,56 +0,0 @@
1
- 'use server'
2
-
3
- import { NextApiRequest, NextApiResponse } from 'next'
4
- import FormData from 'form-data'
5
- import { fetch } from '@/lib/isomorphic'
6
- import { KBlobRequest } from '@/lib/bots/bing/types'
7
-
8
- const API_DOMAIN = 'https://bing.vcanbb.top'
9
-
10
- export const config = {
11
- api: {
12
- bodyParser: {
13
- sizeLimit: '10mb' // Set desired value here
14
- }
15
- }
16
- }
17
-
18
- export default async function handler(req: NextApiRequest, res: NextApiResponse) {
19
- try {
20
- const { knowledgeRequest, imageBase64 } = req.body as KBlobRequest
21
-
22
- const formData = new FormData()
23
- formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest))
24
- if (imageBase64) {
25
- formData.append('imageBase64', imageBase64)
26
- }
27
-
28
- const response = await fetch(`${API_DOMAIN}/images/kblob`,
29
- {
30
- method: 'POST',
31
- body: formData.getBuffer(),
32
- headers: {
33
- "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"",
34
- "sec-ch-ua-mobile": "?0",
35
- "sec-ch-ua-platform": "\"Windows\"",
36
- "Referer": `${API_DOMAIN}/web/index.html`,
37
- "Referrer-Policy": "origin-when-cross-origin",
38
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
39
- ...formData.getHeaders()
40
- }
41
- }
42
- ).then(res => res.text())
43
-
44
- res.writeHead(200, {
45
- 'Content-Type': 'application/json',
46
- })
47
- res.end(response || JSON.stringify({ result: { value: 'UploadFailed', message: '请更换 IP 或代理后重试' } })) // message: "Please switch your IP or proxy and retry"
48
- } catch (e) {
49
- return res.json({
50
- result: {
51
- value: 'UploadFailed',
52
- message: `${e}`
53
- }
54
- })
55
- }
56
- }
spaces/232labs/VToonify/vtoonify/model/raft/README.md DELETED
@@ -1,80 +0,0 @@
1
- # RAFT
2
- This repository contains the source code for our paper:
3
-
4
- [RAFT: Recurrent All Pairs Field Transforms for Optical Flow](https://arxiv.org/pdf/2003.12039.pdf)<br/>
5
- ECCV 2020 <br/>
6
- Zachary Teed and Jia Deng<br/>
7
-
8
- <img src="RAFT.png">
9
-
10
- ## Requirements
11
- The code has been tested with PyTorch 1.6 and CUDA 10.1.
12
- ```Shell
13
- conda create --name raft
14
- conda activate raft
15
- conda install pytorch=1.6.0 torchvision=0.7.0 cudatoolkit=10.1 matplotlib tensorboard scipy opencv -c pytorch
16
- ```
17
-
18
- ## Demos
19
- Pretrained models can be downloaded by running
20
- ```Shell
21
- ./download_models.sh
22
- ```
23
- or downloaded from [google drive](https://drive.google.com/drive/folders/1sWDsfuZ3Up38EUQt7-JDTT1HcGHuJgvT?usp=sharing)
24
-
25
- You can demo a trained model on a sequence of frames
26
- ```Shell
27
- python demo.py --model=models/raft-things.pth --path=demo-frames
28
- ```
29
-
30
- ## Required Data
31
- To evaluate/train RAFT, you will need to download the required datasets.
32
- * [FlyingChairs](https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs)
33
- * [FlyingThings3D](https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html)
34
- * [Sintel](http://sintel.is.tue.mpg.de/)
35
- * [KITTI](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow)
36
- * [HD1K](http://hci-benchmark.iwr.uni-heidelberg.de/) (optional)
37
-
38
-
39
- By default `datasets.py` will search for the datasets in these locations. You can create symbolic links to wherever the datasets were downloaded in the `datasets` folder
40
-
41
- ```Shell
42
- ├── datasets
43
- ├── Sintel
44
- ├── test
45
- ├── training
46
- ├── KITTI
47
- ├── testing
48
- ├── training
49
- ├── devkit
50
- ├── FlyingChairs_release
51
- ├── data
52
- ├── FlyingThings3D
53
- ├── frames_cleanpass
54
- ├── frames_finalpass
55
- ├── optical_flow
56
- ```
57
-
58
- ## Evaluation
59
- You can evaluate a trained model using `evaluate.py`
60
- ```Shell
61
- python evaluate.py --model=models/raft-things.pth --dataset=sintel --mixed_precision
62
- ```
63
-
64
- ## Training
65
- We used the following training schedule in our paper (2 GPUs). Training logs will be written to the `runs` directory and can be visualized using TensorBoard:
66
- ```Shell
67
- ./train_standard.sh
68
- ```
69
-
70
- If you have an RTX GPU, training can be accelerated using mixed precision. You can expect similar results in this setting (1 GPU):
71
- ```Shell
72
- ./train_mixed.sh
73
- ```
74
-
75
- ## (Optional) Efficient Implementation
76
- You can optionally use our alternate (efficient) implementation by compiling the provided CUDA extension
77
- ```Shell
78
- cd alt_cuda_corr && python setup.py install && cd ..
79
- ```
80
- and running `demo.py` and `evaluate.py` with the `--alternate_corr` flag. Note that this implementation is somewhat slower than the all-pairs version, but uses significantly less GPU memory during the forward pass.
spaces/AIARTCHAN/openpose_editor/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Openpose Editor
3
- emoji: 🤸
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: static
7
- pinned: false
8
- license: mit
9
- ---
10
-
11
- [Original post (Korean)](https://arca.live/b/aiart/70172781)
spaces/AIFILMS/generate_human_motion/VQ-Trans/visualize/simplify_loc2rot.py DELETED
@@ -1,131 +0,0 @@
1
- import numpy as np
2
- import os
3
- import torch
4
- from visualize.joints2smpl.src import config
5
- import smplx
6
- import h5py
7
- from visualize.joints2smpl.src.smplify import SMPLify3D
8
- from tqdm import tqdm
9
- import utils.rotation_conversions as geometry
10
- import argparse
11
-
12
-
13
- class joints2smpl:
14
-
15
- def __init__(self, num_frames, device_id, cuda=True):
16
- self.device = torch.device("cuda:" + str(device_id) if cuda else "cpu")
17
- # self.device = torch.device("cpu")
18
- self.batch_size = num_frames
19
- self.num_joints = 22 # for HumanML3D
20
- self.joint_category = "AMASS"
21
- self.num_smplify_iters = 150
22
- self.fix_foot = False
23
- print(config.SMPL_MODEL_DIR)
24
- smplmodel = smplx.create(config.SMPL_MODEL_DIR,
25
- model_type="smpl", gender="neutral", ext="pkl",
26
- batch_size=self.batch_size).to(self.device)
27
-
28
- # ## --- load the mean pose as original ----
29
- smpl_mean_file = config.SMPL_MEAN_FILE
30
-
31
- file = h5py.File(smpl_mean_file, 'r')
32
- self.init_mean_pose = torch.from_numpy(file['pose'][:]).unsqueeze(0).repeat(self.batch_size, 1).float().to(self.device)
33
- self.init_mean_shape = torch.from_numpy(file['shape'][:]).unsqueeze(0).repeat(self.batch_size, 1).float().to(self.device)
34
- self.cam_trans_zero = torch.Tensor([0.0, 0.0, 0.0]).unsqueeze(0).to(self.device)
35
- #
36
-
37
- # # #-------------initialize SMPLify
38
- self.smplify = SMPLify3D(smplxmodel=smplmodel,
39
- batch_size=self.batch_size,
40
- joints_category=self.joint_category,
41
- num_iters=self.num_smplify_iters,
42
- device=self.device)
43
-
44
-
45
- def npy2smpl(self, npy_path):
46
- out_path = npy_path.replace('.npy', '_rot.npy')
47
- motions = np.load(npy_path, allow_pickle=True)[None][0]
48
- # print_batch('', motions)
49
- n_samples = motions['motion'].shape[0]
50
- all_thetas = []
51
- for sample_i in tqdm(range(n_samples)):
52
- thetas, _ = self.joint2smpl(motions['motion'][sample_i].transpose(2, 0, 1)) # [nframes, njoints, 3]
53
- all_thetas.append(thetas.cpu().numpy())
54
- motions['motion'] = np.concatenate(all_thetas, axis=0)
55
- print('motions', motions['motion'].shape)
56
-
57
- print(f'Saving [{out_path}]')
58
- np.save(out_path, motions)
59
- exit()
60
-
61
-
62
-
63
- def joint2smpl(self, input_joints, init_params=None):
64
- _smplify = self.smplify # if init_params is None else self.smplify_fast
65
- pred_pose = torch.zeros(self.batch_size, 72).to(self.device)
66
- pred_betas = torch.zeros(self.batch_size, 10).to(self.device)
67
- pred_cam_t = torch.zeros(self.batch_size, 3).to(self.device)
68
- keypoints_3d = torch.zeros(self.batch_size, self.num_joints, 3).to(self.device)
69
-
70
- # run the whole seqs
71
- num_seqs = input_joints.shape[0]
72
-
73
-
74
- # joints3d = input_joints[idx] # *1.2 #scale problem [check first]
75
- keypoints_3d = torch.Tensor(input_joints).to(self.device).float()
76
-
77
- # if idx == 0:
78
- if init_params is None:
79
- pred_betas = self.init_mean_shape
80
- pred_pose = self.init_mean_pose
81
- pred_cam_t = self.cam_trans_zero
82
- else:
83
- pred_betas = init_params['betas']
84
- pred_pose = init_params['pose']
85
- pred_cam_t = init_params['cam']
86
-
87
- if self.joint_category == "AMASS":
88
- confidence_input = torch.ones(self.num_joints)
89
- # make sure the foot and ankle
90
- if self.fix_foot == True:
91
- confidence_input[7] = 1.5
92
- confidence_input[8] = 1.5
93
- confidence_input[10] = 1.5
94
- confidence_input[11] = 1.5
95
- else:
96
- print("Such category not settle down!")
97
-
98
- new_opt_vertices, new_opt_joints, new_opt_pose, new_opt_betas, \
99
- new_opt_cam_t, new_opt_joint_loss = _smplify(
100
- pred_pose.detach(),
101
- pred_betas.detach(),
102
- pred_cam_t.detach(),
103
- keypoints_3d,
104
- conf_3d=confidence_input.to(self.device),
105
- # seq_ind=idx
106
- )
107
-
108
- thetas = new_opt_pose.reshape(self.batch_size, 24, 3)
109
- thetas = geometry.matrix_to_rotation_6d(geometry.axis_angle_to_matrix(thetas)) # [bs, 24, 6]
110
- root_loc = keypoints_3d[:, 0].clone().detach() # [bs, 3]; avoids the torch.tensor() copy warning on an existing tensor
111
- root_loc = torch.cat([root_loc, torch.zeros_like(root_loc)], dim=-1).unsqueeze(1) # [bs, 1, 6]
112
- thetas = torch.cat([thetas, root_loc], dim=1).unsqueeze(0).permute(0, 2, 3, 1) # [1, 25, 6, 196]
113
-
114
- return thetas.clone().detach(), {'pose': new_opt_joints[0, :24].flatten().clone().detach(), 'betas': new_opt_betas.clone().detach(), 'cam': new_opt_cam_t.clone().detach()}
115
-
116
-
117
- if __name__ == '__main__':
118
- parser = argparse.ArgumentParser()
119
- parser.add_argument("--input_path", type=str, required=True, help='Blender file or dir with blender files')
120
- parser.add_argument("--cuda", type=bool, default=True, help='')
121
- parser.add_argument("--device", type=int, default=0, help='')
122
- params = parser.parse_args()
123
-
124
- # NOTE: joints2smpl requires num_frames (it sets the SMPLify batch size); the
- # original call omitted it and would raise a TypeError. 196, the HumanML3D
- # maximum sequence length, is an assumed placeholder.
- simplify = joints2smpl(num_frames=196, device_id=params.device, cuda=params.cuda)
125
-
126
- if os.path.isfile(params.input_path) and params.input_path.endswith('.npy'):
127
- simplify.npy2smpl(params.input_path)
128
- elif os.path.isdir(params.input_path):
129
- files = [os.path.join(params.input_path, f) for f in os.listdir(params.input_path) if f.endswith('.npy')]
130
- for f in files:
131
- simplify.npy2smpl(f)
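A minimal usage sketch for the class above (the file paths and frame count are placeholders, not part of the original script):

```python
import numpy as np

joints = np.load("joints.npy")  # expected shape: (nframes, 22, 3) HumanML3D joints
model = joints2smpl(num_frames=joints.shape[0], device_id=0, cuda=True)
thetas, smpl_params = model.joint2smpl(joints)  # thetas: [1, 25, 6, nframes]
np.save("thetas.npy", thetas.cpu().numpy())
```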
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/light.py DELETED
@@ -1,385 +0,0 @@
1
- """Punctual light sources as defined by the glTF 2.0 KHR extension at
2
- https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Khronos/KHR_lights_punctual
3
-
4
- Author: Matthew Matl
5
- """
6
- import abc
7
- import numpy as np
8
- import six
9
-
10
- from OpenGL.GL import *
11
-
12
- from .utils import format_color_vector
13
- from .texture import Texture
14
- from .constants import SHADOW_TEX_SZ
15
- from .camera import OrthographicCamera, PerspectiveCamera
16
-
17
-
18
-
19
- @six.add_metaclass(abc.ABCMeta)
20
- class Light(object):
21
- """Base class for all light objects.
22
-
23
- Parameters
24
- ----------
25
- color : (3,) float
26
- RGB value for the light's color in linear space.
27
- intensity : float
28
- Brightness of light. The units that this is defined in depend on the
29
- type of light. Point and spot lights use luminous intensity in candela
30
- (lm/sr), while directional lights use illuminance in lux (lm/m2).
31
- name : str, optional
32
- Name of the light.
33
- """
34
- def __init__(self,
35
- color=None,
36
- intensity=None,
37
- name=None):
38
-
39
- if color is None:
40
- color = np.ones(3)
41
- if intensity is None:
42
- intensity = 1.0
43
-
44
- self.name = name
45
- self.color = color
46
- self.intensity = intensity
47
- self._shadow_camera = None
48
- self._shadow_texture = None
49
-
50
- @property
51
- def name(self):
52
- """str : The user-defined name of this object.
53
- """
54
- return self._name
55
-
56
- @name.setter
57
- def name(self, value):
58
- if value is not None:
59
- value = str(value)
60
- self._name = value
61
-
62
- @property
63
- def color(self):
64
- """(3,) float : The light's color.
65
- """
66
- return self._color
67
-
68
- @color.setter
69
- def color(self, value):
70
- self._color = format_color_vector(value, 3)
71
-
72
- @property
73
- def intensity(self):
74
- """float : The light's intensity in candela or lux.
75
- """
76
- return self._intensity
77
-
78
- @intensity.setter
79
- def intensity(self, value):
80
- self._intensity = float(value)
81
-
82
- @property
83
- def shadow_texture(self):
84
- """:class:`.Texture` : A texture used to hold shadow maps for this light.
85
- """
86
- return self._shadow_texture
87
-
88
- @shadow_texture.setter
89
- def shadow_texture(self, value):
90
- if self._shadow_texture is not None:
91
- if self._shadow_texture._in_context():
92
- self._shadow_texture.delete()
93
- self._shadow_texture = value
94
-
95
- @abc.abstractmethod
96
- def _generate_shadow_texture(self, size=None):
97
- """Generate a shadow texture for this light.
98
-
99
- Parameters
100
- ----------
101
- size : int, optional
102
- Size of texture map. Must be a positive power of two.
103
- """
104
- pass
105
-
106
- @abc.abstractmethod
107
- def _get_shadow_camera(self, scene_scale):
108
- """Generate and return a shadow mapping camera for this light.
109
-
110
- Parameters
111
- ----------
112
- scene_scale : float
113
- Length of scene's bounding box diagonal.
114
-
115
- Returns
116
- -------
117
- camera : :class:`.Camera`
118
- The camera used to render shadowmaps for this light.
119
- """
120
- pass
121
-
122
-
123
- class DirectionalLight(Light):
124
- """Directional lights are light sources that act as though they are
125
- infinitely far away and emit light in the direction of the local -z axis.
126
- This light type inherits the orientation of the node that it belongs to;
127
- position and scale are ignored except for their effect on the inherited
128
- node orientation. Because it is at an infinite distance, the light is
129
- not attenuated. Its intensity is defined in lumens per metre squared,
130
- or lux (lm/m2).
131
-
132
- Parameters
133
- ----------
134
- color : (3,) float, optional
135
- RGB value for the light's color in linear space. Defaults to white
136
- (i.e. [1.0, 1.0, 1.0]).
137
- intensity : float, optional
138
- Brightness of light, in lux (lm/m^2). Defaults to 1.0
139
- name : str, optional
140
- Name of the light.
141
- """
142
-
143
- def __init__(self,
144
- color=None,
145
- intensity=None,
146
- name=None):
147
- super(DirectionalLight, self).__init__(
148
- color=color,
149
- intensity=intensity,
150
- name=name,
151
- )
152
-
153
- def _generate_shadow_texture(self, size=None):
154
- """Generate a shadow texture for this light.
155
-
156
- Parameters
157
- ----------
158
- size : int, optional
159
- Size of texture map. Must be a positive power of two.
160
- """
161
- if size is None:
162
- size = SHADOW_TEX_SZ
163
- self.shadow_texture = Texture(width=size, height=size,
164
- source_channels='D', data_format=GL_FLOAT)
165
-
166
- def _get_shadow_camera(self, scene_scale):
167
- """Generate and return a shadow mapping camera for this light.
168
-
169
- Parameters
170
- ----------
171
- scene_scale : float
172
- Length of scene's bounding box diagonal.
173
-
174
- Returns
175
- -------
176
- camera : :class:`.Camera`
177
- The camera used to render shadowmaps for this light.
178
- """
179
- return OrthographicCamera(
180
- znear=0.01 * scene_scale,
181
- zfar=10 * scene_scale,
182
- xmag=scene_scale,
183
- ymag=scene_scale
184
- )
185
-
186
-
187
- class PointLight(Light):
188
- """Point lights emit light in all directions from their position in space;
189
- rotation and scale are ignored except for their effect on the inherited
190
- node position. The brightness of the light attenuates in a physically
191
- correct manner as distance increases from the light's position (i.e.
192
- brightness goes like the inverse square of the distance). Point light
193
- intensity is defined in candela, which is lumens per square radian (lm/sr).
194
-
195
- Parameters
196
- ----------
197
- color : (3,) float
198
- RGB value for the light's color in linear space.
199
- intensity : float
200
- Brightness of light in candela (lm/sr).
201
- range : float
202
- Cutoff distance at which light's intensity may be considered to
203
- have reached zero. If None, the range is assumed to be infinite.
204
- name : str, optional
205
- Name of the light.
206
- """
207
-
208
- def __init__(self,
209
- color=None,
210
- intensity=None,
211
- range=None,
212
- name=None):
213
- super(PointLight, self).__init__(
214
- color=color,
215
- intensity=intensity,
216
- name=name,
217
- )
218
- self.range = range
219
-
220
- @property
221
- def range(self):
222
- """float : The cutoff distance for the light.
223
- """
224
- return self._range
225
-
226
- @range.setter
227
- def range(self, value):
228
- if value is not None:
229
- value = float(value)
230
- if value <= 0:
231
- raise ValueError('Range must be > 0')
232
- self._range = value
234
-
235
- def _generate_shadow_texture(self, size=None):
236
- """Generate a shadow texture for this light.
237
-
238
- Parameters
239
- ----------
240
- size : int, optional
241
- Size of texture map. Must be a positive power of two.
242
- """
243
- raise NotImplementedError('Shadows not implemented for point lights')
244
-
245
- def _get_shadow_camera(self, scene_scale):
246
- """Generate and return a shadow mapping camera for this light.
247
-
248
- Parameters
249
- ----------
250
- scene_scale : float
251
- Length of scene's bounding box diagonal.
252
-
253
- Returns
254
- -------
255
- camera : :class:`.Camera`
256
- The camera used to render shadowmaps for this light.
257
- """
258
- raise NotImplementedError('Shadows not implemented for point lights')
259
-
260
-
261
- class SpotLight(Light):
262
- """Spot lights emit light in a cone in the direction of the local -z axis.
263
- The angle and falloff of the cone is defined using two numbers, the
264
- ``innerConeAngle`` and ``outerConeAngle``.
265
- As with point lights, the brightness
266
- also attenuates in a physically correct manner as distance increases from
267
- the light's position (i.e. brightness goes like the inverse square of the
268
- distance). Spot light intensity refers to the brightness inside the
269
- ``innerConeAngle`` (and at the location of the light) and is defined in
270
- candela, which is lumens per square radian (lm/sr). A spot light's position
271
- and orientation are inherited from its node transform. Inherited scale does
272
- not affect cone shape, and is ignored except for its effect on position
273
- and orientation.
274
-
275
- Parameters
276
- ----------
277
- color : (3,) float
278
- RGB value for the light's color in linear space.
279
- intensity : float
280
- Brightness of light in candela (lm/sr).
281
- range : float
282
- Cutoff distance at which light's intensity may be considered to
283
- have reached zero. If None, the range is assumed to be infinite.
284
- innerConeAngle : float
285
- Angle, in radians, from centre of spotlight where falloff begins.
286
- Must be greater than or equal to ``0`` and less
287
- than ``outerConeAngle``. Defaults to ``0``.
288
- outerConeAngle : float
289
- Angle, in radians, from centre of spotlight where falloff ends.
290
- Must be greater than ``innerConeAngle`` and less than or equal to
291
- ``PI / 2.0``. Defaults to ``PI / 4.0``.
292
- name : str, optional
293
- Name of the light.
294
- """
295
-
296
- def __init__(self,
297
- color=None,
298
- intensity=None,
299
- range=None,
300
- innerConeAngle=0.0,
301
- outerConeAngle=(np.pi / 4.0),
302
- name=None):
303
- super(SpotLight, self).__init__(
304
- name=name,
305
- color=color,
306
- intensity=intensity,
307
- )
308
- self.outerConeAngle = outerConeAngle
309
- self.innerConeAngle = innerConeAngle
310
- self.range = range
311
-
312
- @property
313
- def innerConeAngle(self):
314
- """float : The inner cone angle in radians.
315
- """
316
- return self._innerConeAngle
317
-
318
- @innerConeAngle.setter
319
- def innerConeAngle(self, value):
320
- if value < 0.0 or value > self.outerConeAngle:
321
- raise ValueError('Invalid value for inner cone angle')
322
- self._innerConeAngle = float(value)
323
-
324
- @property
325
- def outerConeAngle(self):
326
- """float : The outer cone angle in radians.
327
- """
328
- return self._outerConeAngle
329
-
330
- @outerConeAngle.setter
331
- def outerConeAngle(self, value):
332
- if value < 0.0 or value > np.pi / 2.0 + 1e-9:
333
- raise ValueError('Invalid value for outer cone angle')
334
- self._outerConeAngle = float(value)
335
-
336
- @property
337
- def range(self):
338
- """float : The cutoff distance for the light.
339
- """
340
- return self._range
341
-
342
- @range.setter
343
- def range(self, value):
344
- if value is not None:
345
- value = float(value)
346
- if value <= 0:
347
- raise ValueError('Range must be > 0')
348
- self._range = value
350
-
351
- def _generate_shadow_texture(self, size=None):
352
- """Generate a shadow texture for this light.
353
-
354
- Parameters
355
- ----------
356
- size : int, optional
357
- Size of texture map. Must be a positive power of two.
358
- """
359
- if size is None:
360
- size = SHADOW_TEX_SZ
361
- self.shadow_texture = Texture(width=size, height=size,
362
- source_channels='D', data_format=GL_FLOAT)
363
-
364
- def _get_shadow_camera(self, scene_scale):
365
- """Generate and return a shadow mapping camera for this light.
366
-
367
- Parameters
368
- ----------
369
- scene_scale : float
370
- Length of scene's bounding box diagonal.
371
-
372
- Returns
373
- -------
374
- camera : :class:`.Camera`
375
- The camera used to render shadowmaps for this light.
376
- """
377
- return PerspectiveCamera(
378
- znear=0.01 * scene_scale,
379
- zfar=10 * scene_scale,
380
- yfov=np.clip(2 * self.outerConeAngle + np.pi / 16.0, 0.0, np.pi),
381
- aspectRatio=1.0
382
- )
383
-
384
-
385
- __all__ = ['Light', 'DirectionalLight', 'SpotLight', 'PointLight']
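A minimal usage sketch for these classes, assuming the surrounding `pyrender` package is installed (poses and intensities are arbitrary example values):

```python
import numpy as np
import pyrender

scene = pyrender.Scene()
# Directional light: intensity in lux; it shines along the node's -z axis.
scene.add(pyrender.DirectionalLight(color=np.ones(3), intensity=3.0), pose=np.eye(4))
# Spot light: intensity in candela, with a soft falloff between the two cone angles.
spot = pyrender.SpotLight(color=np.ones(3), intensity=10.0,
                          innerConeAngle=np.pi / 8, outerConeAngle=np.pi / 4)
scene.add(spot, pose=np.eye(4))
```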
spaces/AIGC-Audio/AudioGPT/sound_extraction/model/text_encoder.py DELETED
@@ -1,45 +0,0 @@
- import torch
- import torch.nn as nn
- from transformers import BertModel, BertTokenizer
- import warnings
- warnings.filterwarnings('ignore')
-
- # pretrained model name: (model class, model tokenizer)
- MODELS = {
-     'prajjwal1/bert-mini': (BertModel, BertTokenizer),
- }
-
-
- class Text_Encoder(nn.Module):
-     def __init__(self, device):
-         super(Text_Encoder, self).__init__()
-         self.base_model = 'prajjwal1/bert-mini'
-         self.dropout = 0.1
-
-         self.tokenizer = MODELS[self.base_model][1].from_pretrained(self.base_model)
-
-         self.bert_layer = MODELS[self.base_model][0].from_pretrained(
-             self.base_model,
-             add_pooling_layer=False,
-             hidden_dropout_prob=self.dropout,
-             attention_probs_dropout_prob=self.dropout,
-             output_hidden_states=True)
-
-         self.linear_layer = nn.Sequential(nn.Linear(256, 256), nn.ReLU(inplace=True))
-
-         self.device = device
-
-     def tokenize(self, caption):
-         tokenized = self.tokenizer(caption, add_special_tokens=False, padding=True, return_tensors='pt')
-         input_ids = tokenized['input_ids'].to(self.device)
-         attns_mask = tokenized['attention_mask'].to(self.device)
-         return input_ids, attns_mask
-
-     def forward(self, input_ids, attns_mask):
-         output = self.bert_layer(input_ids=input_ids, attention_mask=attns_mask)[0]
-         cls_embed = output[:, 0, :]
-         text_embed = self.linear_layer(cls_embed)
-         return text_embed, output  # text_embed: (batch, hidden_size)
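For reference, a minimal sketch of how the deleted encoder is driven end to end (illustrative only; the caption is a made-up example, and the `prajjwal1/bert-mini` weights are downloaded on first use):

```python
import torch

encoder = Text_Encoder(device=torch.device('cpu'))
input_ids, attns_mask = encoder.tokenize(['a dog barking in the rain'])
text_embed, token_states = encoder(input_ids, attns_mask)
print(text_embed.shape)    # (1, 256): CLS hidden state projected by the linear layer
print(token_states.shape)  # (1, seq_len, 256): per-token hidden states from bert-mini
```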
spaces/AIWaves/Software_Company/src/agents/utils.py DELETED
@@ -1,480 +0,0 @@
- # coding=utf-8
- # Copyright 2023 The AIWaves Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Helper functions for an LLM autonomous agent."""
- import datetime
- import json
- import os
- import random
- import re
- import string
-
- import numpy as np
- import openai
- import pandas
- import requests
- import torch
- from langchain.document_loaders import UnstructuredFileLoader
- from langchain.text_splitter import CharacterTextSplitter
- from sentence_transformers import SentenceTransformer
- from text2vec import semantic_search
- from tqdm import tqdm
-
- embed_model_name = os.environ["Embed_Model"] if "Embed_Model" in os.environ else "text-embedding-ada-002"
- if embed_model_name not in ["text-embedding-ada-002"]:
-     embedding_model = SentenceTransformer(
-         embed_model_name, device=torch.device("cpu")
-     )
-
-
- def get_embedding(sentence):
-     if embed_model_name in ["text-embedding-ada-002"]:
-         openai.api_key = os.environ["API_KEY"]
-         if "API_BASE" in os.environ:
-             openai.api_base = os.environ["API_BASE"]
-         embed = openai.Embedding.create(
-             model=embed_model_name,
-             input=sentence
-         )["data"][0]["embedding"]
-         embed = torch.tensor(embed, dtype=torch.float32)
-     else:
-         embed = embedding_model.encode(sentence, convert_to_tensor=True)
-     if len(embed.shape) == 1:
-         embed = embed.unsqueeze(0)
-     return embed
-
-
- def get_code():
-     """Return a random 8-character alphanumeric code."""
-     return "".join(random.sample(string.ascii_letters + string.digits, 8))
-
-
- def get_content_between_a_b(start_tag, end_tag, text):
-     """Collect every span of text found between start_tag and end_tag.
-
-     Args:
-         start_tag (str): opening tag
-         end_tag (str): closing tag
-         text (str): complete sentence
-
-     Returns:
-         str: the content between start_tag and end_tag, space-joined
-     """
-     extracted_text = ""
-     start_index = text.find(start_tag)
-     while start_index != -1:
-         end_index = text.find(end_tag, start_index + len(start_tag))
-         if end_index != -1:
-             extracted_text += text[start_index + len(start_tag):end_index] + " "
-             start_index = text.find(start_tag, end_index + len(end_tag))
-         else:
-             break
-     return extracted_text.strip()
-
-
- def extract(text, type):
-     """Extract the content between <type> and </type>.
-
-     Args:
-         text (str): complete sentence
-         type (str): tag name
-
-     Returns:
-         str: content between <type> and </type>
-     """
-     return get_content_between_a_b(f"<{type}>", f"</{type}>", text)
-
-
- def count_files_in_directory(directory):
-     """Count the files in the given directory."""
-     return len([f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))])
-
-
- def delete_oldest_files(directory, num_to_delete):
-     """Delete the num_to_delete oldest files in the directory."""
-     # List the files with their modification times, sorted oldest first.
-     files = [(f, os.path.getmtime(os.path.join(directory, f)))
-              for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
-     files.sort(key=lambda item: item[1])
-     for i in range(min(num_to_delete, len(files))):
-         os.remove(os.path.join(directory, files[i][0]))
-
-
- def delete_files_if_exceed_threshold(directory, threshold, num_to_keep):
-     """If the file count exceeds threshold, delete the oldest files down to num_to_keep."""
-     file_count = count_files_in_directory(directory)
-     if file_count > threshold:
-         delete_oldest_files(directory, file_count - num_to_keep)
-
-
- def save_logs(log_path, messages, response):
-     log_path = log_path if log_path else "logs"
-     os.makedirs(log_path, exist_ok=True)
-     delete_files_if_exceed_threshold(log_path, 20, 10)
-     log = {"input": messages, "output": response}
-     log_file = os.path.join(
-         log_path,
-         datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ".json")
-     with open(log_file, "w", encoding="utf-8") as f:
-         json.dump(log, f, ensure_ascii=False, indent=2)
-
-
- def semantic_search_word2vec(query_embedding, kb_embeddings, top_k):
-     return semantic_search(query_embedding, kb_embeddings, top_k=top_k)
-
-
- def cut_sent(para):
-     """Split text into sentences (handling Chinese punctuation) and group them into chunks of three."""
-     para = re.sub("([。!?\?])([^”’])", r"\1\n\2", para)
-     para = re.sub("(\.{6})([^”’])", r"\1\n\2", para)
-     para = re.sub("(\…{2})([^”’])", r"\1\n\2", para)
-     para = re.sub("([。!?\?][”’])([^,。!?\?])", r"\1\n\2", para)
-     para = para.rstrip()
-     pieces = [i for i in para.split("\n") if i]
-     batch_size = 3
-     chunks = [
-         " ".join(pieces[i:i + batch_size])
-         for i in range(0, len(pieces), batch_size)
-     ]
-     return chunks
-
-
- def process_document(file_path):
-     """Build a JSON knowledge base from a document.
-
-     A ``.csv`` file is treated as Q&A data (columns ``question`` and ``answer``)
-     and embedded in several combinations (q+chunk, chunk, q, q+a, a); any other
-     file type is split into overlapping chunks which are embedded directly.
-
-     Json format:
-         Dict[num, Dict[q: str, a: str, chunk: str, emb: List[float]]]
-     """
-     final_dict = {}
-     count = 0
-     if file_path.endswith(".csv"):
-         dataset = pandas.read_csv(file_path)
-         questions = dataset["question"]
-         answers = dataset["answer"]
-         # embed q + chunk
-         for q, a in zip(questions, answers):
-             for text in cut_sent(a):
-                 final_dict[count] = {"q": q, "a": a, "chunk": text, "emb": get_embedding(q + text).tolist()}
-                 count += 1
-         # embed chunk
-         for q, a in zip(questions, answers):
-             for text in cut_sent(a):
-                 final_dict[count] = {"q": q, "a": a, "chunk": text, "emb": get_embedding(text).tolist()}
-                 count += 1
-         # embed q
-         for q, a in zip(questions, answers):
-             final_dict[count] = {"q": q, "a": a, "chunk": a, "emb": get_embedding(q).tolist()}
-             count += 1
-         # embed q + a
-         for q, a in zip(questions, answers):
-             final_dict[count] = {"q": q, "a": a, "chunk": a, "emb": get_embedding(q + a).tolist()}
-             count += 1
-         # embed a
-         for q, a in zip(questions, answers):
-             final_dict[count] = {"q": q, "a": a, "chunk": a, "emb": get_embedding(a).tolist()}
-             count += 1
-         print(f"finished embedding {len(final_dict)} entries!")
-         os.makedirs("temp_database", exist_ok=True)
-         save_path = os.path.join(
-             "temp_database",
-             os.path.splitext(os.path.basename(file_path))[0] + ".json")
-         print(save_path)
-         with open(save_path, "w") as f:
-             json.dump(final_dict, f, ensure_ascii=False, indent=2)
-         return {"knowledge_base": save_path, "type": "QA"}
-     else:
-         loader = UnstructuredFileLoader(file_path)
-         docs = loader.load()
-         text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=100)
-         docs = text_splitter.split_text(docs[0].page_content)
-         os.makedirs("temp_database", exist_ok=True)
-         save_path = os.path.join(
-             "temp_database",
-             os.path.splitext(os.path.basename(file_path))[0] + ".json")
-         for c in tqdm(docs):
-             final_dict[count] = {"chunk": c, "emb": get_embedding(c).tolist()}
-             count += 1
-         print(f"finished embedding {len(final_dict)} chunks!")
-         with open(save_path, "w") as f:
-             json.dump(final_dict, f, ensure_ascii=False, indent=2)
-         return {"knowledge_base": save_path, "type": "UnstructuredFile"}
-
-
- def load_knowledge_base_qa(path):
-     """Load a JSON-format Q&A knowledge base."""
-     print("path", path)
-     with open(path, "r") as f:
-         data = json.load(f)
-     embeddings = []
-     questions = []
-     answers = []
-     chunks = []
-     for idx in range(len(data.keys())):
-         embeddings.append(data[str(idx)]["emb"])
-         questions.append(data[str(idx)]["q"])
-         answers.append(data[str(idx)]["a"])
-         chunks.append(data[str(idx)]["chunk"])
-     embeddings = np.array(embeddings, dtype=np.float32)
-     embeddings = torch.from_numpy(embeddings).squeeze()
-     return embeddings, questions, answers, chunks
-
-
- def load_knowledge_base_UnstructuredFile(path):
-     """Load a JSON-format unstructured-file knowledge base."""
-     with open(path, "r") as f:
-         data = json.load(f)
-     embeddings = []
-     chunks = []
-     for idx in range(len(data.keys())):
-         embeddings.append(data[str(idx)]["emb"])
-         chunks.append(data[str(idx)]["chunk"])
-     embeddings = np.array(embeddings, dtype=np.float32)
-     embeddings = torch.from_numpy(embeddings).squeeze()
-     return embeddings, chunks
-
-
- def cos_sim(a: torch.Tensor, b: torch.Tensor):
-     """
-     Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
-     :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
-     """
-     if not isinstance(a, torch.Tensor):
-         a = torch.tensor(a)
-     if not isinstance(b, torch.Tensor):
-         b = torch.tensor(b)
-     if len(a.shape) == 1:
-         a = a.unsqueeze(0)
-     if len(b.shape) == 1:
-         b = b.unsqueeze(0)
-     a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
-     b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
-     return torch.mm(a_norm, b_norm.transpose(0, 1))
-
-
- def matching_a_b(a, b, requirements=None):
-     a_embedding = get_embedding(a)
-     b_embedding = get_embedding(b)
-     sim_scores = cos_sim(a_embedding, b_embedding)[0]
-     return sim_scores
-
-
- def matching_category(inputtext,
-                       forest_name,
-                       requirements=None,
-                       cat_embedder=None,
-                       top_k=3):
-     """
-     Args:
-         inputtext: the category name to be matched
-         forest_name: names of the search-tree categories
-         top_k: return the top_k highest-scoring results (default 3)
-
-     Returns:
-         list: [[top1_name, ..., topk_name], [top1_score, ..., topk_score], topk_idx]
-     """
-     sim_scores = torch.zeros([100])
-     if inputtext:
-         input_embedding = get_embedding(inputtext)
-         sim_scores = cos_sim(input_embedding, cat_embedder)[0]
-
-     if requirements:
-         requirements = requirements.split(" ")
-         requirements_embedding = get_embedding(requirements)
-         req_scores = cos_sim(requirements_embedding, cat_embedder)
-         req_scores = torch.mean(req_scores, dim=0)
-         total_scores = req_scores
-     else:
-         total_scores = sim_scores
-
-     top_k_cat = torch.topk(total_scores, k=top_k)
-     top_k_score, top_k_idx = top_k_cat[0], top_k_cat[1]
-     top_k_name = [forest_name[top_k_idx[i]] for i in range(0, top_k)]
-     return [top_k_name, top_k_score.tolist(), top_k_idx]
-
-
- def sample_with_order_preserved(lst, num):
-     """Randomly sample from the list while maintaining the original order."""
-     indices = list(range(len(lst)))
-     sampled_indices = random.sample(indices, num)
-     sampled_indices.sort()  # keep the original order
-     return [lst[i] for i in sampled_indices]
-
-
- def limit_values(data, max_values):
-     """Reduce each value list in the dictionary to max_values entries, keeping the original order."""
-     for key, values in data.items():
-         if len(values) > max_values:
-             data[key] = sample_with_order_preserved(values, max_values)
-     return data
-
-
- def limit_keys(data, max_keys):
-     """Reduce the dictionary to the specified number of keys."""
-     keys = list(data.keys())
-     if len(keys) > max_keys:
-         keys = sample_with_order_preserved(keys, max_keys)
-         data = {key: data[key] for key in keys}
-     return data
-
-
- def flatten_dict(nested_dict):
-     """Flatten a nested dictionary into a single level."""
-     flattened_dict = {}
-     for key, value in nested_dict.items():
-         if isinstance(value, dict):
-             flattened_dict.update(flatten_dict(value))
-         else:
-             flattened_dict[key] = value
-     return flattened_dict
-
-
- def merge_list(list1, list2):
-     for l in list2:
-         if l not in list1:
-             list1.append(l)
-     return list1
-
-
- def Search_Engines(req):
-     fetch_size = int(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5
-     new_dict = {"keyword": req, "catLeafName": "", "fetchSize": fetch_size}
-     res = requests.post(
-         url=os.environ["SHOPPING_SEARCH"],
-         json=new_dict,
-     )
-     user_dict = json.loads(res.text)
-     if "data" in user_dict.keys():
-         request_items = user_dict["data"]["items"]  # JSON describing the retrieved products
-         top_category = user_dict["data"]["topCategories"]
-         return request_items, top_category
-     else:
-         return [], []
-
-
- def search_with_api(requirements, target_category):
-     fetch_size = int(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5
-     request_items = []
-     top_category = []
-     all_req_list = requirements.split(" ")
-     count = 0
-
-     while len(request_items) < fetch_size and len(all_req_list) > 0:
-         if count:
-             all_req_list.pop(0)
-         all_req = " ".join(all_req_list)
-         if target_category not in all_req_list:
-             all_req = all_req + " " + target_category
-         now_request_items, top_category = Search_Engines(all_req)
-         request_items = merge_list(request_items, now_request_items)
-         count += 1
-     new_top = []
-     for category in top_category:
-         if "其它" in category:  # skip the catch-all "Other" category
-             continue
-         new_top.append(category)
-     if len(request_items) > fetch_size:
-         request_items = request_items[:fetch_size]
-     return request_items, new_top
-
-
- def get_relevant_history(query, history, embeddings):
-     """
-     Retrieve a list of key history entries based on a query using semantic search.
-
-     Args:
-         query (str): The input query for which key history is to be retrieved.
-         history (list): A list of historical key entries.
-         embeddings (numpy.ndarray): An array of embedding vectors for historical entries.
-
-     Returns:
-         list: A list of key history entries most similar to the query.
-     """
-     top_k = int(os.environ["TOP_K"]) if "TOP_K" in os.environ else 2
-     relevant_history = []
-     query_embedding = get_embedding(query)
-     hits = semantic_search(query_embedding, embeddings, top_k=min(top_k, embeddings.shape[0]))
-     hits = hits[0]
-     for hit in hits:
-         matching_idx = hit["corpus_id"]
-         try:
-             relevant_history.append(history[matching_idx])
-         except IndexError:
-             return []
-     return relevant_history
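A small usage sketch for the retrieval helpers above (illustrative only; it assumes `Embed_Model` was set to a local sentence-transformers model before this module was imported, so `get_embedding` runs without the OpenAI API):

```python
import torch

# Hypothetical conversation history and its precomputed embeddings.
history = ["user asked about pricing", "user asked about shipping times"]
embeddings = torch.cat([get_embedding(h) for h in history], dim=0)

relevant = get_relevant_history("how long does delivery take?", history, embeddings)
print(relevant)  # up to TOP_K (default 2) entries, most similar first
```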
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/circlemaskimage/Factory.d.ts DELETED
@@ -1,9 +0,0 @@
- import CircleMaskImage from './CircleMaskImage';
-
- export default function (
-     x?: number, y?: number,
-     key?: string, frame?: string,
-     config?:
-         null | 0 | 1 | 2 | 'circle' | 'ellipse' | 'roundRectangle' |
-         CircleMaskImage.IConfig
- ): CircleMaskImage;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/lineprogress/Factory.js DELETED
@@ -1,13 +0,0 @@
- import LineProgress from './LineProgress.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('lineProgress', function (x, y, width, height, barColor, value, config) {
-     var gameObject = new LineProgress(this.scene, x, y, width, height, barColor, value, config);
-     this.scene.add.existing(gameObject);
-     return gameObject;
- });
-
- SetValue(window, 'RexPlugins.UI.LineProgress', LineProgress);
-
- export default LineProgress;
spaces/AkashKhamkar/Job_Search_Engine/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Job Search Engine
- emoji: 🌖
- colorFrom: green
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.15.2
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/custom_diffusion.md DELETED
@@ -1,303 +0,0 @@
- <!--Copyright 2023 Custom Diffusion authors The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Custom Diffusion training example
-
- [Custom Diffusion](https://arxiv.org/abs/2212.04488) is a method to customize text-to-image models like Stable Diffusion given just a few (4~5) images of a subject.
- The `train_custom_diffusion.py` script shows how to implement the training procedure and adapt it for Stable Diffusion.
-
- This training example was contributed by [Nupur Kumari](https://nupurkmr9.github.io/) (one of the authors of Custom Diffusion).
-
- ## Running locally with PyTorch
-
- ### Installing the dependencies
-
- Before running the scripts, make sure to install the library's training dependencies:
-
- **Important**
-
- To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
-
- ```bash
- git clone https://github.com/huggingface/diffusers
- cd diffusers
- pip install -e .
- ```
-
- Then cd into the [example folder](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion)
-
- ```
- cd examples/custom_diffusion
- ```
-
- Now run
-
- ```bash
- pip install -r requirements.txt
- pip install clip-retrieval
- ```
-
- And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
-
- ```bash
- accelerate config
- ```
-
- Or for a default accelerate configuration without answering questions about your environment
-
- ```bash
- accelerate config default
- ```
-
- Or if your environment doesn't support an interactive shell, e.g. a notebook
-
- ```python
- from accelerate.utils import write_basic_config
-
- write_basic_config()
- ```
- ### Cat example 😺
-
- Now let's get our dataset. Download the dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.
-
- We also collect 200 real images using `clip-retrieval`, which are combined with the target images in the training dataset as a regularization. This prevents overfitting to the given target image. The following flags enable the regularization: `with_prior_preservation`, `real_prior` with `prior_loss_weight=1.`.
- The `class_prompt` should be the same category name as the target image. The collected real images have text captions similar to the `class_prompt`. The retrieved images are saved in `class_data_dir`. You can disable `real_prior` to use generated images as regularization. To collect the real images, run this command first before training.
-
- ```bash
- pip install clip-retrieval
- python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
- ```
-
- **___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
-
- The script creates and saves model checkpoints and a `pytorch_custom_diffusion_weights.bin` file in your repository.
-
- ```bash
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
- export OUTPUT_DIR="path-to-save-model"
- export INSTANCE_DIR="./data/cat"
-
- accelerate launch train_custom_diffusion.py \
-   --pretrained_model_name_or_path=$MODEL_NAME \
-   --instance_data_dir=$INSTANCE_DIR \
-   --output_dir=$OUTPUT_DIR \
-   --class_data_dir=./real_reg/samples_cat/ \
-   --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
-   --class_prompt="cat" --num_class_images=200 \
-   --instance_prompt="photo of a <new1> cat" \
-   --resolution=512 \
-   --train_batch_size=2 \
-   --learning_rate=1e-5 \
-   --lr_warmup_steps=0 \
-   --max_train_steps=250 \
-   --scale_lr --hflip \
-   --modifier_token "<new1>" \
-   --push_to_hub
- ```
-
- **Use `--enable_xformers_memory_efficient_attention` for faster training with lower VRAM requirement (16GB per GPU). Follow [this guide](https://github.com/facebookresearch/xformers) for installation instructions.**
-
- To track your experiments using Weights and Biases (`wandb`) and to save intermediate results (which we highly recommend), follow these steps:
-
- * Install `wandb`: `pip install wandb`.
- * Authorize: `wandb login`.
- * Then specify a `validation_prompt` and set `report_to` to `wandb` while launching training. You can also configure the following related arguments:
-     * `num_validation_images`
-     * `validation_steps`
-
- Here is an example command:
-
- ```bash
- accelerate launch train_custom_diffusion.py \
-   --pretrained_model_name_or_path=$MODEL_NAME \
-   --instance_data_dir=$INSTANCE_DIR \
-   --output_dir=$OUTPUT_DIR \
-   --class_data_dir=./real_reg/samples_cat/ \
-   --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
-   --class_prompt="cat" --num_class_images=200 \
-   --instance_prompt="photo of a <new1> cat" \
-   --resolution=512 \
-   --train_batch_size=2 \
-   --learning_rate=1e-5 \
-   --lr_warmup_steps=0 \
-   --max_train_steps=250 \
-   --scale_lr --hflip \
-   --modifier_token "<new1>" \
-   --validation_prompt="<new1> cat sitting in a bucket" \
-   --report_to="wandb" \
-   --push_to_hub
- ```
-
- Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau) where you can check out the intermediate results along with other training details.
-
- If you specify `--push_to_hub`, the learned parameters will be pushed to a repository on the Hugging Face Hub. Here is an [example repository](https://huggingface.co/sayakpaul/custom-diffusion-cat).
-
- ### Training on multiple concepts 🐱🪵
-
- Provide a [json](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with the info about each concept, similar to [this](https://github.com/ShivamShrirao/diffusers/blob/main/examples/dreambooth/train_dreambooth.py).
-
- To collect the real images, run this command for each concept in the json file.
-
- ```bash
- pip install clip-retrieval
- python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200
- ```
-
- And then we're ready to start training!
-
- ```bash
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
- export OUTPUT_DIR="path-to-save-model"
-
- accelerate launch train_custom_diffusion.py \
-   --pretrained_model_name_or_path=$MODEL_NAME \
-   --output_dir=$OUTPUT_DIR \
-   --concepts_list=./concept_list.json \
-   --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
-   --resolution=512 \
-   --train_batch_size=2 \
-   --learning_rate=1e-5 \
-   --lr_warmup_steps=0 \
-   --max_train_steps=500 \
-   --num_class_images=200 \
-   --scale_lr --hflip \
-   --modifier_token "<new1>+<new2>" \
-   --push_to_hub
- ```
-
- Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg) where you can check out the intermediate results along with other training details.
-
- ### Training on human faces
-
- For fine-tuning on human faces, we found the following configuration to work better: `learning_rate=5e-6`, `max_train_steps=1000 to 2000`, and `freeze_model=crossattn` with at least 15-20 images.
-
- To collect the real images, run this command first before training.
-
- ```bash
- pip install clip-retrieval
- python retrieve.py --class_prompt person --class_data_dir real_reg/samples_person --num_class_images 200
- ```
-
- Then start training!
-
- ```bash
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
- export OUTPUT_DIR="path-to-save-model"
- export INSTANCE_DIR="path-to-images"
-
- accelerate launch train_custom_diffusion.py \
-   --pretrained_model_name_or_path=$MODEL_NAME \
-   --instance_data_dir=$INSTANCE_DIR \
-   --output_dir=$OUTPUT_DIR \
-   --class_data_dir=./real_reg/samples_person/ \
-   --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
-   --class_prompt="person" --num_class_images=200 \
-   --instance_prompt="photo of a <new1> person" \
-   --resolution=512 \
-   --train_batch_size=2 \
-   --learning_rate=5e-6 \
-   --lr_warmup_steps=0 \
-   --max_train_steps=1000 \
-   --scale_lr --hflip --noaug \
-   --freeze_model crossattn \
-   --modifier_token "<new1>" \
-   --enable_xformers_memory_efficient_attention \
-   --push_to_hub
- ```
-
- ## Inference
-
- Once you have trained a model using the above command, you can run inference using the command below. Make sure to include the `modifier token` (e.g. \<new1\> in the above example) in your prompt.
-
- ```python
- import torch
- from diffusers import DiffusionPipeline
-
- pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
- pipe.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin")
- pipe.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin")
-
- image = pipe(
-     "<new1> cat sitting in a bucket",
-     num_inference_steps=100,
-     guidance_scale=6.0,
-     eta=1.0,
- ).images[0]
- image.save("cat.png")
- ```
-
- It's possible to directly load these parameters from a Hub repository:
-
- ```python
- import torch
- from huggingface_hub.repocard import RepoCard
- from diffusers import DiffusionPipeline
-
- model_id = "sayakpaul/custom-diffusion-cat"
- card = RepoCard.load(model_id)
- base_model_id = card.data.to_dict()["base_model"]
-
- pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to("cuda")
- pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
- pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
-
- image = pipe(
-     "<new1> cat sitting in a bucket",
-     num_inference_steps=100,
-     guidance_scale=6.0,
-     eta=1.0,
- ).images[0]
- image.save("cat.png")
- ```
-
- Here is an example of performing inference with multiple concepts:
-
- ```python
- import torch
- from huggingface_hub.repocard import RepoCard
- from diffusers import DiffusionPipeline
-
- model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
- card = RepoCard.load(model_id)
- base_model_id = card.data.to_dict()["base_model"]
-
- pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to("cuda")
- pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
- pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
- pipe.load_textual_inversion(model_id, weight_name="<new2>.bin")
-
- image = pipe(
-     "the <new1> cat sculpture in the style of a <new2> wooden pot",
-     num_inference_steps=100,
-     guidance_scale=6.0,
-     eta=1.0,
- ).images[0]
- image.save("multi-subject.png")
- ```
-
- Here, `cat` and `wooden pot` refer to the multiple concepts.
-
- ### Inference from a training checkpoint
-
- You can also perform inference from one of the complete checkpoints saved during the training process, if you used the `--checkpointing_steps` argument.
-
- TODO.
-
- ## Set grads to none
-
- To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument.
-
- More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html
-
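In PyTorch terms, the flag corresponds to the optimizer call sketched below (illustrative, not the training script's literal code):

```python
import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

# What --set_grads_to_none maps to: gradient tensors are freed (set to None)
# between steps instead of being overwritten with zeros, saving memory.
optimizer.zero_grad(set_to_none=True)
```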
- ## Experimental results
-
- You can refer to [our webpage](https://www.cs.cmu.edu/~custom-diffusion/) that discusses our experiments in detail.
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/training/overview.md DELETED
@@ -1,80 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # 🧨 Diffusers Training Examples
-
- Diffusers training examples are a collection of scripts to demonstrate how to effectively use the `diffusers` library
- for a variety of use cases.
-
- **Note**: If you are looking for **official** examples on how to use `diffusers` for inference,
- please have a look at [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines).
-
- Our examples aspire to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and for **one-purpose-only**.
- More specifically, this means:
-
- - **Self-contained**: An example script shall only depend on "pip-install-able" Python packages that can be found in a `requirements.txt` file. Example scripts shall **not** depend on any local files. This means that one can simply download an example script, *e.g.* [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), install the required dependencies, *e.g.* [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt), and execute the example script.
- - **Easy-to-tweak**: While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out of the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data and the training loop to allow you to tweak and edit them as required.
- - **Beginner-friendly**: We do not aim to provide state-of-the-art training scripts for the newest models, but rather examples that can be used as a way to better understand diffusion models and how to use them with the `diffusers` library. We often purposefully leave out certain state-of-the-art methods if we consider them too complex for beginners.
- - **One-purpose-only**: Examples should show one task and one task only. Even if two tasks are very similar from a modeling point of view, *e.g.* image super-resolution and image modification tend to use the same model and training method, we want examples to showcase only one task to keep them as readable and easy-to-understand as possible.
-
- We provide **official** examples that cover the most popular tasks of diffusion models.
- *Official* examples are **actively** maintained by the `diffusers` maintainers and we try to rigorously follow our example philosophy as defined above.
- If you feel like another important example should exist, we are more than happy to welcome a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or directly a [Pull Request](https://github.com/huggingface/diffusers/compare) from you!
-
- Training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. Currently we support:
-
- - [Unconditional Training](./unconditional_training)
- - [Text-to-Image Training](./text2image)
- - [Text Inversion](./text_inversion)
- - [Dreambooth](./dreambooth)
- - [LoRA Support](./lora)
- - [ControlNet](./controlnet)
- - [InstructPix2Pix](./instructpix2pix)
- - [Custom Diffusion](./custom_diffusion)
-
- If possible, please [install xFormers](../optimization/xformers) for memory-efficient attention. This could help make your training faster and less memory-intensive.
-
- | Task | 🤗 Accelerate | 🤗 Datasets | Colab |
- |---|---|:---:|:---:|
- | [**Unconditional Image Generation**](./unconditional_training) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) |
- | [**Text-to-Image fine-tuning**](./text2image) | ✅ | ✅ | |
- | [**Textual Inversion**](./text_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) |
- | [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) |
- | [**Training with LoRA**](./lora) | ✅ | - | - |
- | [**ControlNet**](./controlnet) | ✅ | ✅ | - |
- | [**InstructPix2Pix**](./instructpix2pix) | ✅ | ✅ | - |
- | [**Custom Diffusion**](./custom_diffusion) | ✅ | ✅ | - |
-
- ## Community
-
- In addition, we provide **community** examples, which are examples added and maintained by our community.
- Community examples can consist of both *training* examples or *inference* pipelines.
- For such examples, we are more lenient regarding the philosophy defined above and also cannot guarantee to provide maintenance for every issue.
- Examples that are useful for the community, but are either not yet deemed popular or not yet following our above philosophy, should go into the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder. The community folder therefore includes training examples and inference pipelines.
- **Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) to show to the community how you like to use `diffusers` 🪄.
-
- ## Important note
-
- To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
-
- ```bash
- git clone https://github.com/huggingface/diffusers
- cd diffusers
- pip install .
- ```
-
- Then cd into the example folder of your choice and run
-
- ```bash
- pip install -r requirements.txt
- ```
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/overview.md DELETED
@@ -1,73 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # 🧨 Diffusers Training Examples
-
- In this chapter, we look at how to use the `diffusers` library effectively through example code for a variety of use cases.
-
- **Note**: If you are looking for official example code, take a look [here](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines)!
-
- The examples covered here aim to be:
-
- - **Self-contained**: all dependency packages used by the example code can be installed via `pip install`, and they are conveniently listed in a `requirements.txt` file, so you can install them in one step with `pip install -r requirements.txt`. Example: [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt)
- - **Easy-to-tweak**: we try to provide as many use cases as possible, but examples are, in the end, just examples. Simply copy-pasting the code shown here will not immediately solve the problem you are facing; you will need to adapt parts of the code to your own situation and needs. To help with this, most training examples expose the data preprocessing and the training loop so that users can easily modify them as needed.
- - **Beginner-friendly**: this chapter was written to support an overall understanding of diffusion models and the `diffusers` library, so among the latest state-of-the-art (SOTA) methods for diffusion models, we deliberately leave out those we judge too difficult for beginners.
- - **One-purpose-only**: each example here covers one task only. Tasks such as image super-resolution and image modification share a similar modeling process, but we decided that keeping a single task per example makes them easier to understand.
-
- We provide official examples covering the representative tasks of diffusion models. The *official* examples are actively maintained by the `diffusers` maintainers, and we strive to strictly follow the philosophy defined above. If you think such an example is absolutely necessary, please feel free to open a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or a [Pull Request](https://github.com/huggingface/diffusers/compare) at any time. We always welcome them!
-
- The training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. The following examples are currently supported:
-
- - [Unconditional Training](./unconditional_training)
- - [Text-to-Image Training](./text2image)
- - [Text Inversion](./text_inversion)
- - [Dreambooth](./dreambooth)
-
- To perform memory-efficient attention, please install [xFormers](../optimization/xformers) if possible. It can speed up training and reduce memory pressure.
-
- | Task | 🤗 Accelerate | 🤗 Datasets | Colab |
- |---|---|:---:|:---:|
- | [**Unconditional Image Generation**](./unconditional_training) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) |
- | [**Text-to-Image fine-tuning**](./text2image) | ✅ | ✅ | |
- | [**Textual Inversion**](./text_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) |
- | [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) |
- | [**Training with LoRA**](./lora) | ✅ | - | - |
- | [**ControlNet**](./controlnet) | ✅ | ✅ | - |
- | [**InstructPix2Pix**](./instructpix2pix) | ✅ | ✅ | - |
- | [**Custom Diffusion**](./custom_diffusion) | ✅ | ✅ | - |
-
- ## Community
-
- In addition to the official examples, we also provide **community examples**, which are maintained by our community. A community example can consist of a training example or an inference pipeline. For these community examples, we apply the philosophy defined above more leniently, and we cannot guarantee maintenance for every issue.
-
- Examples that are useful but not yet popular, or that do not fit our philosophy, end up in the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder.
-
- **Note**: community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for anyone hoping to contribute to `diffusers`.
-
- ## Important notes
-
- To make sure you can successfully run the latest versions of the example scripts, you must **install `diffusers` from source** and install the dependencies required by the examples. To do this, create a new virtual environment and run the following:
-
- ```bash
- git clone https://github.com/huggingface/diffusers
- cd diffusers
- pip install .
- ```
-
- Then `cd` into the example directory of your choice and run
-
- ```bash
- pip install -r requirements.txt
- ```
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py DELETED
@@ -1,1002 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- from typing import Any, Callable, Dict, List, Optional, Union
-
- import numpy as np
- import PIL
- import torch
- from packaging import version
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
- from ...configuration_utils import FrozenDict
- from ...image_processor import VaeImageProcessor
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
- from ...models import AsymmetricAutoencoderKL, AutoencoderKL, UNet2DConditionModel
- from ...schedulers import KarrasDiffusionSchedulers
- from ...utils import deprecate, is_accelerate_available, is_accelerate_version, logging, randn_tensor
- from ..pipeline_utils import DiffusionPipeline
- from . import StableDiffusionPipelineOutput
- from .safety_checker import StableDiffusionSafetyChecker
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
-     """
-     Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
-     converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
-     ``image`` and ``1`` for the ``mask``.
-
-     The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
-     binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
-
-     Args:
-         image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
-             It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
-             ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
-         mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
-             It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
-             ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
-
-     Raises:
-         ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
-             should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
-         TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
-             (or the other way around).
-
-     Returns:
-         tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
-             dimensions: ``batch x channels x height x width``.
-     """
-
-     if image is None:
-         raise ValueError("`image` input cannot be undefined.")
-
-     if mask is None:
-         raise ValueError("`mask_image` input cannot be undefined.")
-
-     if isinstance(image, torch.Tensor):
-         if not isinstance(mask, torch.Tensor):
-             raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)}) is not")
-
-         # Batch single image
-         if image.ndim == 3:
-             assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
-             image = image.unsqueeze(0)
-
-         # Batch and add channel dim for single mask
-         if mask.ndim == 2:
-             mask = mask.unsqueeze(0).unsqueeze(0)
-
-         # Batch single mask or add channel dim
-         if mask.ndim == 3:
-             # Single batched mask, no channel dim or single mask not batched but channel dim
-             if mask.shape[0] == 1:
-                 mask = mask.unsqueeze(0)
-
-             # Batched masks no channel dim
-             else:
-                 mask = mask.unsqueeze(1)
-
-         assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
-         assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
-         assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
-
-         # Check image is in [-1, 1]
-         if image.min() < -1 or image.max() > 1:
-             raise ValueError("Image should be in [-1, 1] range")
-
-         # Check mask is in [0, 1]
-         if mask.min() < 0 or mask.max() > 1:
-             raise ValueError("Mask should be in [0, 1] range")
-
-         # Binarize mask
-         mask[mask < 0.5] = 0
-         mask[mask >= 0.5] = 1
-
-         # Image as float32
-         image = image.to(dtype=torch.float32)
-     elif isinstance(mask, torch.Tensor):
-         raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
-     else:
-         # preprocess image
-         if isinstance(image, (PIL.Image.Image, np.ndarray)):
-             image = [image]
-         if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
-             # resize all images w.r.t. the passed height and width
-             image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
-             image = [np.array(i.convert("RGB"))[None, :] for i in image]
-             image = np.concatenate(image, axis=0)
-         elif isinstance(image, list) and isinstance(image[0], np.ndarray):
-             image = np.concatenate([i[None, :] for i in image], axis=0)
-
-         image = image.transpose(0, 3, 1, 2)
-         image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
-         # preprocess mask
-         if isinstance(mask, (PIL.Image.Image, np.ndarray)):
-             mask = [mask]
-
-         if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
-             mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
-             mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
-             mask = mask.astype(np.float32) / 255.0
-         elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
-             mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
-
-         mask[mask < 0.5] = 0
-         mask[mask >= 0.5] = 1
-         mask = torch.from_numpy(mask)
-
-     masked_image = image * (mask < 0.5)
-
-     # n.b. ensure backwards compatibility as the old function does not return the image
-     if return_image:
-         return mask, masked_image, image
-
-     return mask, masked_image
-
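Below is a small sketch (not from the diff) of how `prepare_mask_and_masked_image` behaves on PIL inputs; the blank test images are hypothetical:

```python
from PIL import Image

init_image = Image.new("RGB", (512, 512), "white")
mask_image = Image.new("L", (512, 512), 0)  # 0 = keep, 255 = inpaint

mask, masked = prepare_mask_and_masked_image(init_image, mask_image, 512, 512)
print(mask.shape)    # torch.Size([1, 1, 512, 512]), binarized to {0, 1}
print(masked.shape)  # torch.Size([1, 3, 512, 512]), image zeroed where mask >= 0.5
```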
154
-
155
- class StableDiffusionInpaintPipeline(
156
- DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
157
- ):
158
- r"""
159
- Pipeline for text-guided image inpainting using Stable Diffusion.
160
-
161
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
162
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
163
-
164
- The pipeline also inherits the following loading methods:
165
- - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
166
- - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
167
- - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
168
-
169
- Args:
170
- vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]):
171
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
172
- text_encoder ([`CLIPTextModel`]):
173
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
174
- tokenizer ([`~transformers.CLIPTokenizer`]):
175
- A `CLIPTokenizer` to tokenize text.
176
- unet ([`UNet2DConditionModel`]):
177
- A `UNet2DConditionModel` to denoise the encoded image latents.
178
- scheduler ([`SchedulerMixin`]):
179
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
180
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
181
- safety_checker ([`StableDiffusionSafetyChecker`]):
182
- Classification module that estimates whether generated images could be considered offensive or harmful.
183
- Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
184
- about a model's potential harms.
185
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
186
- A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
187
- """
188
- _optional_components = ["safety_checker", "feature_extractor"]
189
-
190
- def __init__(
191
- self,
192
- vae: Union[AutoencoderKL, AsymmetricAutoencoderKL],
193
- text_encoder: CLIPTextModel,
194
- tokenizer: CLIPTokenizer,
195
- unet: UNet2DConditionModel,
196
- scheduler: KarrasDiffusionSchedulers,
197
- safety_checker: StableDiffusionSafetyChecker,
198
- feature_extractor: CLIPImageProcessor,
199
- requires_safety_checker: bool = True,
200
- ):
201
- super().__init__()
202
-
203
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
204
- deprecation_message = (
205
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
206
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
207
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
208
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
209
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
210
- " file"
211
- )
212
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
213
- new_config = dict(scheduler.config)
214
- new_config["steps_offset"] = 1
215
- scheduler._internal_dict = FrozenDict(new_config)
216
-
217
- if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
218
- deprecation_message = (
219
- f"The configuration file of this scheduler: {scheduler} has not set the configuration"
220
- " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
221
- " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
222
- " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
223
- " Hub, it would be very nice if you could open a Pull request for the"
224
- " `scheduler/scheduler_config.json` file"
225
- )
226
- deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
227
- new_config = dict(scheduler.config)
228
- new_config["skip_prk_steps"] = True
229
- scheduler._internal_dict = FrozenDict(new_config)
230
-
231
- if safety_checker is None and requires_safety_checker:
232
- logger.warning(
233
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
234
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
235
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
236
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
237
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
238
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
239
- )
240
-
241
- if safety_checker is not None and feature_extractor is None:
242
- raise ValueError(
243
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
244
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
245
- )
246
-
247
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
248
- version.parse(unet.config._diffusers_version).base_version
249
- ) < version.parse("0.9.0.dev0")
250
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
251
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
252
- deprecation_message = (
253
- "The configuration file of the unet has set the default `sample_size` to smaller than"
254
- " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
255
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
256
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
257
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
258
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
259
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
260
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
261
- " the `unet/config.json` file"
262
- )
263
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
264
- new_config = dict(unet.config)
265
- new_config["sample_size"] = 64
266
- unet._internal_dict = FrozenDict(new_config)
267
-
268
- # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
269
- if unet.config.in_channels != 9:
270
- logger.info(f"You have loaded a UNet with {unet.config.in_channels} input channels which.")
271
-
272
- self.register_modules(
273
- vae=vae,
274
- text_encoder=text_encoder,
275
- tokenizer=tokenizer,
276
- unet=unet,
277
- scheduler=scheduler,
278
- safety_checker=safety_checker,
279
- feature_extractor=feature_extractor,
280
- )
281
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
282
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
283
- self.register_to_config(requires_safety_checker=requires_safety_checker)
284
-
285
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
286
- def enable_model_cpu_offload(self, gpu_id=0):
287
- r"""
288
- Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
289
- time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
290
- Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
291
- iterative execution of the `unet`.
292
- """
293
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
294
- from accelerate import cpu_offload_with_hook
295
- else:
296
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
297
-
298
- device = torch.device(f"cuda:{gpu_id}")
299
-
300
- if self.device.type != "cpu":
301
- self.to("cpu", silence_dtype_warnings=True)
302
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
303
-
304
- hook = None
305
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
306
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
307
-
308
- if self.safety_checker is not None:
309
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
310
-
311
- # We'll offload the last model manually.
312
- self.final_offload_hook = hook
313
-
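A hedged usage sketch for the method above; the checkpoint name is taken from the docstring example later in this file, and `accelerate>=0.17.0` must be installed.

```py
import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
)
# Each sub-model is moved to the GPU only when its forward pass runs.
pipe.enable_model_cpu_offload()
```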
314
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
315
- def _encode_prompt(
316
- self,
317
- prompt,
318
- device,
319
- num_images_per_prompt,
320
- do_classifier_free_guidance,
321
- negative_prompt=None,
322
- prompt_embeds: Optional[torch.FloatTensor] = None,
323
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
324
- lora_scale: Optional[float] = None,
325
- ):
326
- r"""
327
- Encodes the prompt into text encoder hidden states.
328
-
329
- Args:
330
- prompt (`str` or `List[str]`, *optional*):
331
- prompt to be encoded
332
- device: (`torch.device`):
333
- torch device
334
- num_images_per_prompt (`int`):
335
- number of images that should be generated per prompt
336
- do_classifier_free_guidance (`bool`):
337
- whether to use classifier free guidance or not
338
- negative_prompt (`str` or `List[str]`, *optional*):
339
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
340
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
341
- less than `1`).
342
- prompt_embeds (`torch.FloatTensor`, *optional*):
343
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
344
- provided, text embeddings will be generated from `prompt` input argument.
345
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
346
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
347
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
348
- argument.
349
- lora_scale (`float`, *optional*):
350
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
351
- """
352
- # set lora scale so that monkey patched LoRA
353
- # function of text encoder can correctly access it
354
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
355
- self._lora_scale = lora_scale
356
-
357
- if prompt is not None and isinstance(prompt, str):
358
- batch_size = 1
359
- elif prompt is not None and isinstance(prompt, list):
360
- batch_size = len(prompt)
361
- else:
362
- batch_size = prompt_embeds.shape[0]
363
-
364
- if prompt_embeds is None:
365
- # textual inversion: process multi-vector tokens if necessary
366
- if isinstance(self, TextualInversionLoaderMixin):
367
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
368
-
369
- text_inputs = self.tokenizer(
370
- prompt,
371
- padding="max_length",
372
- max_length=self.tokenizer.model_max_length,
373
- truncation=True,
374
- return_tensors="pt",
375
- )
376
- text_input_ids = text_inputs.input_ids
377
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
378
-
379
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
380
- text_input_ids, untruncated_ids
381
- ):
382
- removed_text = self.tokenizer.batch_decode(
383
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
384
- )
385
- logger.warning(
386
- "The following part of your input was truncated because CLIP can only handle sequences up to"
387
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
388
- )
389
-
390
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
391
- attention_mask = text_inputs.attention_mask.to(device)
392
- else:
393
- attention_mask = None
394
-
395
- prompt_embeds = self.text_encoder(
396
- text_input_ids.to(device),
397
- attention_mask=attention_mask,
398
- )
399
- prompt_embeds = prompt_embeds[0]
400
-
401
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
402
-
403
- bs_embed, seq_len, _ = prompt_embeds.shape
404
- # duplicate text embeddings for each generation per prompt, using mps friendly method
405
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
406
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
407
-
408
- # get unconditional embeddings for classifier free guidance
409
- if do_classifier_free_guidance and negative_prompt_embeds is None:
410
- uncond_tokens: List[str]
411
- if negative_prompt is None:
412
- uncond_tokens = [""] * batch_size
413
- elif prompt is not None and type(prompt) is not type(negative_prompt):
414
- raise TypeError(
415
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
416
- f" {type(prompt)}."
417
- )
418
- elif isinstance(negative_prompt, str):
419
- uncond_tokens = [negative_prompt]
420
- elif batch_size != len(negative_prompt):
421
- raise ValueError(
422
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
423
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
424
- " the batch size of `prompt`."
425
- )
426
- else:
427
- uncond_tokens = negative_prompt
428
-
429
- # textual inversion: process multi-vector tokens if necessary
430
- if isinstance(self, TextualInversionLoaderMixin):
431
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
432
-
433
- max_length = prompt_embeds.shape[1]
434
- uncond_input = self.tokenizer(
435
- uncond_tokens,
436
- padding="max_length",
437
- max_length=max_length,
438
- truncation=True,
439
- return_tensors="pt",
440
- )
441
-
442
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
443
- attention_mask = uncond_input.attention_mask.to(device)
444
- else:
445
- attention_mask = None
446
-
447
- negative_prompt_embeds = self.text_encoder(
448
- uncond_input.input_ids.to(device),
449
- attention_mask=attention_mask,
450
- )
451
- negative_prompt_embeds = negative_prompt_embeds[0]
452
-
453
- if do_classifier_free_guidance:
454
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
455
- seq_len = negative_prompt_embeds.shape[1]
456
-
457
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
458
-
459
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
460
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
461
-
462
- # For classifier free guidance, we need to do two forward passes.
463
- # Here we concatenate the unconditional and text embeddings into a single batch
464
- # to avoid doing two forward passes
465
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
466
-
467
- return prompt_embeds
468
-
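As a toy illustration of the classifier-free-guidance batching performed at the end of `_encode_prompt` (the `(77, 768)` shape is an assumption matching a CLIP ViT-L text encoder):

```py
import torch

negative_prompt_embeds = torch.zeros(2, 77, 768)  # unconditional branch
prompt_embeds = torch.ones(2, 77, 768)            # conditional branch
# One concatenated batch lets a single UNet forward pass score both branches.
cfg_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])  # shape (4, 77, 768)
```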
469
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
470
- def run_safety_checker(self, image, device, dtype):
471
- if self.safety_checker is None:
472
- has_nsfw_concept = None
473
- else:
474
- if torch.is_tensor(image):
475
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
476
- else:
477
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
478
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
479
- image, has_nsfw_concept = self.safety_checker(
480
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
481
- )
482
- return image, has_nsfw_concept
483
-
484
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
485
- def prepare_extra_step_kwargs(self, generator, eta):
486
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
487
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
488
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
489
- # and should be between [0, 1]
490
-
491
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
492
- extra_step_kwargs = {}
493
- if accepts_eta:
494
- extra_step_kwargs["eta"] = eta
495
-
496
- # check if the scheduler accepts generator
497
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
498
- if accepts_generator:
499
- extra_step_kwargs["generator"] = generator
500
- return extra_step_kwargs
501
-
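The introspection pattern above generalizes to any keyword argument; a minimal sketch with a hypothetical `step` signature:

```py
import inspect

def step(model_output, timestep, sample, eta=0.0):  # hypothetical scheduler step
    return sample

accepts_eta = "eta" in inspect.signature(step).parameters  # True for this signature
extra_step_kwargs = {"eta": 0.0} if accepts_eta else {}
```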
502
- def check_inputs(
503
- self,
504
- prompt,
505
- height,
506
- width,
507
- strength,
508
- callback_steps,
509
- negative_prompt=None,
510
- prompt_embeds=None,
511
- negative_prompt_embeds=None,
512
- ):
513
- if strength < 0 or strength > 1:
514
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
515
-
516
- if height % 8 != 0 or width % 8 != 0:
517
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
518
-
519
- if (callback_steps is None) or (
520
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
521
- ):
522
- raise ValueError(
523
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
524
- f" {type(callback_steps)}."
525
- )
526
-
527
- if prompt is not None and prompt_embeds is not None:
528
- raise ValueError(
529
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
530
- " only forward one of the two."
531
- )
532
- elif prompt is None and prompt_embeds is None:
533
- raise ValueError(
534
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
535
- )
536
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
537
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
538
-
539
- if negative_prompt is not None and negative_prompt_embeds is not None:
540
- raise ValueError(
541
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
542
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
543
- )
544
-
545
- if prompt_embeds is not None and negative_prompt_embeds is not None:
546
- if prompt_embeds.shape != negative_prompt_embeds.shape:
547
- raise ValueError(
548
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
549
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
550
- f" {negative_prompt_embeds.shape}."
551
- )
552
-
553
- def prepare_latents(
554
- self,
555
- batch_size,
556
- num_channels_latents,
557
- height,
558
- width,
559
- dtype,
560
- device,
561
- generator,
562
- latents=None,
563
- image=None,
564
- timestep=None,
565
- is_strength_max=True,
566
- return_noise=False,
567
- return_image_latents=False,
568
- ):
569
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
570
- if isinstance(generator, list) and len(generator) != batch_size:
571
- raise ValueError(
572
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
573
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
574
- )
575
-
576
- if (image is None or timestep is None) and not is_strength_max:
577
- raise ValueError(
578
- "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
579
- "However, either the image or the noise timestep has not been provided."
580
- )
581
-
582
- if return_image_latents or (latents is None and not is_strength_max):
583
- image = image.to(device=device, dtype=dtype)
584
- image_latents = self._encode_vae_image(image=image, generator=generator)
585
-
586
- if latents is None:
587
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
588
- # if strength is 1. then initialise the latents to noise, else initial to image + noise
589
- latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
590
- # if pure noise then scale the initial latents by the Scheduler's init sigma
591
- latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
592
- else:
593
- noise = latents.to(device)
594
- latents = noise * self.scheduler.init_noise_sigma
595
-
596
- outputs = (latents,)
597
-
598
- if return_noise:
599
- outputs += (noise,)
600
-
601
- if return_image_latents:
602
- outputs += (image_latents,)
603
-
604
- return outputs
605
-
606
- def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
607
- if isinstance(generator, list):
608
- image_latents = [
609
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
610
- for i in range(image.shape[0])
611
- ]
612
- image_latents = torch.cat(image_latents, dim=0)
613
- else:
614
- image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
615
-
616
- image_latents = self.vae.config.scaling_factor * image_latents
617
-
618
- return image_latents
619
-
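Note that `_encode_vae_image` scales the sampled latents by the VAE's configured `scaling_factor` so their magnitude matches what the UNet was trained on; a minimal sketch (0.18215 is an assumption, the value commonly used by Stable Diffusion VAEs):

```py
import torch

scaling_factor = 0.18215  # assumed typical SD VAE value
raw_latents = torch.randn(1, 4, 64, 64)
image_latents = scaling_factor * raw_latents
```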
620
- def prepare_mask_latents(
621
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
622
- ):
623
- # resize the mask to latents shape as we concatenate the mask to the latents
624
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
625
- # and half precision
626
- mask = torch.nn.functional.interpolate(
627
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
628
- )
629
- mask = mask.to(device=device, dtype=dtype)
630
-
631
- masked_image = masked_image.to(device=device, dtype=dtype)
632
- masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
633
-
634
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
635
- if mask.shape[0] < batch_size:
636
- if not batch_size % mask.shape[0] == 0:
637
- raise ValueError(
638
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
639
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
640
- " of masks that you pass is divisible by the total requested batch size."
641
- )
642
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
643
- if masked_image_latents.shape[0] < batch_size:
644
- if not batch_size % masked_image_latents.shape[0] == 0:
645
- raise ValueError(
646
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
647
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
648
- " Make sure the number of images that you pass is divisible by the total requested batch size."
649
- )
650
- masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
651
-
652
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
653
- masked_image_latents = (
654
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
655
- )
656
-
657
- # aligning device to prevent device errors when concating it with the latent model input
658
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
659
- return mask, masked_image_latents
660
-
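A quick standalone check of the latent-resolution resizing performed above (a `vae_scale_factor` of 8 is an assumption, typical for Stable Diffusion VAEs):

```py
import torch

vae_scale_factor = 8
mask = torch.rand(1, 1, 512, 512)
latent_mask = torch.nn.functional.interpolate(
    mask, size=(512 // vae_scale_factor, 512 // vae_scale_factor)
)
assert latent_mask.shape == (1, 1, 64, 64)
```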
661
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
662
- def get_timesteps(self, num_inference_steps, strength, device):
663
- # get the original timestep using init_timestep
664
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
665
-
666
- t_start = max(num_inference_steps - init_timestep, 0)
667
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
668
-
669
- return timesteps, num_inference_steps - t_start
670
-
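A worked example of the arithmetic in `get_timesteps`, assuming a first-order scheduler (`scheduler.order == 1`):

```py
num_inference_steps, strength = 50, 0.5
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 25
t_start = max(num_inference_steps - init_timestep, 0)                          # 25
# Only the last 25 of the 50 scheduled timesteps are run, so denoising starts
# from a half-noised version of the input image rather than from pure noise.
```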
671
- @torch.no_grad()
672
- def __call__(
673
- self,
674
- prompt: Union[str, List[str]] = None,
675
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
676
- mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
677
- height: Optional[int] = None,
678
- width: Optional[int] = None,
679
- strength: float = 1.0,
680
- num_inference_steps: int = 50,
681
- guidance_scale: float = 7.5,
682
- negative_prompt: Optional[Union[str, List[str]]] = None,
683
- num_images_per_prompt: Optional[int] = 1,
684
- eta: float = 0.0,
685
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
686
- latents: Optional[torch.FloatTensor] = None,
687
- prompt_embeds: Optional[torch.FloatTensor] = None,
688
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
689
- output_type: Optional[str] = "pil",
690
- return_dict: bool = True,
691
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
692
- callback_steps: int = 1,
693
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
694
- ):
695
- r"""
696
- The call function to the pipeline for generation.
697
-
698
- Args:
699
- prompt (`str` or `List[str]`, *optional*):
700
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
701
- image (`PIL.Image.Image`):
702
- `Image` or tensor representing an image batch to be inpainted (which parts of the image to be masked
703
- out with `mask_image` and repainted according to `prompt`).
704
- mask_image (`PIL.Image.Image`):
705
- `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted
706
- while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel
707
- (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the
708
- expected shape would be `(B, H, W, 1)`.
709
- height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
710
- The height in pixels of the generated image.
711
- width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
712
- The width in pixels of the generated image.
713
- strength (`float`, *optional*, defaults to 1.0):
714
- Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
715
- starting point and more noise is added the higher the `strength`. The number of denoising steps depends
716
- on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
717
- process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
718
- essentially ignores `image`.
719
- num_inference_steps (`int`, *optional*, defaults to 50):
720
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
721
- expense of slower inference. This parameter is modulated by `strength`.
722
- guidance_scale (`float`, *optional*, defaults to 7.5):
723
- A higher guidance scale value encourages the model to generate images closely linked to the text
724
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
725
- negative_prompt (`str` or `List[str]`, *optional*):
726
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
727
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
728
- num_images_per_prompt (`int`, *optional*, defaults to 1):
729
- The number of images to generate per prompt.
730
- eta (`float`, *optional*, defaults to 0.0):
731
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
732
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
733
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
734
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
735
- generation deterministic.
736
- latents (`torch.FloatTensor`, *optional*):
737
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
738
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
739
- tensor is generated by sampling using the supplied random `generator`.
740
- prompt_embeds (`torch.FloatTensor`, *optional*):
741
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
742
- provided, text embeddings are generated from the `prompt` input argument.
743
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
744
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
745
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
746
- output_type (`str`, *optional*, defaults to `"pil"`):
747
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
748
- return_dict (`bool`, *optional*, defaults to `True`):
749
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
750
- plain tuple.
751
- callback (`Callable`, *optional*):
752
- A function that calls every `callback_steps` steps during inference. The function is called with the
753
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
754
- callback_steps (`int`, *optional*, defaults to 1):
755
- The frequency at which the `callback` function is called. If not specified, the callback is called at
756
- every step.
757
- cross_attention_kwargs (`dict`, *optional*):
758
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
759
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
760
-
761
- Examples:
762
-
763
- ```py
764
- >>> import PIL
765
- >>> import requests
766
- >>> import torch
767
- >>> from io import BytesIO
768
-
769
- >>> from diffusers import StableDiffusionInpaintPipeline
770
-
771
-
772
- >>> def download_image(url):
773
- ... response = requests.get(url)
774
- ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
775
-
776
-
777
- >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
778
- >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
779
-
780
- >>> init_image = download_image(img_url).resize((512, 512))
781
- >>> mask_image = download_image(mask_url).resize((512, 512))
782
-
783
- >>> pipe = StableDiffusionInpaintPipeline.from_pretrained(
784
- ... "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
785
- ... )
786
- >>> pipe = pipe.to("cuda")
787
-
788
- >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
789
- >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
790
- ```
791
-
792
- Returns:
793
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
794
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
795
- otherwise a `tuple` is returned where the first element is a list with the generated images and the
796
- second element is a list of `bool`s indicating whether the corresponding generated image contains
797
- "not-safe-for-work" (nsfw) content.
798
- """
799
- # 0. Default height and width to unet
800
- height = height or self.unet.config.sample_size * self.vae_scale_factor
801
- width = width or self.unet.config.sample_size * self.vae_scale_factor
802
-
803
- # 1. Check inputs
804
- self.check_inputs(
805
- prompt,
806
- height,
807
- width,
808
- strength,
809
- callback_steps,
810
- negative_prompt,
811
- prompt_embeds,
812
- negative_prompt_embeds,
813
- )
814
-
815
- # 2. Define call parameters
816
- if prompt is not None and isinstance(prompt, str):
817
- batch_size = 1
818
- elif prompt is not None and isinstance(prompt, list):
819
- batch_size = len(prompt)
820
- else:
821
- batch_size = prompt_embeds.shape[0]
822
-
823
- device = self._execution_device
824
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
825
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
826
- # corresponds to doing no classifier free guidance.
827
- do_classifier_free_guidance = guidance_scale > 1.0
828
-
829
- # 3. Encode input prompt
830
- text_encoder_lora_scale = (
831
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
832
- )
833
- prompt_embeds = self._encode_prompt(
834
- prompt,
835
- device,
836
- num_images_per_prompt,
837
- do_classifier_free_guidance,
838
- negative_prompt,
839
- prompt_embeds=prompt_embeds,
840
- negative_prompt_embeds=negative_prompt_embeds,
841
- lora_scale=text_encoder_lora_scale,
842
- )
843
-
844
- # 4. set timesteps
845
- self.scheduler.set_timesteps(num_inference_steps, device=device)
846
- timesteps, num_inference_steps = self.get_timesteps(
847
- num_inference_steps=num_inference_steps, strength=strength, device=device
848
- )
849
- # check that number of inference steps is not < 1 - as this doesn't make sense
850
- if num_inference_steps < 1:
851
- raise ValueError(
852
- f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
853
- f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
854
- )
855
- # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
856
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
857
- # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
858
- is_strength_max = strength == 1.0
859
-
860
- # 5. Preprocess mask and image
861
- mask, masked_image, init_image = prepare_mask_and_masked_image(
862
- image, mask_image, height, width, return_image=True
863
- )
864
- mask_condition = mask.clone()
865
-
866
- # 6. Prepare latent variables
867
- num_channels_latents = self.vae.config.latent_channels
868
- num_channels_unet = self.unet.config.in_channels
869
- return_image_latents = num_channels_unet == 4
870
-
871
- latents_outputs = self.prepare_latents(
872
- batch_size * num_images_per_prompt,
873
- num_channels_latents,
874
- height,
875
- width,
876
- prompt_embeds.dtype,
877
- device,
878
- generator,
879
- latents,
880
- image=init_image,
881
- timestep=latent_timestep,
882
- is_strength_max=is_strength_max,
883
- return_noise=True,
884
- return_image_latents=return_image_latents,
885
- )
886
-
887
- if return_image_latents:
888
- latents, noise, image_latents = latents_outputs
889
- else:
890
- latents, noise = latents_outputs
891
-
892
- # 7. Prepare mask latent variables
893
- mask, masked_image_latents = self.prepare_mask_latents(
894
- mask,
895
- masked_image,
896
- batch_size * num_images_per_prompt,
897
- height,
898
- width,
899
- prompt_embeds.dtype,
900
- device,
901
- generator,
902
- do_classifier_free_guidance,
903
- )
904
-
905
- # 8. Check that sizes of mask, masked image and latents match
906
- if num_channels_unet == 9:
907
- # default case for runwayml/stable-diffusion-inpainting
908
- num_channels_mask = mask.shape[1]
909
- num_channels_masked_image = masked_image_latents.shape[1]
910
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
911
- raise ValueError(
912
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
913
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
914
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
915
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
916
- " `pipeline.unet` or your `mask_image` or `image` input."
917
- )
918
- elif num_channels_unet != 4:
919
- raise ValueError(
920
- f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
921
- )
922
-
923
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
924
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
925
-
926
- # 10. Denoising loop
927
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
928
- with self.progress_bar(total=num_inference_steps) as progress_bar:
929
- for i, t in enumerate(timesteps):
930
- # expand the latents if we are doing classifier free guidance
931
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
932
-
933
- # concat latents, mask, masked_image_latents in the channel dimension
934
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
935
-
936
- if num_channels_unet == 9:
937
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
938
-
939
- # predict the noise residual
940
- noise_pred = self.unet(
941
- latent_model_input,
942
- t,
943
- encoder_hidden_states=prompt_embeds,
944
- cross_attention_kwargs=cross_attention_kwargs,
945
- return_dict=False,
946
- )[0]
947
-
948
- # perform guidance
949
- if do_classifier_free_guidance:
950
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
951
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
952
-
953
- # compute the previous noisy sample x_t -> x_t-1
954
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
955
-
956
- if num_channels_unet == 4:
957
- init_latents_proper = image_latents[:1]
958
- init_mask = mask[:1]
959
-
960
- if i < len(timesteps) - 1:
961
- noise_timestep = timesteps[i + 1]
962
- init_latents_proper = self.scheduler.add_noise(
963
- init_latents_proper, noise, torch.tensor([noise_timestep])
964
- )
965
-
966
- latents = (1 - init_mask) * init_latents_proper + init_mask * latents
967
-
968
- # call the callback, if provided
969
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
970
- progress_bar.update()
971
- if callback is not None and i % callback_steps == 0:
972
- callback(i, t, latents)
973
-
974
- if not output_type == "latent":
975
- condition_kwargs = {}
976
- if isinstance(self.vae, AsymmetricAutoencoderKL):
977
- init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)
978
- init_image_condition = init_image.clone()
979
- init_image = self._encode_vae_image(init_image, generator=generator)
980
- mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)
981
- condition_kwargs = {"image": init_image_condition, "mask": mask_condition}
982
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs)[0]
983
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
984
- else:
985
- image = latents
986
- has_nsfw_concept = None
987
-
988
- if has_nsfw_concept is None:
989
- do_denormalize = [True] * image.shape[0]
990
- else:
991
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
992
-
993
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
994
-
995
- # Offload last model to CPU
996
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
997
- self.final_offload_hook.offload()
998
-
999
- if not return_dict:
1000
- return (image, has_nsfw_concept)
1001
-
1002
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_unipc_multistep.py DELETED
@@ -1,681 +0,0 @@
1
- # Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: check https://arxiv.org/abs/2302.04867 and https://github.com/wl-zhao/UniPC for more info
16
- # The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
17
-
18
- import math
19
- from typing import List, Optional, Tuple, Union
20
-
21
- import numpy as np
22
- import torch
23
-
24
- from ..configuration_utils import ConfigMixin, register_to_config
25
- from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
26
-
27
-
28
- def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
29
- """
30
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
31
- (1-beta) over time from t = [0,1].
32
-
33
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
34
- to that part of the diffusion process.
35
-
36
-
37
- Args:
38
- num_diffusion_timesteps (`int`): the number of betas to produce.
39
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
40
- prevent singularities.
41
-
42
- Returns:
43
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
44
- """
45
-
46
- def alpha_bar(time_step):
47
- return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
48
-
49
- betas = []
50
- for i in range(num_diffusion_timesteps):
51
- t1 = i / num_diffusion_timesteps
52
- t2 = (i + 1) / num_diffusion_timesteps
53
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
54
- return torch.tensor(betas, dtype=torch.float32)
55
-
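A small sanity check of the cosine schedule defined above: every beta is positive and capped at `max_beta`.

```py
betas = betas_for_alpha_bar(1000)
assert betas.shape == (1000,)
assert betas.min() > 0 and betas.max() <= 0.999
```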
56
-
57
- class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
58
- """
59
- UniPC is a training-free framework designed for the fast sampling of diffusion models, which consists of a
60
- corrector (UniC) and a predictor (UniP) that share a unified analytical form and support arbitrary orders. UniPC is
61
- designed to be model-agnostic, supporting pixel-space/latent-space DPMs on unconditional/conditional sampling. It can
62
- also be applied to both noise prediction and data prediction models. The corrector UniC can also be applied
63
- after any off-the-shelf solvers to increase the order of accuracy.
64
-
65
- For more details, see the original paper: https://arxiv.org/abs/2302.04867
66
-
67
- Currently, we support the multistep UniPC for both noise prediction models and data prediction models. We recommend
68
- using `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling.
69
-
70
- We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
71
- diffusion models, you can set both `predict_x0=True` and `thresholding=True` to use the dynamic thresholding. Note
72
- that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion).
73
-
74
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
75
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
76
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
77
- [`~SchedulerMixin.from_pretrained`] functions.
78
-
79
- Args:
80
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
81
- beta_start (`float`): the starting `beta` value of inference.
82
- beta_end (`float`): the final `beta` value.
83
- beta_schedule (`str`):
84
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
85
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
86
- trained_betas (`np.ndarray`, optional):
87
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
88
- solver_order (`int`, default `2`):
89
- the order of UniPC, also the p in UniPC-p; can be any positive integer. Note that the effective order of
90
- accuracy is `solver_order + 1` due to the UniC. We recommend using `solver_order=2` for guided sampling,
91
- and `solver_order=3` for unconditional sampling.
92
- prediction_type (`str`, default `epsilon`, optional):
93
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
94
- process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4
95
- https://imagen.research.google/video/paper.pdf)
96
- thresholding (`bool`, default `False`):
97
- whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
98
- For pixel-space diffusion models, you can set both `predict_x0=True` and `thresholding=True` to use the
99
- dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models
100
- (such as stable-diffusion).
101
- dynamic_thresholding_ratio (`float`, default `0.995`):
102
- the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
103
- (https://arxiv.org/abs/2205.11487).
104
- sample_max_value (`float`, default `1.0`):
105
- the threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`.
106
- predict_x0 (`bool`, default `True`):
107
- whether to use the updating algorithm on the predicted x0. See https://arxiv.org/abs/2211.01095 for details.
108
- solver_type (`str`, default `bh2`):
109
- the solver type of UniPC. We recommend `bh1` for unconditional sampling when steps < 10, and `bh2`
110
- otherwise.
111
- lower_order_final (`bool`, default `True`):
112
- whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically
113
- find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10.
114
- disable_corrector (`list`, default `[]`):
115
- decides at which steps to disable the corrector. For large guidance scales, the misalignment between
116
- `epsilon_theta(x_t, c)` and `epsilon_theta(x_t^c, c)` might influence the convergence. This can be mitigated
117
- by disabling the corrector at the first few steps (e.g., `disable_corrector=[0]`).
118
- solver_p (`SchedulerMixin`, default `None`):
119
- can be any other scheduler. If specified, the algorithm will become solver_p + UniC.
120
- use_karras_sigmas (`bool`, *optional*, defaults to `False`):
121
- This parameter controls whether to use Karras sigmas (Karras et al. (2022) scheme) for step sizes in the
122
- noise schedule during the sampling process. If True, the sigmas will be determined according to a sequence
123
- of noise levels {σi} as defined in Equation (5) of the paper https://arxiv.org/pdf/2206.00364.pdf.
124
- timestep_spacing (`str`, default `"linspace"`):
125
- The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
126
- Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
127
- steps_offset (`int`, default `0`):
128
- an offset added to the inference steps. You can use a combination of `offset=1` and
129
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
130
- stable diffusion.
131
- """
132
-
133
- _compatibles = [e.name for e in KarrasDiffusionSchedulers]
134
- order = 1
135
-
136
- @register_to_config
137
- def __init__(
138
- self,
139
- num_train_timesteps: int = 1000,
140
- beta_start: float = 0.0001,
141
- beta_end: float = 0.02,
142
- beta_schedule: str = "linear",
143
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
144
- solver_order: int = 2,
145
- prediction_type: str = "epsilon",
146
- thresholding: bool = False,
147
- dynamic_thresholding_ratio: float = 0.995,
148
- sample_max_value: float = 1.0,
149
- predict_x0: bool = True,
150
- solver_type: str = "bh2",
151
- lower_order_final: bool = True,
152
- disable_corrector: List[int] = [],
153
- solver_p: SchedulerMixin = None,
154
- use_karras_sigmas: Optional[bool] = False,
155
- timestep_spacing: str = "linspace",
156
- steps_offset: int = 0,
157
- ):
158
- if trained_betas is not None:
159
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
160
- elif beta_schedule == "linear":
161
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
162
- elif beta_schedule == "scaled_linear":
163
- # this schedule is very specific to the latent diffusion model.
164
- self.betas = (
165
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
166
- )
167
- elif beta_schedule == "squaredcos_cap_v2":
168
- # Glide cosine schedule
169
- self.betas = betas_for_alpha_bar(num_train_timesteps)
170
- else:
171
- raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")
172
-
173
- self.alphas = 1.0 - self.betas
174
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
175
- # Currently we only support VP-type noise schedule
176
- self.alpha_t = torch.sqrt(self.alphas_cumprod)
177
- self.sigma_t = torch.sqrt(1 - self.alphas_cumprod)
178
- self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t)
179
-
180
- # standard deviation of the initial noise distribution
181
- self.init_noise_sigma = 1.0
182
-
183
- if solver_type not in ["bh1", "bh2"]:
184
- if solver_type in ["midpoint", "heun", "logrho"]:
185
- self.register_to_config(solver_type="bh2")
186
- else:
187
- raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}")
188
-
189
- self.predict_x0 = predict_x0
190
- # setable values
191
- self.num_inference_steps = None
192
- timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy()
193
- self.timesteps = torch.from_numpy(timesteps)
194
- self.model_outputs = [None] * solver_order
195
- self.timestep_list = [None] * solver_order
196
- self.lower_order_nums = 0
197
- self.disable_corrector = disable_corrector
198
- self.solver_p = solver_p
199
- self.last_sample = None
200
-
201
- def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
202
- """
203
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
204
-
205
- Args:
206
- num_inference_steps (`int`):
207
- the number of diffusion steps used when generating samples with a pre-trained model.
208
- device (`str` or `torch.device`, optional):
209
- the device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
210
- """
211
- # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
212
- if self.config.timestep_spacing == "linspace":
213
- timesteps = (
214
- np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1)
215
- .round()[::-1][:-1]
216
- .copy()
217
- .astype(np.int64)
218
- )
219
- elif self.config.timestep_spacing == "leading":
220
- step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1)
221
- # creates integer timesteps by multiplying by ratio
222
- # casting to int to avoid issues when num_inference_step is power of 3
223
- timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64)
224
- timesteps += self.config.steps_offset
225
- elif self.config.timestep_spacing == "trailing":
226
- step_ratio = self.config.num_train_timesteps / num_inference_steps
227
- # creates integer timesteps by multiplying by ratio
228
- # casting to int to avoid issues when num_inference_step is power of 3
229
- timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64)
230
- timesteps -= 1
231
- else:
232
- raise ValueError(
233
- f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
234
- )
235
-
236
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
237
- if self.config.use_karras_sigmas:
238
- log_sigmas = np.log(sigmas)
239
- sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
240
- timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round()
241
- timesteps = np.flip(timesteps).copy().astype(np.int64)
242
-
243
- self.sigmas = torch.from_numpy(sigmas)
244
-
245
- # when num_inference_steps == num_train_timesteps, we can end up with
246
- # duplicates in timesteps.
247
- _, unique_indices = np.unique(timesteps, return_index=True)
248
- timesteps = timesteps[np.sort(unique_indices)]
249
-
250
- self.timesteps = torch.from_numpy(timesteps).to(device)
251
-
252
- self.num_inference_steps = len(timesteps)
253
-
254
- self.model_outputs = [
255
- None,
256
- ] * self.config.solver_order
257
- self.lower_order_nums = 0
258
- self.last_sample = None
259
- if self.solver_p:
260
- self.solver_p.set_timesteps(self.num_inference_steps, device=device)
261
-
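A worked example of the `"linspace"` spacing branch above, with `num_train_timesteps=1000` and `num_inference_steps=4`:

```py
import numpy as np

timesteps = (
    np.linspace(0, 999, 4 + 1)   # [0., 249.75, 499.5, 749.25, 999.]
    .round()[::-1][:-1]          # round, reverse, drop the trailing 0
    .copy()
    .astype(np.int64)
)
# -> array([999, 749, 500, 250])
```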
262
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
263
- def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
264
- """
265
- "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
266
- prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
267
- s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
268
- pixels from saturation at each step. We find that dynamic thresholding results in significantly better
269
- photorealism as well as better image-text alignment, especially when using very large guidance weights."
270
-
271
- https://arxiv.org/abs/2205.11487
272
- """
273
- dtype = sample.dtype
274
- batch_size, channels, height, width = sample.shape
275
-
276
- if dtype not in (torch.float32, torch.float64):
277
- sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
278
-
279
- # Flatten sample for doing quantile calculation along each image
280
- sample = sample.reshape(batch_size, channels * height * width)
281
-
282
- abs_sample = sample.abs() # "a certain percentile absolute pixel value"
283
-
284
- s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
285
- s = torch.clamp(
286
- s, min=1, max=self.config.sample_max_value
287
- ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
288
-
289
- s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
290
- sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
291
-
292
- sample = sample.reshape(batch_size, channels, height, width)
293
- sample = sample.to(dtype)
294
-
295
- return sample
296
-
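A compact standalone rendition of the dynamic-thresholding steps above:

```py
import torch

sample = torch.randn(2, 3, 4, 4) * 3.0        # deliberately outside [-1, 1]
flat = sample.reshape(2, -1)
s = torch.quantile(flat.abs(), 0.995, dim=1)  # per-image percentile
s = torch.clamp(s, min=1.0).unsqueeze(1)      # s >= 1 leaves [-1, 1] untouched
flat = torch.clamp(flat, -s, s) / s           # threshold, then divide by s
sample = flat.reshape(2, 3, 4, 4)
```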
297
- def convert_model_output(
298
- self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor
299
- ) -> torch.FloatTensor:
300
- r"""
301
- Convert the model output to the corresponding type that the algorithm PC needs.
302
-
303
- Args:
304
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
305
- timestep (`int`): current discrete timestep in the diffusion chain.
306
- sample (`torch.FloatTensor`):
307
- current instance of sample being created by diffusion process.
308
-
309
- Returns:
310
- `torch.FloatTensor`: the converted model output.
311
- """
312
- if self.predict_x0:
313
- if self.config.prediction_type == "epsilon":
314
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
315
- x0_pred = (sample - sigma_t * model_output) / alpha_t
316
- elif self.config.prediction_type == "sample":
317
- x0_pred = model_output
318
- elif self.config.prediction_type == "v_prediction":
319
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
320
- x0_pred = alpha_t * sample - sigma_t * model_output
321
- else:
322
- raise ValueError(
323
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
324
- " `v_prediction` for the UniPCMultistepScheduler."
325
- )
326
-
327
- if self.config.thresholding:
328
- x0_pred = self._threshold_sample(x0_pred)
329
-
330
- return x0_pred
331
- else:
332
- if self.config.prediction_type == "epsilon":
333
- return model_output
334
- elif self.config.prediction_type == "sample":
335
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
336
- epsilon = (sample - alpha_t * model_output) / sigma_t
337
- return epsilon
338
- elif self.config.prediction_type == "v_prediction":
339
- alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep]
340
- epsilon = alpha_t * model_output + sigma_t * sample
341
- return epsilon
342
- else:
343
- raise ValueError(
344
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
345
- " `v_prediction` for the UniPCMultistepScheduler."
346
- )
347
-
348
- def multistep_uni_p_bh_update(
349
- self,
350
- model_output: torch.FloatTensor,
351
- prev_timestep: int,
352
- sample: torch.FloatTensor,
353
- order: int,
354
- ) -> torch.FloatTensor:
355
- """
356
- One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if is specified.
357
-
358
- Args:
359
- model_output (`torch.FloatTensor`):
360
- direct outputs from learned diffusion model at the current timestep.
361
- prev_timestep (`int`): previous discrete timestep in the diffusion chain.
362
- sample (`torch.FloatTensor`):
363
- current instance of sample being created by diffusion process.
364
- order (`int`): the order of UniP at this step, also the p in UniPC-p.
365
-
366
- Returns:
367
- `torch.FloatTensor`: the sample tensor at the previous timestep.
368
- """
369
- timestep_list = self.timestep_list
370
- model_output_list = self.model_outputs
371
-
372
- s0, t = self.timestep_list[-1], prev_timestep
373
- m0 = model_output_list[-1]
374
- x = sample
375
-
376
- if self.solver_p:
377
- x_t = self.solver_p.step(model_output, s0, x).prev_sample
378
- return x_t
379
-
380
- lambda_t, lambda_s0 = self.lambda_t[t], self.lambda_t[s0]
381
- alpha_t, alpha_s0 = self.alpha_t[t], self.alpha_t[s0]
382
- sigma_t, sigma_s0 = self.sigma_t[t], self.sigma_t[s0]
383
-
384
- h = lambda_t - lambda_s0
385
- device = sample.device
386
-
387
- rks = []
388
- D1s = []
389
- for i in range(1, order):
390
- si = timestep_list[-(i + 1)]
391
- mi = model_output_list[-(i + 1)]
392
- lambda_si = self.lambda_t[si]
393
- rk = (lambda_si - lambda_s0) / h
394
- rks.append(rk)
395
- D1s.append((mi - m0) / rk)
396
-
397
- rks.append(1.0)
398
- rks = torch.tensor(rks, device=device)
399
-
400
- R = []
401
- b = []
402
-
403
- hh = -h if self.predict_x0 else h
404
- h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1
405
- h_phi_k = h_phi_1 / hh - 1
406
-
407
- factorial_i = 1
408
-
409
- if self.config.solver_type == "bh1":
410
- B_h = hh
411
- elif self.config.solver_type == "bh2":
412
- B_h = torch.expm1(hh)
413
- else:
414
- raise NotImplementedError()
415
-
416
- for i in range(1, order + 1):
417
- R.append(torch.pow(rks, i - 1))
418
- b.append(h_phi_k * factorial_i / B_h)
419
- factorial_i *= i + 1
420
- h_phi_k = h_phi_k / hh - 1 / factorial_i
421
-
422
- R = torch.stack(R)
423
- b = torch.tensor(b, device=device)
424
-
425
- if len(D1s) > 0:
426
- D1s = torch.stack(D1s, dim=1) # (B, K)
427
- # for order 2, we use a simplified version
428
- if order == 2:
429
- rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device)
430
- else:
431
- rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
432
- else:
433
- D1s = None
434
-
435
- if self.predict_x0:
436
- x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0
437
- if D1s is not None:
438
- pred_res = torch.einsum("k,bkchw->bchw", rhos_p, D1s)
439
- else:
440
- pred_res = 0
441
- x_t = x_t_ - alpha_t * B_h * pred_res
442
- else:
443
- x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0
444
- if D1s is not None:
445
- pred_res = torch.einsum("k,bkchw->bchw", rhos_p, D1s)
446
- else:
447
- pred_res = 0
448
- x_t = x_t_ - sigma_t * B_h * pred_res
449
-
450
- x_t = x_t.to(x.dtype)
451
- return x_t
452
-
453
- def multistep_uni_c_bh_update(
454
- self,
455
- this_model_output: torch.FloatTensor,
456
- this_timestep: int,
457
- last_sample: torch.FloatTensor,
458
- this_sample: torch.FloatTensor,
459
- order: int,
460
- ) -> torch.FloatTensor:
461
- """
462
- One step for the UniC (B(h) version).
463
-
464
- Args:
465
- this_model_output (`torch.FloatTensor`): the model outputs at `x_t`
466
- this_timestep (`int`): the current timestep `t`
467
- last_sample (`torch.FloatTensor`): the generated sample before the last predictor: `x_{t-1}`
468
- this_sample (`torch.FloatTensor`): the generated sample after the last predictor: `x_{t}`
469
- order (`int`): the `p` of UniC-p at this step. Note that the effective order of accuracy
470
- should be order + 1
471
-
472
- Returns:
473
- `torch.FloatTensor`: the corrected sample tensor at the current timestep.
474
- """
475
- timestep_list = self.timestep_list
476
- model_output_list = self.model_outputs
477
-
478
- s0, t = timestep_list[-1], this_timestep
479
- m0 = model_output_list[-1]
480
- x = last_sample
481
- x_t = this_sample
482
- model_t = this_model_output
483
-
484
- lambda_t, lambda_s0 = self.lambda_t[t], self.lambda_t[s0]
485
- alpha_t, alpha_s0 = self.alpha_t[t], self.alpha_t[s0]
486
- sigma_t, sigma_s0 = self.sigma_t[t], self.sigma_t[s0]
487
-
488
- h = lambda_t - lambda_s0
489
- device = this_sample.device
490
-
491
- rks = []
492
- D1s = []
493
- for i in range(1, order):
494
- si = timestep_list[-(i + 1)]
495
- mi = model_output_list[-(i + 1)]
496
- lambda_si = self.lambda_t[si]
497
- rk = (lambda_si - lambda_s0) / h
498
- rks.append(rk)
499
- D1s.append((mi - m0) / rk)
500
-
501
- rks.append(1.0)
502
- rks = torch.tensor(rks, device=device)
503
-
504
- R = []
505
- b = []
506
-
507
- hh = -h if self.predict_x0 else h
508
- h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1
509
- h_phi_k = h_phi_1 / hh - 1
510
-
511
- factorial_i = 1
512
-
513
- if self.config.solver_type == "bh1":
514
- B_h = hh
515
- elif self.config.solver_type == "bh2":
516
- B_h = torch.expm1(hh)
517
- else:
518
- raise NotImplementedError()
519
-
520
- for i in range(1, order + 1):
521
- R.append(torch.pow(rks, i - 1))
522
- b.append(h_phi_k * factorial_i / B_h)
523
- factorial_i *= i + 1
524
- h_phi_k = h_phi_k / hh - 1 / factorial_i
525
-
526
- R = torch.stack(R)
527
- b = torch.tensor(b, device=device)
528
-
529
- if len(D1s) > 0:
530
- D1s = torch.stack(D1s, dim=1)
531
- else:
532
- D1s = None
533
-
534
- # for order 1, we use a simplified version
535
- if order == 1:
536
- rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device)
537
- else:
538
- rhos_c = torch.linalg.solve(R, b)
539
-
540
- if self.predict_x0:
541
- x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0
542
- if D1s is not None:
543
- corr_res = torch.einsum("k,bkchw->bchw", rhos_c[:-1], D1s)
544
- else:
545
- corr_res = 0
546
- D1_t = model_t - m0
547
- x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t)
548
- else:
549
- x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0
550
- if D1s is not None:
551
- corr_res = torch.einsum("k,bkchw->bchw", rhos_c[:-1], D1s)
552
- else:
553
- corr_res = 0
554
- D1_t = model_t - m0
555
- x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t)
556
- x_t = x_t.to(x.dtype)
557
- return x_t
558
-
559
- def step(
560
- self,
561
- model_output: torch.FloatTensor,
562
- timestep: int,
563
- sample: torch.FloatTensor,
564
- return_dict: bool = True,
565
- ) -> Union[SchedulerOutput, Tuple]:
566
- """
567
- Step function propagating the sample with the multistep UniPC.
568
-
569
- Args:
570
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
571
- timestep (`int`): current discrete timestep in the diffusion chain.
572
- sample (`torch.FloatTensor`):
573
- current instance of sample being created by diffusion process.
574
- return_dict (`bool`): option for returning tuple rather than SchedulerOutput class
575
-
576
- Returns:
577
- [`~scheduling_utils.SchedulerOutput`] or `tuple`: [`~scheduling_utils.SchedulerOutput`] if `return_dict` is
578
- True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
579
-
580
- """
581
-
582
- if self.num_inference_steps is None:
583
- raise ValueError(
584
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
585
- )
586
-
587
- if isinstance(timestep, torch.Tensor):
588
- timestep = timestep.to(self.timesteps.device)
589
- step_index = (self.timesteps == timestep).nonzero()
590
- if len(step_index) == 0:
591
- step_index = len(self.timesteps) - 1
592
- else:
593
- step_index = step_index.item()
594
-
595
- use_corrector = (
596
- step_index > 0 and step_index - 1 not in self.disable_corrector and self.last_sample is not None
597
- )
598
-
599
- model_output_convert = self.convert_model_output(model_output, timestep, sample)
600
- if use_corrector:
601
- sample = self.multistep_uni_c_bh_update(
602
- this_model_output=model_output_convert,
603
- this_timestep=timestep,
604
- last_sample=self.last_sample,
605
- this_sample=sample,
606
- order=self.this_order,
607
- )
608
-
609
- # now prepare to run the predictor
610
- prev_timestep = 0 if step_index == len(self.timesteps) - 1 else self.timesteps[step_index + 1]
611
-
612
- for i in range(self.config.solver_order - 1):
613
- self.model_outputs[i] = self.model_outputs[i + 1]
614
- self.timestep_list[i] = self.timestep_list[i + 1]
615
-
616
- self.model_outputs[-1] = model_output_convert
617
- self.timestep_list[-1] = timestep
618
-
619
- if self.config.lower_order_final:
620
- this_order = min(self.config.solver_order, len(self.timesteps) - step_index)
621
- else:
622
- this_order = self.config.solver_order
623
-
624
- self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep
625
- assert self.this_order > 0
626
-
627
- self.last_sample = sample
628
- prev_sample = self.multistep_uni_p_bh_update(
629
- model_output=model_output, # pass the original non-converted model output, in case solver-p is used
630
- prev_timestep=prev_timestep,
631
- sample=sample,
632
- order=self.this_order,
633
- )
634
-
635
- if self.lower_order_nums < self.config.solver_order:
636
- self.lower_order_nums += 1
637
-
638
- if not return_dict:
639
- return (prev_sample,)
640
-
641
- return SchedulerOutput(prev_sample=prev_sample)
642
-
643
- def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
644
- """
645
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
646
- current timestep.
647
-
648
- Args:
649
- sample (`torch.FloatTensor`): input sample
650
-
651
- Returns:
652
- `torch.FloatTensor`: scaled input sample
653
- """
654
- return sample
655
-
656
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
657
- def add_noise(
658
- self,
659
- original_samples: torch.FloatTensor,
660
- noise: torch.FloatTensor,
661
- timesteps: torch.IntTensor,
662
- ) -> torch.FloatTensor:
663
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
664
- alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
665
- timesteps = timesteps.to(original_samples.device)
666
-
667
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
668
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
669
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
670
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
671
-
672
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
673
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
674
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
675
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
676
-
677
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
678
- return noisy_samples
679
-
680
- def __len__(self):
681
- return self.config.num_train_timesteps
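Since this deleted file mirrors the standard diffusers scheduler interface (`set_timesteps`, `step`, `add_noise`), the usual way to exercise it is to swap it into a pipeline via `from_config`. A minimal sketch, assuming the upstream `diffusers` package and an illustrative Stable Diffusion checkpoint:

```python
# Minimal sketch: swapping UniPC into a diffusers pipeline.
# Model id and step count are illustrative, not taken from this repo.
import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
# Rebuild the scheduler from the pipeline's existing config so betas,
# prediction_type, etc. stay consistent with the checkpoint.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# UniPC is a fast multistep predictor-corrector solver, so relatively few
# inference steps usually suffice.
image = pipe("a photo of an astronaut riding a horse", num_inference_steps=20).images[0]
image.save("unipc_sample.png")
```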
 
spaces/Andy1621/uniformer_image_detection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py DELETED
@@ -1,105 +0,0 @@
- _base_ = [
-     '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
- ]
-
- # model settings
- model = dict(
-     type='CornerNet',
-     backbone=dict(
-         type='HourglassNet',
-         downsample_times=5,
-         num_stacks=2,
-         stage_channels=[256, 256, 384, 384, 384, 512],
-         stage_blocks=[2, 2, 2, 2, 2, 4],
-         norm_cfg=dict(type='BN', requires_grad=True)),
-     neck=None,
-     bbox_head=dict(
-         type='CentripetalHead',
-         num_classes=80,
-         in_channels=256,
-         num_feat_levels=2,
-         corner_emb_channels=0,
-         loss_heatmap=dict(
-             type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
-         loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
-         loss_guiding_shift=dict(
-             type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
-         loss_centripetal_shift=dict(
-             type='SmoothL1Loss', beta=1.0, loss_weight=1)),
-     # training and testing settings
-     train_cfg=None,
-     test_cfg=dict(
-         corner_topk=100,
-         local_maximum_kernel=3,
-         distance_threshold=0.5,
-         score_thr=0.05,
-         max_per_img=100,
-         nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
- # data settings
- img_norm_cfg = dict(
-     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile', to_float32=True),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(
-         type='PhotoMetricDistortion',
-         brightness_delta=32,
-         contrast_range=(0.5, 1.5),
-         saturation_range=(0.5, 1.5),
-         hue_delta=18),
-     dict(
-         type='RandomCenterCropPad',
-         crop_size=(511, 511),
-         ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
-         test_mode=False,
-         test_pad_mode=None,
-         **img_norm_cfg),
-     dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile', to_float32=True),
-     dict(
-         type='MultiScaleFlipAug',
-         scale_factor=1.0,
-         flip=True,
-         transforms=[
-             dict(type='Resize'),
-             dict(
-                 type='RandomCenterCropPad',
-                 crop_size=None,
-                 ratios=None,
-                 border=None,
-                 test_mode=True,
-                 test_pad_mode=['logical_or', 127],
-                 **img_norm_cfg),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(
-                 type='Collect',
-                 keys=['img'],
-                 meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
-                            'scale_factor', 'flip', 'img_norm_cfg', 'border')),
-         ])
- ]
- data = dict(
-     samples_per_gpu=6,
-     workers_per_gpu=3,
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
- # optimizer
- optimizer = dict(type='Adam', lr=0.0005)
- optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
- # learning policy
- lr_config = dict(
-     policy='step',
-     warmup='linear',
-     warmup_iters=500,
-     warmup_ratio=1.0 / 3,
-     step=[190])
- runner = dict(type='EpochBasedRunner', max_epochs=210)
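As a sketch of how a config like this is typically consumed (assuming an MMDetection 2.x checkout, where `mmcv.Config` and `tools/train.py` are available; the print targets below just echo values visible in the file above):

```python
# Sketch: loading and inspecting the merged config with mmcv (MMDetection 2.x era).
# Inherited _base_ files are merged into the final config automatically.
from mmcv import Config

cfg = Config.fromfile(
    'configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py'
)
print(cfg.model.bbox_head.type)   # -> 'CentripetalHead'
print(cfg.optimizer)              # -> {'type': 'Adam', 'lr': 0.0005}

# Training itself is normally launched from the shell, e.g.:
#   python tools/train.py configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py
```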
 
spaces/Andy1621/uniformer_image_detection/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py DELETED
@@ -1,23 +0,0 @@
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
- model = dict(
-     roi_head=dict(
-         type='DoubleHeadRoIHead',
-         reg_roi_scale_factor=1.3,
-         bbox_head=dict(
-             _delete_=True,
-             type='DoubleConvFCBBoxHead',
-             num_convs=4,
-             num_fcs=2,
-             in_channels=256,
-             conv_out_channels=1024,
-             fc_out_channels=1024,
-             roi_feat_size=7,
-             num_classes=80,
-             bbox_coder=dict(
-                 type='DeltaXYWHBBoxCoder',
-                 target_means=[0., 0., 0., 0.],
-                 target_stds=[0.1, 0.1, 0.2, 0.2]),
-             reg_class_agnostic=False,
-             loss_cls=dict(
-                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0),
-             loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0))))
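The `_delete_=True` key is what stops the inherited `bbox_head` from the Faster R-CNN base from being merged field-by-field with the new `DoubleConvFCBBoxHead`; instead the child dict replaces it wholesale. A self-contained sketch of that merge rule (a simplified reimplementation for illustration, not MMCV's actual code; the example dicts are hypothetical):

```python
# Simplified sketch of mmcv-style config merging with the _delete_ flag.
def merge_cfg(base: dict, child: dict) -> dict:
    if child.get('_delete_', False):
        # Discard the base dict entirely; only the child's keys survive.
        return {k: v for k, v in child.items() if k != '_delete_'}
    merged = dict(base)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], value)  # recurse into nested dicts
        else:
            merged[key] = value
    return merged

base_head = {'type': 'Shared2FCBBoxHead', 'fc_out_channels': 1024, 'roi_feat_size': 7}
child_head = {'_delete_': True, 'type': 'DoubleConvFCBBoxHead', 'num_convs': 4}
print(merge_cfg(base_head, child_head))
# {'type': 'DoubleConvFCBBoxHead', 'num_convs': 4}  (no leftover base keys)
```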
 
spaces/Angello06/SoylaloGaming/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: SoylaloGaming
- emoji: 😻
- colorFrom: gray
- colorTo: blue
- sdk: streamlit
- sdk_version: 1.21.0
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/Training_PRO/custom_scheduler.py DELETED
@@ -1,175 +0,0 @@
- from functools import partial
- import torch
- import transformers
- import math
- from torch.optim.lr_scheduler import LambdaLR
-
-
- # FPHAM custom training scheduler block - should be extracted to a separate file
- last_print_label = ''
-
- # hold constant for the first half of training, then cosine down to 0
- def _get_fp_half_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_firstepoch_steps: int):
-
-     global last_print_label
-     print_label = ''
-
-     half_steps = num_training_steps // 2
-
-     num_warmup_steps = min(num_warmup_steps, half_steps)
-
-     if current_step < num_warmup_steps:
-         print_label = 'Scheduler: Warmup'
-     elif current_step < half_steps:
-         print_label = 'Scheduler: Hold'
-     else:
-         print_label = 'Scheduler: Annealing'
-
-     if print_label != last_print_label:
-         print(print_label)
-
-     last_print_label = print_label
-
-     if current_step < num_warmup_steps:
-         return float(current_step) / float(max(1, num_warmup_steps))
-
-     if current_step < half_steps:
-         return 1.0
-
-     progress = float(current_step - half_steps) / float(max(1, num_training_steps - half_steps))
-     num_cycles = 0.5
-     return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
-
- # hold constant for the first epoch, then cosine down to 0 over the remaining epochs
- def _get_fp_cosine_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_firstepoch_steps: int):
-
-     global last_print_label
-     print_label = ''
-
-     num_warmup_steps = min(num_warmup_steps, num_firstepoch_steps)
-
-     if current_step < num_warmup_steps:
-         print_label = 'Scheduler: Warmup'
-     elif current_step < num_firstepoch_steps:
-         print_label = 'Scheduler: Hold'
-     else:
-         print_label = 'Scheduler: Annealing'
-
-     if print_label != last_print_label:
-         print(print_label)
-
-     last_print_label = print_label
-
-     if current_step < num_warmup_steps:
-         return float(current_step) / float(max(1, num_warmup_steps))
-
-     if current_step < num_firstepoch_steps:
-         return 1.0
-
-     progress = float(current_step - num_firstepoch_steps) / float(max(1, num_training_steps - num_firstepoch_steps))
-     num_cycles = 0.5
-     return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
-
-
- def custom_cosine_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
-     """
-     Args:
-         optimizer ([`~torch.optim.Optimizer`]):
-             The optimizer for which to schedule the learning rate.
-         num_warmup_steps (`int`):
-             The number of steps for the warmup phase.
-         num_training_steps (`int`):
-             The total number of training steps.
-         num_firstepoch_steps (`int`):
-             The number of steps in the first epoch (end of the hold phase).
-         last_epoch (`int`, *optional*, defaults to -1):
-             The index of the last epoch when resuming training.
-
-     Return:
-         `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
-     """
-
-     lr_lambda = partial(
-         _get_fp_cosine_schedule_with_warmup_lr_lambda,
-         num_warmup_steps=num_warmup_steps,
-         num_training_steps=num_training_steps,
-         num_firstepoch_steps=num_firstepoch_steps,
-     )
-     return LambdaLR(optimizer, lr_lambda, last_epoch)
-
- def custom_half_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
-     """
-     Args:
-         optimizer ([`~torch.optim.Optimizer`]):
-             The optimizer for which to schedule the learning rate.
-         num_warmup_steps (`int`):
-             The number of steps for the warmup phase.
-         num_training_steps (`int`):
-             The total number of training steps.
-         num_firstepoch_steps (`int`):
-             The number of steps in the first epoch.
-         last_epoch (`int`, *optional*, defaults to -1):
-             The index of the last epoch when resuming training.
-
-     Return:
-         `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
-     """
-
-     lr_lambda = partial(
-         _get_fp_half_schedule_with_warmup_lr_lambda,
-         num_warmup_steps=num_warmup_steps,
-         num_training_steps=num_training_steps,
-         num_firstepoch_steps=num_firstepoch_steps,
-     )
-     return LambdaLR(optimizer, lr_lambda, last_epoch)
-
- class FPSchedulerTrainer(transformers.Trainer):
-     def __init__(self, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-
-     def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
-         # Set up the scheduler. The optimizer of the trainer must have been set up
-         # either before this method is called or passed as an argument.
-
-         num_train_epochs = self.args.num_train_epochs
-         num_warmup_steps = self.args.get_warmup_steps(num_training_steps)
-         num_firstepoch_steps = math.ceil(num_training_steps / num_train_epochs)
-         num_warmup_acc = num_warmup_steps * self.args.gradient_accumulation_steps
-         num_firstepoch_steps_acc = num_firstepoch_steps * self.args.gradient_accumulation_steps
-         num_training_steps_acc = num_training_steps * self.args.gradient_accumulation_steps
-
-         print(f"Warm-up steps aligned to gradient accumulation ({self.args.gradient_accumulation_steps}) = {num_warmup_acc} actual warmup steps")
-         if self.args.lr_scheduler_type == 'cosine':
-
-             num_warmup_acc_min = min(num_warmup_acc, num_firstepoch_steps_acc)
-
-             if num_warmup_acc > num_firstepoch_steps_acc:
-                 print("\033[1;31;1mWARNING: The number of warmup steps is set too high! It will be clamped to 1 epoch, essentially going from warmup to annealing.\033[0;37;0m")
-                 print(f"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}")
-             else:
-                 print(f"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}")
-
-             self.lr_scheduler = custom_cosine_scheduler_with_warmup(
-                 optimizer=self.optimizer if optimizer is None else optimizer,
-                 num_warmup_steps=num_warmup_steps,
-                 num_training_steps=num_training_steps,
-                 num_firstepoch_steps=num_firstepoch_steps,
-             )
-             self._created_lr_scheduler = True
-             return self.lr_scheduler
-         elif self.args.lr_scheduler_type == 'constant':
-
-             half_step_acc = num_training_steps_acc // 2
-             num_warmup_acc_min = min(num_warmup_acc, half_step_acc)
-
-             if num_warmup_acc > half_step_acc:
-                 print("\033[1;31;1mWARNING: The number of warmup steps is set too high! It will be clamped to half of all epochs, essentially going from warmup to annealing in the middle.\033[0;37;0m")
-                 print(f"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}")
-             else:
-                 print(f"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}")
-
-             self.lr_scheduler = custom_half_scheduler_with_warmup(
-                 optimizer=self.optimizer if optimizer is None else optimizer,
-                 num_warmup_steps=num_warmup_steps,
-                 num_training_steps=num_training_steps,
-                 num_firstepoch_steps=num_firstepoch_steps,
-             )
-             self._created_lr_scheduler = True
-             return self.lr_scheduler
-         else:
-             return super().create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
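Because the schedule functions above are plain step-to-multiplier lambdas, their warmup/hold/anneal shape can be checked without constructing a Trainer at all. A small sketch (the step counts are illustrative, and it assumes the module above is importable so the lambda can be called directly):

```python
# Sketch: tracing the shape of the custom cosine schedule step by step.
steps, warmup, first_epoch = 1000, 50, 250

lrs = [
    _get_fp_cosine_schedule_with_warmup_lr_lambda(
        s,
        num_warmup_steps=warmup,
        num_training_steps=steps,
        num_firstepoch_steps=first_epoch,
    )
    for s in range(steps)
]

assert lrs[warmup] == 1.0            # ramped up to full LR right after warmup
assert lrs[first_epoch - 1] == 1.0   # held flat through the first epoch
assert lrs[-1] < 0.01                # cosine-annealed close to zero at the end
```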
 
spaces/Anitha0531/SpeechtoText/app.py DELETED
@@ -1,116 +0,0 @@
- import torch
-
- import gradio as gr
- import pytube as pt
- from transformers import pipeline
-
- MODEL_NAME = "openai/whisper-large-v2"
- BATCH_SIZE = 8
-
- device = 0 if torch.cuda.is_available() else "cpu"
-
- pipe = pipeline(
-     task="automatic-speech-recognition",
-     model=MODEL_NAME,
-     chunk_length_s=30,
-     device=device,
- )
-
-
- all_special_ids = pipe.tokenizer.all_special_ids
- transcribe_token_id = all_special_ids[-5]
- translate_token_id = all_special_ids[-6]
-
-
- def transcribe(microphone, file_upload, task="transcribe"):
-     # `task` defaults to "transcribe" because the interface below only wires up
-     # the two audio inputs.
-     warn_output = ""
-     if (microphone is not None) and (file_upload is not None):
-         warn_output = (
-             "WARNING: You've uploaded an audio file and used the microphone. "
-             "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
-         )
-
-     elif (microphone is None) and (file_upload is None):
-         # The interface expects two outputs (text, file), so return a pair.
-         return ["ERROR: You have to either use the microphone or upload an audio file", None]
-
-     file = microphone if microphone is not None else file_upload
-
-     pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]
-
-     textt = pipe(file, batch_size=BATCH_SIZE)["text"]
-
-     with open('outt.txt', 'a+') as sw:
-         sw.writelines(textt)
-
-     return [warn_output + textt, "outt.txt"]
-
-
- def _return_yt_html_embed(yt_url):
-     # Helper kept from the original template; the current interface outputs
-     # (text, file) do not include this HTML preview.
-     video_id = yt_url.split("?v=")[-1]
-     HTML_str = (
-         f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
-         " </center>"
-     )
-     return HTML_str
-
-
- def yt_transcribe(yt_url, task="transcribe"):
-     yt = pt.YouTube(yt_url)
-     stream = yt.streams.filter(only_audio=True)[0]
-     stream.download(filename="audio.mp3")
-
-     pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task == "transcribe" else translate_token_id]]
-
-     text = pipe("audio.mp3", batch_size=BATCH_SIZE)["text"]
-
-     # Write the transcript before returning (the original returned early,
-     # leaving this block unreachable).
-     with open('outtt.txt', 'a+') as sw:
-         sw.writelines(text)
-
-     return [text, "outtt.txt"]
-
-
- demo = gr.Blocks()
- output_2 = gr.File(label="Download")
- output_3 = gr.File(label="Download")
- description = """This application displays transcribed text for given audio input <img src="https://i.ibb.co/J5DscKw/GVP-Womens.jpg" width=100px>"""
- mf_transcribe = gr.Interface(
-     fn=transcribe,
-     inputs=[
-         gr.inputs.Audio(source="microphone", type="filepath", optional=True),
-         gr.inputs.Audio(source="upload", type="filepath", optional=True),
-     ],
-     outputs=["text", output_2],
-     layout="horizontal",
-     theme="huggingface",
-     title="Speech to Text Converter using OpenAI Whisper Model",
-     description=description,
-     allow_flagging="never",
- )
-
- yt_transcribe = gr.Interface(
-     fn=yt_transcribe,
-     inputs=[
-         gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
-     ],
-     outputs=["text", output_3],
-     layout="horizontal",
-     theme="huggingface",
-     title="Speech to Text Converter using OpenAI Whisper Model",
-     description="Transcribe YouTube Videos to Text",
-     allow_flagging="never",
- )
-
- with demo:
-     gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"])
-
- demo.launch(enable_queue=True)
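One fragile spot worth flagging in this app: it derives the transcribe/translate token ids from fixed negative offsets into `all_special_ids`, which depends on the tokenizer's special-token layout. A sketch of a more explicit alternative using the tokenizer's own helper (assuming a recent `transformers` release where `get_decoder_prompt_ids` is available; `pipe` here refers to the pipeline defined above):

```python
# Sketch: deriving Whisper task prompts without hard-coded special-id offsets.
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
# Returns the (position, token_id) pairs Whisper expects as forced decoder ids.
forced_ids = processor.get_decoder_prompt_ids(language="english", task="transcribe")
pipe.model.config.forced_decoder_ids = forced_ids
```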
 
spaces/Anthony7906/MengHuiMXD_GPT/readme/README_ja.md DELETED
@@ -1,126 +0,0 @@
- <div align="right">
-   <!-- Language: -->
-   <a title="Chinese" href="../README.md">简体中文</a> | <a title="English" href="README_en.md">English</a> | Japanese
- </div>
-
- <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
- <div align="center">
-   <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
-     <img src="https://user-images.githubusercontent.com/70903329/227087087-93b37d64-7dc3-4738-a518-c1cf05591c8a.png" alt="Logo" height="156">
-   </a>
-
- <p align="center">
-     <h3>A lightweight, user-friendly web UI for LLMs such as ChatGPT/ChatGLM/LLaMA</h3>
-     <p align="center">
-       <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/blob/main/LICENSE">
-         <img alt="Tests Passing" src="https://img.shields.io/github/license/GaiZhenbiao/ChuanhuChatGPT" />
-       </a>
-       <a href="https://gradio.app/">
-         <img alt="GitHub Contributors" src="https://img.shields.io/badge/Base-Gradio-fb7d1a?style=flat" />
-       </a>
-       <a href="https://t.me/tkdifferent">
-         <img alt="GitHub pull requests" src="https://img.shields.io/badge/Telegram-Group-blue.svg?logo=telegram" />
-       </a>
-       <p>
-         Streaming output / unlimited conversation turns / history saving / preset prompts / chat with files<br>
-         Web search / LaTeX rendering / table rendering / code highlighting<br>
-         Auto dark mode / adaptive web interface / WeChat-like theme<br />
-         Multi-parameter tuning / multiple API keys / multi-user support<br>
-         GPT-4 support / local deployment of LLMs.
-       </p>
-       <a href="https://www.youtube.com/watch?v=MtxS4XZWbJE"><strong>Video Tutorial</strong></a>
-         ·
-       <a href="https://www.youtube.com/watch?v=77nw7iimYDE"><strong>2.0 Introduction</strong></a>
-         ·
-       <a href="https://www.youtube.com/watch?v=x-O1jjBqgu4"><strong>3.0 Introduction & Tutorial</strong></a>
-       ||
-       <a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT"><strong>Online Trial</strong></a>
-         ·
-       <a href="https://huggingface.co/login?next=%2Fspaces%2FJohnSmith9982%2FChuanhuChatGPT%3Fduplicate%3Dtrue"><strong>One-Click Deployment</strong></a>
-     </p>
-     <p align="center">
-       <img alt="Animation Demo" src="https://user-images.githubusercontent.com/51039745/226255695-6b17ff1f-ea8d-464f-b69b-a7b6b68fffe8.gif" />
-     </p>
-   </p>
- </div>
-
- ## Usage Tips
-
- - Use the system prompt to control ChatGPT's behavior more precisely.
- - To use a prompt template, pick a template collection and then choose a specific prompt from the dropdown menu. If the answer is unsatisfactory, retry with the `🔄 Regenerate` button.
- - To insert a line break in the input box, press <kbd>Shift</kbd> + <kbd>Enter</kbd>.
- - To cycle quickly through input history, press <kbd>↑</kbd> and <kbd>↓</kbd> in the input box.
- - To deploy the program on a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=<your port number>)`.
- - To get a shareable link, change the last line of the program to `demo.launch(share=True)`. Note that the program must keep running for the public link to stay accessible.
- - When using it on Hugging Face Spaces: for faster and safer use, it is recommended to **Duplicate Space** and run the program in your own Space.
-
- ## Installation
-
- ```shell
- git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
- cd ChuanhuChatGPT
- pip install -r requirements.txt
- ```
-
- Then copy `config_example.json` to `config.json` and fill in your API key and other settings in that file.
-
- ```shell
- python ChuanhuChatbot.py
- ```
-
- A browser window will open, and you can chat with ChatGPT.
-
- > **Note**
- >
- > See the [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions.
-
- ## Troubleshooting
-
- If you run into problems, it is best to first pull the latest changes of this project manually. The steps are:
-
- 1. Click `Download ZIP` on the web page to download the latest code archive, or run
-    ```shell
-    git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
-    ```
- 2. New dependencies may have been introduced, so try reinstalling the dependencies:
-    ```
-    pip install -r requirements.txt
-    ```
- 3. Update Gradio:
-    ```
-    pip install gradio --upgrade --force-reinstall
-    ```
-
- In general, these steps resolve most problems.
-
- If the problem persists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
-
- That page lists almost every possible problem and its solution. Please read it carefully.
-
- ## More Information
-
- For more details, see the [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki):
-
- - [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization)
- - [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
- - [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
- - [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
- - [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)
-
- ## Starchart
-
- [![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
-
- ## Contributors
-
- <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/graphs/contributors">
-   <img src="https://contrib.rocks/image?repo=GaiZhenbiao/ChuanhuChatGPT" />
- </a>
-
- ## Sponsor
-
- 🐯 If you find this project helpful, feel free to buy me a Coke or a coffee~
-
- <a href="https://www.buymeacoffee.com/ChuanhuChat" ><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=ChuanhuChat&button_colour=219d53&font_colour=ffffff&font_family=Poppins&outline_colour=ffffff&coffee_colour=FFDD00" alt="Buy Me A Coffee" width="250"></a>
-
- <img width="250" alt="image" src="https://user-images.githubusercontent.com/51039745/226920291-e8ec0b0a-400f-4c20-ac13-dafac0c3aeeb.JPG">
 
spaces/Apex-X/nono/.github/ISSUE_TEMPLATE/bug.md DELETED
@@ -1,47 +0,0 @@
- ---
- name: Bug
- about: Report a bug
- title: '[Bug]'
- labels: 'bug'
-
- ---
-
- ## Description
-
- A concise description of the bug and how to reproduce it.
-
- ## Error
-
- Paste the error or exception from your console:
-
- ```
-
- ```
-
- ## Details
-
- What operating system are you using?
-
- - [ ] Windows
- - [ ] MacOS (Apple Silicon)
- - [ ] MacOS (Apple Legacy)
- - [ ] Linux
- - [ ] Linux in WSL
-
- What execution provider are you using?
-
- - [ ] CPU
- - [ ] CUDA
- - [ ] CoreML
- - [ ] DirectML
- - [ ] OpenVINO
- - [ ] Other
-
- What version of Roop are you using?
-
- - [ ] 1.0.0
- - [ ] 1.1.0
- - [ ] 1.2.0
- - [ ] 1.3.0
- - [ ] 1.3.1
- - [ ] next
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/ordered_set.py DELETED
@@ -1,488 +0,0 @@
- """
- An OrderedSet is a custom MutableSet that remembers its order, so that every
- entry has an index that can be looked up.
-
- Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
- and released under the MIT license.
- """
- import itertools as it
- from collections import deque
-
- try:
-     # Python 3
-     from collections.abc import MutableSet, Sequence
- except ImportError:
-     # Python 2.7
-     from collections import MutableSet, Sequence
-
- SLICE_ALL = slice(None)
- __version__ = "3.1"
-
-
- def is_iterable(obj):
-     """
-     Are we being asked to look up a list of things, instead of a single thing?
-     We check for the `__iter__` attribute so that this can cover types that
-     don't have to be known by this module, such as NumPy arrays.
-
-     Strings, however, should be considered as atomic values to look up, not
-     iterables. The same goes for tuples, since they are immutable and therefore
-     valid entries.
-
-     We don't need to check for the Python 2 `unicode` type, because it doesn't
-     have an `__iter__` attribute anyway.
-     """
-     return (
-         hasattr(obj, "__iter__")
-         and not isinstance(obj, str)
-         and not isinstance(obj, tuple)
-     )
-
-
- class OrderedSet(MutableSet, Sequence):
-     """
-     An OrderedSet is a custom MutableSet that remembers its order, so that
-     every entry has an index that can be looked up.
-
-     Example:
-         >>> OrderedSet([1, 1, 2, 3, 2])
-         OrderedSet([1, 2, 3])
-     """
-
-     def __init__(self, iterable=None):
-         self.items = []
-         self.map = {}
-         if iterable is not None:
-             self |= iterable
-
-     def __len__(self):
-         """
-         Returns the number of unique elements in the ordered set
-
-         Example:
-             >>> len(OrderedSet([]))
-             0
-             >>> len(OrderedSet([1, 2]))
-             2
-         """
-         return len(self.items)
-
-     def __getitem__(self, index):
-         """
-         Get the item at a given index.
-
-         If `index` is a slice, you will get back that slice of items, as a
-         new OrderedSet.
-
-         If `index` is a list or a similar iterable, you'll get a list of
-         items corresponding to those indices. This is similar to NumPy's
-         "fancy indexing". The result is not an OrderedSet because you may ask
-         for duplicate indices, and the number of elements returned should be
-         the number of elements asked for.
-
-         Example:
-             >>> oset = OrderedSet([1, 2, 3])
-             >>> oset[1]
-             2
-         """
-         if isinstance(index, slice) and index == SLICE_ALL:
-             return self.copy()
-         elif is_iterable(index):
-             return [self.items[i] for i in index]
-         elif hasattr(index, "__index__") or isinstance(index, slice):
-             result = self.items[index]
-             if isinstance(result, list):
-                 return self.__class__(result)
-             else:
-                 return result
-         else:
-             raise TypeError("Don't know how to index an OrderedSet by %r" % index)
-
-     def copy(self):
-         """
-         Return a shallow copy of this object.
-
-         Example:
-             >>> this = OrderedSet([1, 2, 3])
-             >>> other = this.copy()
-             >>> this == other
-             True
-             >>> this is other
-             False
-         """
-         return self.__class__(self)
-
-     def __getstate__(self):
-         if len(self) == 0:
-             # The state can't be an empty list.
-             # We need to return a truthy value, or else __setstate__ won't be run.
-             #
-             # This could have been done more gracefully by always putting the state
-             # in a tuple, but this way is backwards- and forwards- compatible with
-             # previous versions of OrderedSet.
-             return (None,)
-         else:
-             return list(self)
-
-     def __setstate__(self, state):
-         if state == (None,):
-             self.__init__([])
-         else:
-             self.__init__(state)
-
-     def __contains__(self, key):
-         """
-         Test if the item is in this ordered set
-
-         Example:
-             >>> 1 in OrderedSet([1, 3, 2])
-             True
-             >>> 5 in OrderedSet([1, 3, 2])
-             False
-         """
-         return key in self.map
-
-     def add(self, key):
-         """
-         Add `key` as an item to this OrderedSet, then return its index.
-
-         If `key` is already in the OrderedSet, return the index it already
-         had.
-
-         Example:
-             >>> oset = OrderedSet()
-             >>> oset.append(3)
-             0
-             >>> print(oset)
-             OrderedSet([3])
-         """
-         if key not in self.map:
-             self.map[key] = len(self.items)
-             self.items.append(key)
-         return self.map[key]
-
-     append = add
-
-     def update(self, sequence):
-         """
-         Update the set with the given iterable sequence, then return the index
-         of the last element inserted.
-
-         Example:
-             >>> oset = OrderedSet([1, 2, 3])
-             >>> oset.update([3, 1, 5, 1, 4])
-             4
-             >>> print(oset)
-             OrderedSet([1, 2, 3, 5, 4])
-         """
-         item_index = None
-         try:
-             for item in sequence:
-                 item_index = self.add(item)
-         except TypeError:
-             raise ValueError(
-                 "Argument needs to be an iterable, got %s" % type(sequence)
-             )
-         return item_index
-
-     def index(self, key):
-         """
-         Get the index of a given entry, raising an IndexError if it's not
-         present.
-
-         `key` can be an iterable of entries that is not a string, in which case
-         this returns a list of indices.
-
-         Example:
-             >>> oset = OrderedSet([1, 2, 3])
-             >>> oset.index(2)
-             1
-         """
-         if is_iterable(key):
-             return [self.index(subkey) for subkey in key]
-         return self.map[key]
-
-     # Provide some compatibility with pd.Index
-     get_loc = index
-     get_indexer = index
-
-     def pop(self):
-         """
-         Remove and return the last element from the set.
-
-         Raises KeyError if the set is empty.
-
-         Example:
-             >>> oset = OrderedSet([1, 2, 3])
-             >>> oset.pop()
-             3
-         """
-         if not self.items:
-             raise KeyError("Set is empty")
-
-         elem = self.items[-1]
-         del self.items[-1]
-         del self.map[elem]
-         return elem
-
-     def discard(self, key):
-         """
-         Remove an element. Do not raise an exception if absent.
-
-         The MutableSet mixin uses this to implement the .remove() method, which
-         *does* raise an error when asked to remove a non-existent item.
-
-         Example:
-             >>> oset = OrderedSet([1, 2, 3])
-             >>> oset.discard(2)
-             >>> print(oset)
-             OrderedSet([1, 3])
-             >>> oset.discard(2)
-             >>> print(oset)
-             OrderedSet([1, 3])
-         """
-         if key in self:
-             i = self.map[key]
-             del self.items[i]
-             del self.map[key]
-             for k, v in self.map.items():
-                 if v >= i:
-                     self.map[k] = v - 1
-
-     def clear(self):
-         """
-         Remove all items from this OrderedSet.
-         """
-         del self.items[:]
-         self.map.clear()
-
-     def __iter__(self):
-         """
-         Example:
-             >>> list(iter(OrderedSet([1, 2, 3])))
-             [1, 2, 3]
-         """
-         return iter(self.items)
-
-     def __reversed__(self):
-         """
-         Example:
-             >>> list(reversed(OrderedSet([1, 2, 3])))
-             [3, 2, 1]
-         """
-         return reversed(self.items)
-
-     def __repr__(self):
-         if not self:
-             return "%s()" % (self.__class__.__name__,)
-         return "%s(%r)" % (self.__class__.__name__, list(self))
-
-     def __eq__(self, other):
-         """
-         Returns true if the containers have the same items. If `other` is a
-         Sequence, then order is checked, otherwise it is ignored.
-
-         Example:
-             >>> oset = OrderedSet([1, 3, 2])
-             >>> oset == [1, 3, 2]
-             True
-             >>> oset == [1, 2, 3]
-             False
-             >>> oset == [2, 3]
-             False
-             >>> oset == OrderedSet([3, 2, 1])
-             False
-         """
-         # In Python 2 deque is not a Sequence, so treat it as one for
-         # consistent behavior with Python 3.
-         if isinstance(other, (Sequence, deque)):
-             # Check that this OrderedSet contains the same elements, in the
-             # same order, as the other object.
-             return list(self) == list(other)
-         try:
-             other_as_set = set(other)
-         except TypeError:
-             # If `other` can't be converted into a set, it's not equal.
-             return False
-         else:
-             return set(self) == other_as_set
-
-     def union(self, *sets):
-         """
-         Combines all unique items.
-         Each items order is defined by its first appearance.
-
-         Example:
-             >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
-             >>> print(oset)
-             OrderedSet([3, 1, 4, 5, 2, 0])
-             >>> oset.union([8, 9])
-             OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
-             >>> oset | {10}
-             OrderedSet([3, 1, 4, 5, 2, 0, 10])
-         """
-         cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
-         containers = map(list, it.chain([self], sets))
-         items = it.chain.from_iterable(containers)
-         return cls(items)
-
-     def __and__(self, other):
-         # the parent implementation of this is backwards
-         return self.intersection(other)
-
-     def intersection(self, *sets):
-         """
-         Returns elements in common between all sets. Order is defined only
-         by the first set.
-
-         Example:
-             >>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3])
-             >>> print(oset)
-             OrderedSet([1, 2, 3])
-             >>> oset.intersection([2, 4, 5], [1, 2, 3, 4])
-             OrderedSet([2])
-             >>> oset.intersection()
-             OrderedSet([1, 2, 3])
-         """
-         cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
-         if sets:
-             common = set.intersection(*map(set, sets))
-             items = (item for item in self if item in common)
-         else:
-             items = self
-         return cls(items)
-
-     def difference(self, *sets):
-         """
-         Returns all elements that are in this set but not the others.
-
-         Example:
-             >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]))
-             OrderedSet([1, 3])
-             >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3]))
-             OrderedSet([1])
-             >>> OrderedSet([1, 2, 3]) - OrderedSet([2])
-             OrderedSet([1, 3])
-             >>> OrderedSet([1, 2, 3]).difference()
-             OrderedSet([1, 2, 3])
-         """
-         cls = self.__class__
-         if sets:
-             other = set.union(*map(set, sets))
-             items = (item for item in self if item not in other)
-         else:
-             items = self
-         return cls(items)
-
-     def issubset(self, other):
-         """
-         Report whether another set contains this set.
-
-         Example:
-             >>> OrderedSet([1, 2, 3]).issubset({1, 2})
-             False
-             >>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4})
-             True
-             >>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5})
-             False
-         """
-         if len(self) > len(other):  # Fast check for obvious cases
-             return False
-         return all(item in other for item in self)
-
-     def issuperset(self, other):
-         """
-         Report whether this set contains another set.
-
-         Example:
-             >>> OrderedSet([1, 2]).issuperset([1, 2, 3])
-             False
-             >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
-             True
-             >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3})
-             False
-         """
-         if len(self) < len(other):  # Fast check for obvious cases
-             return False
-         return all(item in self for item in other)
-
-     def symmetric_difference(self, other):
-         """
-         Return the symmetric difference of two OrderedSets as a new set.
-         That is, the new set will contain all elements that are in exactly
-         one of the sets.
-
-         Their order will be preserved, with elements from `self` preceding
-         elements from `other`.
-
-         Example:
-             >>> this = OrderedSet([1, 4, 3, 5, 7])
-             >>> other = OrderedSet([9, 7, 1, 3, 2])
-             >>> this.symmetric_difference(other)
-             OrderedSet([4, 5, 9, 2])
-         """
-         cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
-         diff1 = cls(self).difference(other)
-         diff2 = cls(other).difference(self)
-         return diff1.union(diff2)
-
-     def _update_items(self, items):
-         """
-         Replace the 'items' list of this OrderedSet with a new one, updating
-         self.map accordingly.
-         """
-         self.items = items
-         self.map = {item: idx for (idx, item) in enumerate(items)}
-
-     def difference_update(self, *sets):
-         """
-         Update this OrderedSet to remove items from one or more other sets.
-
-         Example:
-             >>> this = OrderedSet([1, 2, 3])
-             >>> this.difference_update(OrderedSet([2, 4]))
-             >>> print(this)
-             OrderedSet([1, 3])
-
-             >>> this = OrderedSet([1, 2, 3, 4, 5])
-             >>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6]))
-             >>> print(this)
-             OrderedSet([3, 5])
-         """
-         items_to_remove = set()
-         for other in sets:
-             items_to_remove |= set(other)
-         self._update_items([item for item in self.items if item not in items_to_remove])
-
-     def intersection_update(self, other):
-         """
-         Update this OrderedSet to keep only items in another set, preserving
-         their order in this set.
-
-         Example:
-             >>> this = OrderedSet([1, 4, 3, 5, 7])
-             >>> other = OrderedSet([9, 7, 1, 3, 2])
-             >>> this.intersection_update(other)
-             >>> print(this)
-             OrderedSet([1, 3, 7])
-         """
-         other = set(other)
-         self._update_items([item for item in self.items if item in other])
-
-     def symmetric_difference_update(self, other):
-         """
-         Update this OrderedSet to remove items from another set, then
-         add items from the other set that were not present in this set.
-
-         Example:
-             >>> this = OrderedSet([1, 4, 3, 5, 7])
-             >>> other = OrderedSet([9, 7, 1, 3, 2])
-             >>> this.symmetric_difference_update(other)
-             >>> print(this)
-             OrderedSet([4, 5, 9, 2])
-         """
-         items_to_add = [item for item in other if item not in self]
-         items_to_remove = set(other)
-         self._update_items(
-             [item for item in self.items if item not in items_to_remove] + items_to_add
-         )
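The class is a drop-in for common set work where insertion order matters. A short usage sketch built only on methods defined above:

```python
# Sketch: OrderedSet as an order-preserving deduplicator.
letters = OrderedSet('abracadabra')
print(letters)                 # OrderedSet(['a', 'b', 'r', 'c', 'd'])
print(letters.index('r'))      # 2, a stable positional lookup
print(letters | OrderedSet('alakazam'))   # union keeps first-seen order
letters.discard('b')
print(letters)                 # OrderedSet(['a', 'r', 'c', 'd'])
```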
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/specifiers.py DELETED
@@ -1,802 +0,0 @@
- # This file is dual licensed under the terms of the Apache License, Version
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
- # for complete details.
-
- import abc
- import functools
- import itertools
- import re
- import warnings
- from typing import (
-     Callable,
-     Dict,
-     Iterable,
-     Iterator,
-     List,
-     Optional,
-     Pattern,
-     Set,
-     Tuple,
-     TypeVar,
-     Union,
- )
-
- from .utils import canonicalize_version
- from .version import LegacyVersion, Version, parse
-
- ParsedVersion = Union[Version, LegacyVersion]
- UnparsedVersion = Union[Version, LegacyVersion, str]
- VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
- CallableOperator = Callable[[ParsedVersion, str], bool]
-
-
- class InvalidSpecifier(ValueError):
-     """
-     An invalid specifier was found, users should refer to PEP 440.
-     """
-
-
- class BaseSpecifier(metaclass=abc.ABCMeta):
-     @abc.abstractmethod
-     def __str__(self) -> str:
-         """
-         Returns the str representation of this Specifier like object. This
-         should be representative of the Specifier itself.
-         """
-
-     @abc.abstractmethod
-     def __hash__(self) -> int:
-         """
-         Returns a hash value for this Specifier like object.
-         """
-
-     @abc.abstractmethod
-     def __eq__(self, other: object) -> bool:
-         """
-         Returns a boolean representing whether or not the two Specifier like
-         objects are equal.
-         """
-
-     @abc.abstractproperty
-     def prereleases(self) -> Optional[bool]:
-         """
-         Returns whether or not pre-releases as a whole are allowed by this
-         specifier.
-         """
-
-     @prereleases.setter
-     def prereleases(self, value: bool) -> None:
-         """
-         Sets whether or not pre-releases as a whole are allowed by this
-         specifier.
-         """
-
-     @abc.abstractmethod
-     def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
-         """
-         Determines if the given item is contained within this specifier.
-         """
-
-     @abc.abstractmethod
-     def filter(
-         self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-     ) -> Iterable[VersionTypeVar]:
-         """
-         Takes an iterable of items and filters them so that only items which
-         are contained within this specifier are allowed in it.
-         """
-
-
- class _IndividualSpecifier(BaseSpecifier):
-
-     _operators: Dict[str, str] = {}
-     _regex: Pattern[str]
-
-     def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
-         match = self._regex.search(spec)
-         if not match:
-             raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
-
-         self._spec: Tuple[str, str] = (
-             match.group("operator").strip(),
-             match.group("version").strip(),
-         )
-
-         # Store whether or not this Specifier should accept prereleases
-         self._prereleases = prereleases
-
-     def __repr__(self) -> str:
-         pre = (
-             f", prereleases={self.prereleases!r}"
-             if self._prereleases is not None
-             else ""
-         )
-
-         return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
-
-     def __str__(self) -> str:
-         return "{}{}".format(*self._spec)
-
-     @property
-     def _canonical_spec(self) -> Tuple[str, str]:
-         return self._spec[0], canonicalize_version(self._spec[1])
-
-     def __hash__(self) -> int:
-         return hash(self._canonical_spec)
-
-     def __eq__(self, other: object) -> bool:
-         if isinstance(other, str):
-             try:
-                 other = self.__class__(str(other))
-             except InvalidSpecifier:
-                 return NotImplemented
-         elif not isinstance(other, self.__class__):
-             return NotImplemented
-
-         return self._canonical_spec == other._canonical_spec
-
-     def _get_operator(self, op: str) -> CallableOperator:
-         operator_callable: CallableOperator = getattr(
-             self, f"_compare_{self._operators[op]}"
-         )
-         return operator_callable
-
-     def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
-         if not isinstance(version, (LegacyVersion, Version)):
-             version = parse(version)
-         return version
-
-     @property
-     def operator(self) -> str:
-         return self._spec[0]
-
-     @property
-     def version(self) -> str:
-         return self._spec[1]
-
-     @property
-     def prereleases(self) -> Optional[bool]:
-         return self._prereleases
-
-     @prereleases.setter
-     def prereleases(self, value: bool) -> None:
-         self._prereleases = value
-
-     def __contains__(self, item: str) -> bool:
-         return self.contains(item)
-
-     def contains(
-         self, item: UnparsedVersion, prereleases: Optional[bool] = None
-     ) -> bool:
-
-         # Determine if prereleases are to be allowed or not.
-         if prereleases is None:
-             prereleases = self.prereleases
-
-         # Normalize item to a Version or LegacyVersion, this allows us to have
-         # a shortcut for ``"2.0" in Specifier(">=2")``
-         normalized_item = self._coerce_version(item)
-
-         # Determine if we should be supporting prereleases in this specifier
-         # or not, if we do not support prereleases then we can short circuit
-         # logic if this version is a prerelease.
-         if normalized_item.is_prerelease and not prereleases:
-             return False
-
-         # Actually do the comparison to determine if this item is contained
-         # within this Specifier or not.
-         operator_callable: CallableOperator = self._get_operator(self.operator)
-         return operator_callable(normalized_item, self.version)
-
-     def filter(
-         self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
-     ) -> Iterable[VersionTypeVar]:
-
-         yielded = False
-         found_prereleases = []
-
-         kw = {"prereleases": prereleases if prereleases is not None else True}
-
-         # Attempt to iterate over all the values in the iterable and if any of
-         # them match, yield them.
-         for version in iterable:
-             parsed_version = self._coerce_version(version)
-
-             if self.contains(parsed_version, **kw):
-                 # If our version is a prerelease, and we were not set to allow
-                 # prereleases, then we'll store it for later in case nothing
-                 # else matches this specifier.
-                 if parsed_version.is_prerelease and not (
-                     prereleases or self.prereleases
-                 ):
-                     found_prereleases.append(version)
-                 # Either this is not a prerelease, or we should have been
-                 # accepting prereleases from the beginning.
-                 else:
-                     yielded = True
-                     yield version
-
-         # Now that we've iterated over everything, determine if we've yielded
-         # any values, and if we have not and we have any prereleases stored up
-         # then we will go ahead and yield the prereleases.
-         if not yielded and found_prereleases:
-             for version in found_prereleases:
-                 yield version
-
-
- class LegacySpecifier(_IndividualSpecifier):
-
-     _regex_str = r"""
-         (?P<operator>(==|!=|<=|>=|<|>))
-         \s*
-         (?P<version>
-             [^,;\s)]* # Since this is a "legacy" specifier, and the version
-                       # string can be just about anything, we match everything
-                       # except for whitespace, a semi-colon for marker support,
-                       # a closing paren since versions can be enclosed in
-                       # them, and a comma since it's a version separator.
-         )
-         """
-
-     _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
242
-
243
- _operators = {
244
- "==": "equal",
245
- "!=": "not_equal",
246
- "<=": "less_than_equal",
247
- ">=": "greater_than_equal",
248
- "<": "less_than",
249
- ">": "greater_than",
250
- }
251
-
252
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
253
- super().__init__(spec, prereleases)
254
-
255
- warnings.warn(
256
- "Creating a LegacyVersion has been deprecated and will be "
257
- "removed in the next major release",
258
- DeprecationWarning,
259
- )
260
-
261
- def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
262
- if not isinstance(version, LegacyVersion):
263
- version = LegacyVersion(str(version))
264
- return version
265
-
266
- def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
267
- return prospective == self._coerce_version(spec)
268
-
269
- def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
270
- return prospective != self._coerce_version(spec)
271
-
272
- def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
273
- return prospective <= self._coerce_version(spec)
274
-
275
- def _compare_greater_than_equal(
276
- self, prospective: LegacyVersion, spec: str
277
- ) -> bool:
278
- return prospective >= self._coerce_version(spec)
279
-
280
- def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
281
- return prospective < self._coerce_version(spec)
282
-
283
- def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
284
- return prospective > self._coerce_version(spec)
285
-
286
-
287
- def _require_version_compare(
288
- fn: Callable[["Specifier", ParsedVersion, str], bool]
289
- ) -> Callable[["Specifier", ParsedVersion, str], bool]:
290
- @functools.wraps(fn)
291
- def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
292
- if not isinstance(prospective, Version):
293
- return False
294
- return fn(self, prospective, spec)
295
-
296
- return wrapped
297
-
298
-
299
- class Specifier(_IndividualSpecifier):
300
-
301
- _regex_str = r"""
302
- (?P<operator>(~=|==|!=|<=|>=|<|>|===))
303
- (?P<version>
304
- (?:
305
- # The identity operators allow for an escape hatch that will
306
- # do an exact string match of the version you wish to install.
307
- # This will not be parsed by PEP 440 and we cannot determine
308
- # any semantic meaning from it. This operator is discouraged
309
- # but included entirely as an escape hatch.
310
- (?<====) # Only match for the identity operator
311
- \s*
312
- [^\s]* # We just match everything, except for whitespace
313
- # since we are only testing for strict identity.
314
- )
315
- |
316
- (?:
317
- # The (non)equality operators allow for wild card and local
318
- # versions to be specified so we have to define these two
319
- # operators separately to enable that.
320
- (?<===|!=) # Only match for equals and not equals
321
-
322
- \s*
323
- v?
324
- (?:[0-9]+!)? # epoch
325
- [0-9]+(?:\.[0-9]+)* # release
326
- (?: # pre release
327
- [-_\.]?
328
- (a|b|c|rc|alpha|beta|pre|preview)
329
- [-_\.]?
330
- [0-9]*
331
- )?
332
- (?: # post release
333
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
334
- )?
335
-
336
- # You cannot use a wild card and a dev or local version
337
- # together so group them with a | and make them optional.
338
- (?:
339
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
340
- (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
341
- |
342
- \.\* # Wild card syntax of .*
343
- )?
344
- )
345
- |
346
- (?:
347
- # The compatible operator requires at least two digits in the
348
- # release segment.
349
- (?<=~=) # Only match for the compatible operator
350
-
351
- \s*
352
- v?
353
- (?:[0-9]+!)? # epoch
354
- [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
355
- (?: # pre release
356
- [-_\.]?
357
- (a|b|c|rc|alpha|beta|pre|preview)
358
- [-_\.]?
359
- [0-9]*
360
- )?
361
- (?: # post release
362
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
363
- )?
364
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
365
- )
366
- |
367
- (?:
368
- # All other operators only allow a sub set of what the
369
- # (non)equality operators do. Specifically they do not allow
370
- # local versions to be specified nor do they allow the prefix
371
- # matching wild cards.
372
- (?<!==|!=|~=) # We have special cases for these
373
- # operators so we want to make sure they
374
- # don't match here.
375
-
376
- \s*
377
- v?
378
- (?:[0-9]+!)? # epoch
379
- [0-9]+(?:\.[0-9]+)* # release
380
- (?: # pre release
381
- [-_\.]?
382
- (a|b|c|rc|alpha|beta|pre|preview)
383
- [-_\.]?
384
- [0-9]*
385
- )?
386
- (?: # post release
387
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
388
- )?
389
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
390
- )
391
- )
392
- """
393
-
394
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
395
-
396
- _operators = {
397
- "~=": "compatible",
398
- "==": "equal",
399
- "!=": "not_equal",
400
- "<=": "less_than_equal",
401
- ">=": "greater_than_equal",
402
- "<": "less_than",
403
- ">": "greater_than",
404
- "===": "arbitrary",
405
- }
406
-
407
- @_require_version_compare
408
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
409
-
410
- # Compatible releases have an equivalent combination of >= and ==. That
411
- # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
412
- # implement this in terms of the other specifiers instead of
413
- # implementing it ourselves. The only thing we need to do is construct
414
- # the other specifiers.
415
-
416
- # We want everything but the last item in the version, but we want to
417
- # ignore suffix segments.
418
- prefix = ".".join(
419
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
420
- )
421
-
422
- # Add the prefix notation to the end of our string
423
- prefix += ".*"
424
-
425
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
426
- prospective, prefix
427
- )
428
-
429
- @_require_version_compare
430
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
431
-
432
- # We need special logic to handle prefix matching
433
- if spec.endswith(".*"):
434
- # In the case of prefix matching we want to ignore local segment.
435
- prospective = Version(prospective.public)
436
- # Split the spec out by dots, and pretend that there is an implicit
437
- # dot in between a release segment and a pre-release segment.
438
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
439
-
440
- # Split the prospective version out by dots, and pretend that there
441
- # is an implicit dot in between a release segment and a pre-release
442
- # segment.
443
- split_prospective = _version_split(str(prospective))
444
-
445
- # Shorten the prospective version to be the same length as the spec
446
- # so that we can determine if the specifier is a prefix of the
447
- # prospective version or not.
448
- shortened_prospective = split_prospective[: len(split_spec)]
449
-
450
- # Pad out our two sides with zeros so that they both equal the same
451
- # length.
452
- padded_spec, padded_prospective = _pad_version(
453
- split_spec, shortened_prospective
454
- )
455
-
456
- return padded_prospective == padded_spec
457
- else:
458
- # Convert our spec string into a Version
459
- spec_version = Version(spec)
460
-
461
- # If the specifier does not have a local segment, then we want to
462
- # act as if the prospective version also does not have a local
463
- # segment.
464
- if not spec_version.local:
465
- prospective = Version(prospective.public)
466
-
467
- return prospective == spec_version
468
-
469
- @_require_version_compare
470
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
471
- return not self._compare_equal(prospective, spec)
472
-
473
- @_require_version_compare
474
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
475
-
476
- # NB: Local version identifiers are NOT permitted in the version
477
- # specifier, so local version labels can be universally removed from
478
- # the prospective version.
479
- return Version(prospective.public) <= Version(spec)
480
-
481
- @_require_version_compare
482
- def _compare_greater_than_equal(
483
- self, prospective: ParsedVersion, spec: str
484
- ) -> bool:
485
-
486
- # NB: Local version identifiers are NOT permitted in the version
487
- # specifier, so local version labels can be universally removed from
488
- # the prospective version.
489
- return Version(prospective.public) >= Version(spec)
490
-
491
- @_require_version_compare
492
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
493
-
494
- # Convert our spec to a Version instance, since we'll want to work with
495
- # it as a version.
496
- spec = Version(spec_str)
497
-
498
- # Check to see if the prospective version is less than the spec
499
- # version. If it's not we can short circuit and just return False now
500
- # instead of doing extra unneeded work.
501
- if not prospective < spec:
502
- return False
503
-
504
- # This special case is here so that, unless the specifier itself
505
- # includes is a pre-release version, that we do not accept pre-release
506
- # versions for the version mentioned in the specifier (e.g. <3.1 should
507
- # not match 3.1.dev0, but should match 3.0.dev0).
508
- if not spec.is_prerelease and prospective.is_prerelease:
509
- if Version(prospective.base_version) == Version(spec.base_version):
510
- return False
511
-
512
- # If we've gotten to here, it means that prospective version is both
513
- # less than the spec version *and* it's not a pre-release of the same
514
- # version in the spec.
515
- return True
516
-
517
- @_require_version_compare
518
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
519
-
520
- # Convert our spec to a Version instance, since we'll want to work with
521
- # it as a version.
522
- spec = Version(spec_str)
523
-
524
- # Check to see if the prospective version is greater than the spec
525
- # version. If it's not we can short circuit and just return False now
526
- # instead of doing extra unneeded work.
527
- if not prospective > spec:
528
- return False
529
-
530
- # This special case is here so that, unless the specifier itself
531
- # includes is a post-release version, that we do not accept
532
- # post-release versions for the version mentioned in the specifier
533
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
534
- if not spec.is_postrelease and prospective.is_postrelease:
535
- if Version(prospective.base_version) == Version(spec.base_version):
536
- return False
537
-
538
- # Ensure that we do not allow a local version of the version mentioned
539
- # in the specifier, which is technically greater than, to match.
540
- if prospective.local is not None:
541
- if Version(prospective.base_version) == Version(spec.base_version):
542
- return False
543
-
544
- # If we've gotten to here, it means that prospective version is both
545
- # greater than the spec version *and* it's not a pre-release of the
546
- # same version in the spec.
547
- return True
548
-
549
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
550
- return str(prospective).lower() == str(spec).lower()
551
-
552
- @property
553
- def prereleases(self) -> bool:
554
-
555
- # If there is an explicit prereleases set for this, then we'll just
556
- # blindly use that.
557
- if self._prereleases is not None:
558
- return self._prereleases
559
-
560
- # Look at all of our specifiers and determine if they are inclusive
561
- # operators, and if they are if they are including an explicit
562
- # prerelease.
563
- operator, version = self._spec
564
- if operator in ["==", ">=", "<=", "~=", "==="]:
565
- # The == specifier can include a trailing .*, if it does we
566
- # want to remove before parsing.
567
- if operator == "==" and version.endswith(".*"):
568
- version = version[:-2]
569
-
570
- # Parse the version, and if it is a pre-release than this
571
- # specifier allows pre-releases.
572
- if parse(version).is_prerelease:
573
- return True
574
-
575
- return False
576
-
577
- @prereleases.setter
578
- def prereleases(self, value: bool) -> None:
579
- self._prereleases = value
580
-
581
-
582
- _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
583
-
584
-
585
- def _version_split(version: str) -> List[str]:
586
- result: List[str] = []
587
- for item in version.split("."):
588
- match = _prefix_regex.search(item)
589
- if match:
590
- result.extend(match.groups())
591
- else:
592
- result.append(item)
593
- return result
594
-
595
-
596
- def _is_not_suffix(segment: str) -> bool:
597
- return not any(
598
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
599
- )
600
-
601
-
602
- def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
603
- left_split, right_split = [], []
604
-
605
- # Get the release segment of our versions
606
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
607
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
608
-
609
- # Get the rest of our versions
610
- left_split.append(left[len(left_split[0]) :])
611
- right_split.append(right[len(right_split[0]) :])
612
-
613
- # Insert our padding
614
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
615
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
616
-
617
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
618
-
619
-
620
- class SpecifierSet(BaseSpecifier):
621
- def __init__(
622
- self, specifiers: str = "", prereleases: Optional[bool] = None
623
- ) -> None:
624
-
625
- # Split on , to break each individual specifier into it's own item, and
626
- # strip each item to remove leading/trailing whitespace.
627
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
628
-
629
- # Parsed each individual specifier, attempting first to make it a
630
- # Specifier and falling back to a LegacySpecifier.
631
- parsed: Set[_IndividualSpecifier] = set()
632
- for specifier in split_specifiers:
633
- try:
634
- parsed.add(Specifier(specifier))
635
- except InvalidSpecifier:
636
- parsed.add(LegacySpecifier(specifier))
637
-
638
- # Turn our parsed specifiers into a frozen set and save them for later.
639
- self._specs = frozenset(parsed)
640
-
641
- # Store our prereleases value so we can use it later to determine if
642
- # we accept prereleases or not.
643
- self._prereleases = prereleases
644
-
645
- def __repr__(self) -> str:
646
- pre = (
647
- f", prereleases={self.prereleases!r}"
648
- if self._prereleases is not None
649
- else ""
650
- )
651
-
652
- return f"<SpecifierSet({str(self)!r}{pre})>"
653
-
654
- def __str__(self) -> str:
655
- return ",".join(sorted(str(s) for s in self._specs))
656
-
657
- def __hash__(self) -> int:
658
- return hash(self._specs)
659
-
660
- def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
661
- if isinstance(other, str):
662
- other = SpecifierSet(other)
663
- elif not isinstance(other, SpecifierSet):
664
- return NotImplemented
665
-
666
- specifier = SpecifierSet()
667
- specifier._specs = frozenset(self._specs | other._specs)
668
-
669
- if self._prereleases is None and other._prereleases is not None:
670
- specifier._prereleases = other._prereleases
671
- elif self._prereleases is not None and other._prereleases is None:
672
- specifier._prereleases = self._prereleases
673
- elif self._prereleases == other._prereleases:
674
- specifier._prereleases = self._prereleases
675
- else:
676
- raise ValueError(
677
- "Cannot combine SpecifierSets with True and False prerelease "
678
- "overrides."
679
- )
680
-
681
- return specifier
682
-
683
- def __eq__(self, other: object) -> bool:
684
- if isinstance(other, (str, _IndividualSpecifier)):
685
- other = SpecifierSet(str(other))
686
- elif not isinstance(other, SpecifierSet):
687
- return NotImplemented
688
-
689
- return self._specs == other._specs
690
-
691
- def __len__(self) -> int:
692
- return len(self._specs)
693
-
694
- def __iter__(self) -> Iterator[_IndividualSpecifier]:
695
- return iter(self._specs)
696
-
697
- @property
698
- def prereleases(self) -> Optional[bool]:
699
-
700
- # If we have been given an explicit prerelease modifier, then we'll
701
- # pass that through here.
702
- if self._prereleases is not None:
703
- return self._prereleases
704
-
705
- # If we don't have any specifiers, and we don't have a forced value,
706
- # then we'll just return None since we don't know if this should have
707
- # pre-releases or not.
708
- if not self._specs:
709
- return None
710
-
711
- # Otherwise we'll see if any of the given specifiers accept
712
- # prereleases, if any of them do we'll return True, otherwise False.
713
- return any(s.prereleases for s in self._specs)
714
-
715
- @prereleases.setter
716
- def prereleases(self, value: bool) -> None:
717
- self._prereleases = value
718
-
719
- def __contains__(self, item: UnparsedVersion) -> bool:
720
- return self.contains(item)
721
-
722
- def contains(
723
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
724
- ) -> bool:
725
-
726
- # Ensure that our item is a Version or LegacyVersion instance.
727
- if not isinstance(item, (LegacyVersion, Version)):
728
- item = parse(item)
729
-
730
- # Determine if we're forcing a prerelease or not, if we're not forcing
731
- # one for this particular filter call, then we'll use whatever the
732
- # SpecifierSet thinks for whether or not we should support prereleases.
733
- if prereleases is None:
734
- prereleases = self.prereleases
735
-
736
- # We can determine if we're going to allow pre-releases by looking to
737
- # see if any of the underlying items supports them. If none of them do
738
- # and this item is a pre-release then we do not allow it and we can
739
- # short circuit that here.
740
- # Note: This means that 1.0.dev1 would not be contained in something
741
- # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
742
- if not prereleases and item.is_prerelease:
743
- return False
744
-
745
- # We simply dispatch to the underlying specs here to make sure that the
746
- # given version is contained within all of them.
747
- # Note: This use of all() here means that an empty set of specifiers
748
- # will always return True, this is an explicit design decision.
749
- return all(s.contains(item, prereleases=prereleases) for s in self._specs)
750
-
751
- def filter(
752
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
753
- ) -> Iterable[VersionTypeVar]:
754
-
755
- # Determine if we're forcing a prerelease or not, if we're not forcing
756
- # one for this particular filter call, then we'll use whatever the
757
- # SpecifierSet thinks for whether or not we should support prereleases.
758
- if prereleases is None:
759
- prereleases = self.prereleases
760
-
761
- # If we have any specifiers, then we want to wrap our iterable in the
762
- # filter method for each one, this will act as a logical AND amongst
763
- # each specifier.
764
- if self._specs:
765
- for spec in self._specs:
766
- iterable = spec.filter(iterable, prereleases=bool(prereleases))
767
- return iterable
768
- # If we do not have any specifiers, then we need to have a rough filter
769
- # which will filter out any pre-releases, unless there are no final
770
- # releases, and which will filter out LegacyVersion in general.
771
- else:
772
- filtered: List[VersionTypeVar] = []
773
- found_prereleases: List[VersionTypeVar] = []
774
-
775
- item: UnparsedVersion
776
- parsed_version: Union[Version, LegacyVersion]
777
-
778
- for item in iterable:
779
- # Ensure that we some kind of Version class for this item.
780
- if not isinstance(item, (LegacyVersion, Version)):
781
- parsed_version = parse(item)
782
- else:
783
- parsed_version = item
784
-
785
- # Filter out any item which is parsed as a LegacyVersion
786
- if isinstance(parsed_version, LegacyVersion):
787
- continue
788
-
789
- # Store any item which is a pre-release for later unless we've
790
- # already found a final version or we are accepting prereleases
791
- if parsed_version.is_prerelease and not prereleases:
792
- if not filtered:
793
- found_prereleases.append(item)
794
- else:
795
- filtered.append(item)
796
-
797
- # If we've found no items except for pre-releases, then we'll go
798
- # ahead and use the pre-releases
799
- if not filtered and found_prereleases and prereleases is None:
800
- return found_prereleases
801
-
802
- return filtered
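For orientation, a minimal usage sketch of the `SpecifierSet` API deleted above, written against the standalone `packaging` distribution rather than this vendored copy:

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1.0,<2.0")

    # contains() / `in` test one version against every specifier (logical AND).
    print("1.4" in spec)                                 # True
    print("2.1" in spec)                                 # False
    print(spec.contains("1.5.dev1"))                     # False: prereleases excluded by default
    print(spec.contains("1.5.dev1", prereleases=True))   # True

    # filter() keeps only matching versions, falling back to prereleases
    # when nothing else matches.
    print(list(spec.filter(["0.9", "1.2", "1.9", "2.0"])))   # ['1.2', '1.9']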
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/windows_support.py DELETED
@@ -1,29 +0,0 @@
- import platform
-
-
- def windows_only(func):
-     if platform.system() != 'Windows':
-         return lambda *args, **kwargs: None
-     return func
-
-
- @windows_only
- def hide_file(path):
-     """
-     Set the hidden attribute on a file or directory.
-
-     From http://stackoverflow.com/questions/19622133/
-
-     `path` must be text.
-     """
-     import ctypes
-     __import__('ctypes.wintypes')
-     SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
-     SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
-     SetFileAttributes.restype = ctypes.wintypes.BOOL
-
-     FILE_ATTRIBUTE_HIDDEN = 0x02
-
-     ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
-     if not ret:
-         raise ctypes.WinError()
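A short usage sketch for `hide_file` above; the temp-file path is a made-up example, and the call is a deliberate no-op on non-Windows systems:

    import os
    import tempfile

    # On Windows this sets the hidden attribute (or raises ctypes.WinError());
    # elsewhere the decorator replaces the body with a function returning None.
    path = os.path.join(tempfile.gettempdir(), "example.txt")  # hypothetical path
    open(path, "w").close()
    hide_file(path)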
spaces/Avkash/Satellite_Segmentation_Prediction/app.py DELETED
@@ -1,66 +0,0 @@
- import os
- import cv2
- from PIL import Image
- import numpy as np
- import segmentation_models as sm
- from matplotlib import pyplot as plt
- import random
-
-
- from keras import backend as K
- from keras.models import load_model
-
- import gradio as gr
-
- def jaccard_coef(y_true, y_pred):
-     y_true_flatten = K.flatten(y_true)
-     y_pred_flatten = K.flatten(y_pred)
-     intersection = K.sum(y_true_flatten * y_pred_flatten)
-     final_coef_value = (intersection + 1.0) / (K.sum(y_true_flatten) + K.sum(y_pred_flatten) - intersection + 1.0)
-     return final_coef_value
-
-
- weights = [0.1666, 0.1666, 0.1666, 0.1666, 0.1666, 0.1666]
- dice_loss = sm.losses.DiceLoss(class_weights=weights)
- focal_loss = sm.losses.CategoricalFocalLoss()
- total_loss = dice_loss + (1 * focal_loss)
-
-
- satellite_model = load_model('model/satellite-imagery.h5', custom_objects=({'dice_loss_plus_1focal_loss': total_loss, 'jaccard_coef': jaccard_coef}))
-
- def process_input_image(image_source):
-     image = np.expand_dims(image_source, 0)
-
-     prediction = satellite_model.predict(image)
-     predicted_image = np.argmax(prediction, axis=3)
-
-     predicted_image = predicted_image[0, :, :]
-     predicted_image = predicted_image * 50
-     return 'Predicted Masked Image', predicted_image
-
-
- my_app = gr.Blocks()
-
- with my_app:
-     gr.Markdown("Satellite Image Segmentation Application UI with Gradio")
-     with gr.Tabs():
-         with gr.TabItem("Select your image"):
-             with gr.Row():
-                 with gr.Column():
-                     img_source = gr.Image(label="Please select source Image", shape=(256, 256))
-                     source_image_loader = gr.Button("Load above Image")
-                 with gr.Column():
-                     output_label = gr.Label(label="Image Info")
-                     img_output = gr.Image(label="Image Output")
-             source_image_loader.click(
-                 process_input_image,
-                 [
-                     img_source
-                 ],
-                 [
-                     output_label,
-                     img_output
-                 ]
-             )
-
- my_app.launch(debug=True)
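The smoothed `jaccard_coef` above is intersection-over-union with +1 smoothing in both numerator and denominator; a quick NumPy check of the same formula on toy binary masks:

    import numpy as np

    y_true = np.array([1, 1, 0, 0], dtype=float)
    y_pred = np.array([1, 0, 0, 0], dtype=float)

    intersection = np.sum(y_true * y_pred)                  # 1.0
    union = np.sum(y_true) + np.sum(y_pred) - intersection  # 2.0
    print((intersection + 1.0) / (union + 1.0))             # 0.666..., i.e. (1+1)/(2+1)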
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py DELETED
@@ -1,124 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- # Part of the code is from https://github.com/tztztztztz/eql.detectron2/blob/master/projects/EQL/eql/fast_rcnn.py
- import logging
- import math
- import json
- from typing import Dict, Union
- import torch
- from fvcore.nn import giou_loss, smooth_l1_loss
- from torch import nn
- from torch.nn import functional as F
-
- from detectron2.config import configurable
- from detectron2.layers import Linear, ShapeSpec, batched_nms, cat, nonzero_tuple
- from detectron2.modeling.box_regression import Box2BoxTransform
- from detectron2.structures import Boxes, Instances
- from detectron2.utils.events import get_event_storage
- from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
- from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
- from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats
- from detectron2.utils.comm import get_world_size
- from .fed_loss import load_class_freq, get_fed_loss_inds
-
- __all__ = ["CustomFastRCNNOutputLayers"]
-
- class CustomFastRCNNOutputLayers(FastRCNNOutputLayers):
-     def __init__(
-         self,
-         cfg,
-         input_shape: ShapeSpec,
-         **kwargs
-     ):
-         super().__init__(cfg, input_shape, **kwargs)
-
-         self.cfg = cfg
-
-     def losses(self, predictions, proposals):
-         """
-         enable advanced loss
-         """
-         scores, proposal_deltas = predictions
-         gt_classes = (
-             cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
-         )
-         num_classes = self.num_classes
-         _log_classification_stats(scores, gt_classes)
-
-         if len(proposals):
-             proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)  # Nx4
-             assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
-             gt_boxes = cat(
-                 [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
-                 dim=0,
-             )
-         else:
-             proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
-
-         loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)
-         return {
-             "loss_cls": loss_cls,
-             "loss_box_reg": self.box_reg_loss(
-                 proposal_boxes, gt_boxes, proposal_deltas, gt_classes)
-         }
-
-     def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):
-         if pred_class_logits.numel() == 0:
-             return pred_class_logits.new_zeros([1])[0]  # This is more robust than .sum() * 0.
-
-         B = pred_class_logits.shape[0]
-         C = pred_class_logits.shape[1] - 1
-
-         target = pred_class_logits.new_zeros(B, C + 1)
-         target[range(len(gt_classes)), gt_classes] = 1  # B x (C + 1)
-         target = target[:, :C]  # B x C
-
-         weight = 1
-
-         cls_loss = F.binary_cross_entropy_with_logits(
-             pred_class_logits[:, :-1], target, reduction='none')  # B x C
-         loss = torch.sum(cls_loss * weight) / B
-         return loss
-
-     def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes):
-         """
-         change _no_instance handling
-         """
-         if pred_class_logits.numel() == 0:
-             return pred_class_logits.new_zeros([1])[0]
-
-         loss = F.cross_entropy(
-             pred_class_logits, gt_classes, reduction="mean")
-         return loss
-
-     def inference(self, predictions, proposals):
-         """
-         enable use proposal boxes
-         """
-         boxes = self.predict_boxes(predictions, proposals)
-         scores = self.predict_probs(predictions, proposals)
-         if self.cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE:
-             proposal_scores = [p.get('objectness_logits') for p in proposals]
-             scores = [(s * ps[:, None]) ** 0.5 \
-                 for s, ps in zip(scores, proposal_scores)]
-         image_shapes = [x.image_size for x in proposals]
-         return fast_rcnn_inference(
-             boxes,
-             scores,
-             image_shapes,
-             self.test_score_thresh,
-             self.test_nms_thresh,
-             self.test_topk_per_image,
-         )
-
-     def predict_probs(self, predictions, proposals):
-         """
-         support sigmoid
-         """
-         scores, _ = predictions
-         num_inst_per_image = [len(p) for p in proposals]
-         probs = F.softmax(scores, dim=-1)
-         return probs.split(num_inst_per_image, dim=0)
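The target construction in `sigmoid_cross_entropy_loss` one-hot encodes the ground truth over C+1 classes and then drops the background column before the binary cross-entropy; a standalone sketch of the same steps with toy shapes:

    import torch
    import torch.nn.functional as F

    B, C = 4, 3                         # 4 proposals, 3 foreground classes
    logits = torch.randn(B, C + 1)      # last column is the background logit
    gt = torch.tensor([0, 2, 3, 1])     # class index 3 == background here

    target = logits.new_zeros(B, C + 1)
    target[range(B), gt] = 1            # one-hot over C + 1 classes
    target = target[:, :C]              # drop background -> all-zero row for gt == 3

    loss = F.binary_cross_entropy_with_logits(
        logits[:, :-1], target, reduction="none").sum() / B
    print(loss)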
spaces/Benebene/Chat-question-answering/test.py DELETED
@@ -1,62 +0,0 @@
- from utils import Stuff
-
- def assert_equal(actual, expected):
-     # Return 1 on a match and 0 otherwise, so the results can be summed.
-     if actual == expected:
-         return 1
-     else:
-         return 0
-
- def test(test_bench: list, s: Stuff) -> None:
-
-     ok = 0
-     for i, elem in enumerate(test_bench):
-         ok += assert_equal(s.most_similar(elem['question']), elem['index'])
-
-     pourcentage_ok = ok * 100 / (i + 1)
-
-     print(f'Percentage of correct answers: {pourcentage_ok} %')
-
-
- test_bench = [
-     {
-         "question": 'Is there a possibility of a discrepancy between the official time-measure UT1 and other credible measures of mean solar time, indicating a potential progressive difference between the two? Essentially, the question is asking whether Universal Time truly tracks mean solar time, but answering it may not be a straightforward task.',
-         "index": 0
-     },
-     {
-         "question": "In certain astronomical photographs such as Centaurus A, what is the red substance that can be observed?",
-         "index": 4
-     },
-     {
-         "question": 'Is it possible to remotely measure isotope ratios, or is it necessary to acquire a sample for analysis?',
-         "index": 7
-     },
-     {
-         "question": 'What is the reason behind in-the-sky.org stating that Mercury is not observable from Taipei during these days?',
-         "index": 1000
-     },
-     {
-         "question": 'What is the reason for the connection between the gravitational acceleration $g$ and the oscillator strength $f$ in the expression $\log{gf}_{\odot}$?',
-         "index": 554
-     },
-     {
-         "question": "In Saint-Exupery's account of his visit to a plateau, he mentions finding several meteorites with ease. Can this be considered a realistic portrayal?",
-         "index": 900
-     },
-     {
-         "question": 'Is it likely that the recent passage of comet 21P near Earth on September 10th will result in a Draconid storm on October 9th?',
-         "index": 87
-     },
-     {
-         "question": "From which source can I obtain data regarding the orbit of Mercury, in order to apply it to a model?",
-         "index": 52
-     },
-     {
-         "question": 'What kind of information can we gather from the collective amount of stellar mass present in galaxies?',
-         "index": 322
-     },
-     {
-         "question": 'What is the process for converting magnitudes into bolometric luminosity?',
-         "index": 6
-     }
- ]
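`Stuff` comes from the space's `utils` module and is not shown in this diff; a hypothetical stub with the same `most_similar` interface is enough to exercise the bench:

    # Hypothetical stand-in for utils.Stuff, only to illustrate the expected
    # interface; the real class presumably does semantic similarity search.
    class FakeStuff:
        def __init__(self, answers: dict):
            self._answers = answers  # question -> index lookup

        def most_similar(self, question: str) -> int:
            return self._answers.get(question, -1)

    s = FakeStuff({elem["question"]: elem["index"] for elem in test_bench})
    test(test_bench, s)  # prints: Percentage of correct answers: 100.0 %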
spaces/Benson/text-generation/Examples/Coche Carretera Carreras Mod Apk Happymod.md DELETED
@@ -1,47 +0,0 @@
- <br />
- <h1>CarX Highway Racing Mod APK Happymod: A Review</h1>
- <p>If you are a fan of car racing games, you may have heard of CarX Highway Racing, an exciting and realistic racing game for Android devices. But did you know that you can enjoy this game even more with a modded version from Happymod? In this article, we review CarX Highway Racing Mod APK Happymod, a modified version of the game that offers unlimited money, unlocked cars, and more. We also show you how to download and install this mod apk on your device easily and safely.</p>
- <h2>car highway racing mod apk happymod</h2><br /><p><b><b>Download</b> &#10042;&#10042;&#10042; <a href="https://bltlly.com/2v6KNs">https://bltlly.com/2v6KNs</a></b></p><br /><br />
- <h2>What is CarX Highway Racing?</h2>
- <p>CarX Highway Racing is a racing game developed by CarX Technologies, the same company behind the popular CarX Drift Racing series. In this game, you can experience the thrill of high-speed racing on realistic highways, with stunning graphics, physics, and sound effects. You can choose from a variety of cars, from sports cars to muscle cars, and customize them to your liking. You can also compete in different game modes, such as career mode, time attack mode, police chase mode, and online mode. You can also take on challenging missions and quests to earn rewards and unlock new cars and tracks.</p>
- <h3>Features of CarX Highway Racing</h3>
- <h4>Realistic physics and graphics</h4>
- <p>One of the main attractions of CarX Highway Racing is its realistic physics and graphics. The game uses the advanced CarX engine, which simulates the behavior of real cars on different road surfaces and weather conditions. You can feel the difference between driving on asphalt, sand, or snow, and adjust your driving style accordingly. The game also features impressive graphics, with detailed car models, dynamic shadows, reflections, and lighting effects. You can also enjoy the scenic views along the highways, from deserts to forests, from day to night.</p>
- <h4>Diverse cars and tracks</h4>
-
- <h4>Challenging game modes and missions</h4>
- <p>A third feature of CarX Highway Racing is its challenging game modes and missions. The game offers four game modes to test your skills and have fun. In career mode, you can follow the story of a racer who wants to become the best in the world. You can compete against different rivals and bosses, and earn money and reputation. In time attack mode, you can race against the clock and try to beat your own records. In police chase mode, you can escape from the police or chase down criminals. In online mode, you can race against other players from around the world and show off your skills. The game also offers various missions and quests that require you to complete certain objectives or tasks within a given time or distance limit. You can earn rewards such as money, gold coins, keys, or chests for completing these missions.</p>
- <p></p>
- <h2>What is Happymod?</h2>
- <p>Happymod is a website that provides modified versions of popular Android games for free. A modded version is a version of a game that has been modified by players or fans to change or improve some aspects of it, such as graphics, gameplay, features, or content. Mods can make a game more fun, challenging, realistic, or immersive, depending on the preferences of the modder and the player. Mods can also fix bugs, errors, or glitches that the original game developers did not address. <h3>Benefits of using Happymod</h3>
- <h4>Free and safe downloads</h4>
-
- <h4>Modded versions of popular games</h4>
- <p>Another benefit of using Happymod is that it provides modded versions of popular games that you may not find elsewhere. You can find mods for games like Minecraft, GTA, PUBG, Among Us, Roblox, and many more. You can enjoy these games with unlimited resources, unlocked features, premium items, and other perks that the original games do not have. You can also discover new games and genres that you may not have tried before.</p>
- <h4>User-friendly interface and community</h4>
- <p>A third benefit of using Happymod is its user-friendly interface and community. The website is easy to navigate and search, with categories, tags, filters, and recommendations to help you find the mods you want. You can also upload your own mods or request mods from other users. You can also join the Happymod community and chat with other modders and players, share your feedback, suggestions, tips, and tricks, and make new friends.</p>
- <h2>How to download and install CarX Highway Racing Mod APK Happymod?</h2>
- <p>If you are interested in downloading and installing CarX Highway Racing Mod APK Happymod on your device, you need to follow these simple steps:</p>
- <h3>Steps to follow</h3>
- <h4>Enable unknown sources on your device</h4>
- <p>The first step is to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings > security > unknown sources > enable. This may vary depending on your device model and Android version.</p>
- <h4>Download the mod apk file from the Happymod website</h4>
-
- <h4>Install the mod apk file and enjoy the game</h4>
- <p>The third step is to install the mod apk file and enjoy the game. To do this, go to your file manager and locate the downloaded mod apk file. Tap on it and follow the installation instructions. Once the installation is complete, you can launch the game from the app drawer or home screen. You will see that you have unlimited money, unlocked cars, and more in the game. Enjoy!</p>
- <h2>Conclusion</h2>
- <p>In conclusion, CarX Highway Racing Mod APK Happymod is a great way to enjoy the CarX Highway Racing game with more fun and excitement. You can experience the realistic physics and graphics, diverse cars and tracks, challenging game modes and missions, and more with this modified version of the game. You can also download and install this mod easily and safely from the Happymod website, which offers free and safe downloads of modded games for Android devices. If you are a fan of car racing games, you should definitely try CarX Highway Racing Mod APK Happymod.</p>
- <h3>Frequently asked questions</h3>
- <ul>
- <li><b>What is CarX Highway Racing Mod APK Happymod?</b></li>
- <li>A: It is a modified version of the CarX Highway Racing game that offers unlimited money, unlocked cars, and more.</li>
- <li><b>What is Happymod?</b></li>
- <li>A: It is a website that provides modified versions of popular Android games for free.</li>
- <li><b>How to download and install CarX Highway Racing Mod APK Happymod?</b></li>
- <li>A: You need to enable unknown sources on your device, download the mod apk file from the Happymod website, and install the mod apk file on your device.</li>
- <li><b>Is CarX Highway Racing Mod APK Happymod safe to use?</b></li>
- <li>A: Yes, it is safe to use. Happymod verifies and tests every mod before uploading it to the website, and only allows mods that are safe and working.</li>
- <li><b>What are the advantages of using CarX Highway Racing Mod APK Happymod?</b></li>
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/_jaraco_text.py DELETED
@@ -1,109 +0,0 @@
- """Functions brought over from jaraco.text.
-
- These functions are not supposed to be used within `pip._internal`. These are
- helper functions brought over from `jaraco.text` to enable vendoring newer
- copies of `pkg_resources` without having to vendor `jaraco.text` and its entire
- dependency cone; something that our vendoring setup is not currently capable of
- handling.
-
- License reproduced from original source below:
-
- Copyright Jason R. Coombs
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to
- deal in the Software without restriction, including without limitation the
- rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- sell copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- IN THE SOFTWARE.
- """
-
- import functools
- import itertools
-
-
- def _nonblank(str):
-     return str and not str.startswith("#")
-
-
- @functools.singledispatch
- def yield_lines(iterable):
-     r"""
-     Yield valid lines of a string or iterable.
-
-     >>> list(yield_lines(''))
-     []
-     >>> list(yield_lines(['foo', 'bar']))
-     ['foo', 'bar']
-     >>> list(yield_lines('foo\nbar'))
-     ['foo', 'bar']
-     >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
-     ['foo', 'baz #comment']
-     >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
-     ['foo', 'bar', 'baz', 'bing']
-     """
-     return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
- @yield_lines.register(str)
- def _(text):
-     return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
- def drop_comment(line):
-     """
-     Drop comments.
-
-     >>> drop_comment('foo # bar')
-     'foo'
-
-     A hash without a space may be in a URL.
-
-     >>> drop_comment('http://example.com/foo#bar')
-     'http://example.com/foo#bar'
-     """
-     return line.partition(" #")[0]
-
-
- def join_continuation(lines):
-     r"""
-     Join lines continued by a trailing backslash.
-
-     >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-     ['foobar', 'baz']
-     >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-     ['foobar', 'baz']
-     >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
-     ['foobarbaz']
-
-     Not sure why, but...
-     The character preceding the backslash is also elided.
-
-     >>> list(join_continuation(['goo\\', 'dly']))
-     ['godly']
-
-     A terrible idea, but...
-     If no line is available to continue, suppress the lines.
-
-     >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
-     ['foo']
-     """
-     lines = iter(lines)
-     for item in lines:
-         while item.endswith("\\"):
-             try:
-                 item = item[:-2].strip() + next(lines)
-             except StopIteration:
-                 return
-         yield item
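A small sketch of how these helpers compose when parsing a requirements-style block (illustrative only, not part of the vendored module):

    raw = """
    # build dependencies
    setuptools \\
    >=40.0
    wheel  # for bdist_wheel
    """

    # Strip blanks and comment lines, then join backslash continuations and
    # drop trailing comments from the survivors.
    lines = join_continuation(map(drop_comment, yield_lines(raw)))
    print(list(lines))  # ['setuptools>=40.0', 'wheel ']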
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py DELETED
File without changes
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/__init__.py DELETED
File without changes
spaces/Bostoncake/ChatAssistant/app.py DELETED
@@ -1,146 +0,0 @@
- import numpy as np
- import os
- import re
- from io import BytesIO
- import datetime
- import time
- import openai, tenacity
- import argparse
- import configparser
- import json
- import tiktoken
- import PyPDF2
- import gradio
-
- # Define the Reviewer class
- class Reviewer:
-     # Initializer: store the configuration as attributes
-     def __init__(self, api, research_field, question, paper_pdf, language):
-         self.api = api
-         self.research_field = research_field
-         self.question = question
-
-         self.language = language
-         self.paper_pdf = paper_pdf
-         self.max_token_num = 4097
-         self.encoding = tiktoken.get_encoding("gpt2")
-
-     def review_by_chatgpt(self, paper_list):
-         text = self.extract_chapter(self.paper_pdf)
-         chat_review_text, total_token_used = self.chat_review(text=text)
-         return chat_review_text, total_token_used
-
-     @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
-                     stop=tenacity.stop_after_attempt(5),
-                     reraise=True)
-     def chat_review(self, text):
-         openai.api_key = self.api  # read the API key
-         review_prompt_token = 1000
-         text_token = len(self.encoding.encode(text))
-         input_text_index = int(len(text) * (self.max_token_num - review_prompt_token) / (text_token + 1))
-         input_text = "This is the paper you are asked to read:" + text[:input_text_index]
-         input_text = input_text + "The question from your student is: " + self.question
-         messages = [
-             {"role": "system", "content": "You are a professional researcher in the field of " + self.research_field + ". You are the mentor of a student who is new to this field. Now I will give you a paper. You need to help your student to read this paper by instructing him to read the important sections in this paper and answer his questions towards these sections. Please answer in {}.".format(self.language)},
-             {"role": "user", "content": input_text},
-         ]
-
-         response = openai.ChatCompletion.create(
-             model="gpt-3.5-turbo",
-             messages=messages,
-         )
-         result = ''
-         for choice in response.choices:
-             result += choice.message.content
-         print("********" * 10)
-         print(result)
-         print("********" * 10)
-         print("prompt_token_used:", response.usage.prompt_tokens)
-         print("completion_token_used:", response.usage.completion_tokens)
-         print("total_token_used:", response.usage.total_tokens)
-         print("response_time:", response.response_ms / 1000.0, 's')
-         return result, response.usage.total_tokens
-
-     def extract_chapter(self, pdf_path):
-         file_object = BytesIO(pdf_path)
-         # Create a PDF reader object
-         pdf_reader = PyPDF2.PdfReader(file_object)
-         # Get the total number of pages in the PDF
-         num_pages = len(pdf_reader.pages)
-         # Initialize the extraction state and the extracted text
-         extraction_started = False
-         extracted_text = ""
-         # Iterate over every page in the PDF
-         for page_number in range(num_pages):
-             page = pdf_reader.pages[page_number]
-             page_text = page.extract_text()
-
-             # Once the section title is found, start extracting
-             if 'Abstract'.lower() in page_text.lower() and not extraction_started:
-                 extraction_started = True
-                 page_number_start = page_number
-             # While extraction is active, append the page text to the result
-             if extraction_started:
-                 extracted_text += page_text
-                 # Stop extracting once we have passed the starting page
-                 if page_number_start + 1 < page_number:
-                     break
-         return extracted_text
-
- def main(api, research_field, question, paper_pdf, language):
-     start_time = time.time()
-     if not api or not research_field or not question or not paper_pdf:
-         return "Please fill in all the fields!"
-     # Validate the PDF file
-     else:
-         # Create a Reviewer object
-         reviewer1 = Reviewer(api, research_field, question, paper_pdf, language)
-         # Decide whether the input is a path or a file:
-         comments, total_token_used = reviewer1.review_by_chatgpt(paper_list=paper_pdf)
-     time_used = time.time() - start_time
-     output2 = "Tokens used: " + str(total_token_used) + "\nTime taken: " + str(round(time_used, 2)) + " seconds"
-     return comments, output2
-
-
- ########################################################################################################
- # Title
- title = "ChatAssistant: a ChatGPT paper-reading assistant"
- # Description
- description = '''<div align='left'>
- <strong>ChatAssistant is a paper-reading assistant built on the ChatGPT-3.5 API.</strong> It is used as follows:
- ⭐️Given a user's question about the content of a paper, it provides an answer or study suggestions.
- ([Get an API key](https://chatgpt.cn.obiscr.com/blog/posts/2023/How-to-get-api-key/))
- </div>
- '''
-
- # Build the Gradio interface
- inp = [gradio.inputs.Textbox(label="Enter your API key (the string starting with sk)",
-                              default="",
-                              type='password'),
-        gradio.inputs.Textbox(lines=3,
-                              label="Enter the paper's research field (in the same language as the output)",
-                              default="""eg. computer science, artificial intelligence and transfer learning"""
-                              ),
-        gradio.inputs.Textbox(lines=3,
-                              label="Enter your question (in the same language as the output). Where possible, state after the question the direction you want the answer to take.",
-                              default="""eg. What are the main contributions of this article? Please summarize the technical details in your reply as well."""
-                              ),
-        gradio.inputs.File(label="Upload the paper PDF (required)", type="bytes"),
-        gradio.inputs.Radio(choices=["English", "Chinese"],
-                            default="English",
-                            label="Choose the output language"),
-        ]
-
- chat_assistant_gui = gradio.Interface(fn=main,
-                                       inputs=inp,
-                                       outputs=[gradio.Textbox(lines=25, label="Suggested answer"), gradio.Textbox(lines=2, label="Resource usage")],
-                                       title=title,
-                                       description=description)
-
- # Start server
- chat_assistant_gui.launch(quiet=True, show_api=False)
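The truncation in `chat_review` converts a token budget into a character budget by assuming a constant characters-per-token ratio; a standalone sketch of that idea (budgets copied from the class above, the text is dummy data):

    import tiktoken

    encoding = tiktoken.get_encoding("gpt2")
    max_token_num, review_prompt_token = 4097, 1000

    text = "some very long paper text ... " * 500
    text_token = len(encoding.encode(text))

    # len(text) / text_token is roughly characters-per-token, so scaling the
    # character count by the remaining token budget approximates a token cut.
    cut = int(len(text) * (max_token_num - review_prompt_token) / (text_token + 1))
    truncated = text[:cut]
    print(len(encoding.encode(truncated)))  # roughly within the 3097-token budget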
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/common.py DELETED
@@ -1,147 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import copy
- import logging
- import numpy as np
- import pickle
- import random
- import torch.utils.data as data
-
- from detectron2.utils.serialize import PicklableWrapper
-
- __all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset"]
-
-
- class MapDataset(data.Dataset):
-     """
-     Map a function over the elements in a dataset.
-
-     Args:
-         dataset: a dataset where map function is applied.
-         map_func: a callable which maps the element in dataset. map_func is
-             responsible for error handling, when error happens, it needs to
-             return None so the MapDataset will randomly use other
-             elements from the dataset.
-     """
-
-     def __init__(self, dataset, map_func):
-         self._dataset = dataset
-         self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work
-
-         self._rng = random.Random(42)
-         self._fallback_candidates = set(range(len(dataset)))
-
-     def __len__(self):
-         return len(self._dataset)
-
-     def __getitem__(self, idx):
-         retry_count = 0
-         cur_idx = int(idx)
-
-         while True:
-             data = self._map_func(self._dataset[cur_idx])
-             if data is not None:
-                 self._fallback_candidates.add(cur_idx)
-                 return data
-
-             # _map_func fails for this idx, use a random new index from the pool
-             retry_count += 1
-             self._fallback_candidates.discard(cur_idx)
-             cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
-
-             if retry_count >= 3:
-                 logger = logging.getLogger(__name__)
-                 logger.warning(
-                     "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
-                         idx, retry_count
-                     )
-                 )
-
-
- class DatasetFromList(data.Dataset):
-     """
-     Wrap a list to a torch Dataset. It produces elements of the list as data.
-     """
-
-     def __init__(self, lst: list, copy: bool = True, serialize: bool = True):
-         """
-         Args:
-             lst (list): a list which contains elements to produce.
-             copy (bool): whether to deepcopy the element when producing it,
-                 so that the result can be modified in place without affecting the
-                 source in the list.
-             serialize (bool): whether to hold memory using serialized objects, when
-                 enabled, data loader workers can use shared RAM from master
-                 process instead of making a copy.
-         """
-         self._lst = lst
-         self._copy = copy
-         self._serialize = serialize
-
-         def _serialize(data):
-             buffer = pickle.dumps(data, protocol=-1)
-             return np.frombuffer(buffer, dtype=np.uint8)
-
-         if self._serialize:
-             logger = logging.getLogger(__name__)
-             logger.info(
-                 "Serializing {} elements to byte tensors and concatenating them all ...".format(
-                     len(self._lst)
-                 )
-             )
-             self._lst = [_serialize(x) for x in self._lst]
-             self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
-             self._addr = np.cumsum(self._addr)
-             self._lst = np.concatenate(self._lst)
-             logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024 ** 2))
-
-     def __len__(self):
-         if self._serialize:
-             return len(self._addr)
-         else:
-             return len(self._lst)
-
-     def __getitem__(self, idx):
-         if self._serialize:
-             start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
-             end_addr = self._addr[idx].item()
-             bytes = memoryview(self._lst[start_addr:end_addr])
-             return pickle.loads(bytes)
-         elif self._copy:
-             return copy.deepcopy(self._lst[idx])
-         else:
-             return self._lst[idx]
-
-
- class AspectRatioGroupedDataset(data.IterableDataset):
-     """
-     Batch data that have similar aspect ratio together.
-     In this implementation, images whose aspect ratio < (or >) 1 will
-     be batched together.
-
-     It assumes the underlying dataset produces dicts with "width" and "height" keys.
-     It will then produce a list of original dicts with length = batch_size,
-     all with similar aspect ratios.
-     """
-
-     def __init__(self, dataset, batch_size):
-         """
-         Args:
-             dataset: an iterable. Each element must be a dict with keys
-                 "width" and "height", which will be used to batch data.
-             batch_size (int):
-         """
-         self.dataset = dataset
-         self.batch_size = batch_size
-         self._buckets = [[] for _ in range(2)]
-         # Hard-coded two aspect ratio groups: w > h and w < h.
-         # Can add support for more aspect ratio groups, but doesn't seem useful
-
-     def __iter__(self):
-         for d in self.dataset:
-             w, h = d["width"], d["height"]
-             bucket_id = 0 if w > h else 1
-             bucket = self._buckets[bucket_id]
-             bucket.append(d)
-             if len(bucket) == self.batch_size:
-                 yield bucket[:]
-                 del bucket[:]
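The three deleted classes compose into a single data pipeline: `DatasetFromList` holds the annotation dicts, `MapDataset` applies a mapper with failure fallback, and `AspectRatioGroupedDataset` forms the batches. A minimal sketch, assuming detectron2 is installed and these classes remain importable from `detectron2.data.common`; the toy dicts and the identity `map_func` are illustrative stand-ins for real dataset dicts and a real `DatasetMapper`:

```python
from detectron2.data.common import (
    AspectRatioGroupedDataset,
    DatasetFromList,
    MapDataset,
)

# Toy "dataset dicts": alternating landscape and portrait images.
dicts = [{"width": 640, "height": 480}, {"width": 480, "height": 640}] * 4

# Hold the list as concatenated serialized bytes so dataloader workers can
# share RAM with the master process instead of each copying the list.
dataset = DatasetFromList(dicts, copy=False, serialize=True)

# map_func must return None on failure; MapDataset then retries with a
# random fallback index instead of crashing the worker.
dataset = MapDataset(dataset, map_func=lambda d: d)

# Group w > h and w < h images into separate fixed-size batches.
for batch in AspectRatioGroupedDataset(dataset, batch_size=2):
    print([(d["width"], d["height"]) for d in batch])  # e.g. [(640, 480), (640, 480)]
    break
```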
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/rotated_boxes.py DELETED
@@ -1,23 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- from __future__ import absolute_import, division, print_function, unicode_literals
-
- # import torch
- from detectron2 import _C
-
-
- def pairwise_iou_rotated(boxes1, boxes2):
-     """
-     Return intersection-over-union (Jaccard index) of boxes.
-
-     Both sets of boxes are expected to be in
-     (x_center, y_center, width, height, angle) format.
-
-     Arguments:
-         boxes1 (Tensor[N, 5])
-         boxes2 (Tensor[M, 5])
-
-     Returns:
-         iou (Tensor[N, M]): the NxM matrix containing the pairwise
-             IoU values for every element in boxes1 and boxes2
-     """
-     return _C.box_iou_rotated(boxes1, boxes2)
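A minimal sketch of calling this wrapper, assuming a detectron2 build with its compiled `_C` extension; boxes are `(x_center, y_center, width, height, angle)` with the angle in degrees:

```python
import torch
from detectron2.layers.rotated_boxes import pairwise_iou_rotated

boxes1 = torch.tensor([[50.0, 50.0, 100.0, 20.0, 0.0]])   # axis-aligned 100x20 box
boxes2 = torch.tensor([[50.0, 50.0, 100.0, 20.0, 90.0]])  # same box rotated 90 degrees
iou = pairwise_iou_rotated(boxes1, boxes2)                # Tensor of shape (1, 1)
print(iou)
```

For this pair the intersection is the central 20 x 20 square (area 400) and the union is 2000 + 2000 - 400 = 3600, so the reported IoU should be roughly 0.11.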
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/test_time_augmentation.py DELETED
@@ -1,285 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import copy
- import numpy as np
- from contextlib import contextmanager
- from itertools import count
- import torch
- from torch import nn
- from torch.nn.parallel import DistributedDataParallel
-
- from detectron2.data.detection_utils import read_image
- from detectron2.data.transforms import ResizeShortestEdge
- from detectron2.structures import Instances
-
- from .meta_arch import GeneralizedRCNN
- from .postprocessing import detector_postprocess
- from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image
-
- __all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"]
-
-
- class DatasetMapperTTA:
-     """
-     Implement test-time augmentation for detection data.
-     It is a callable which takes a dataset dict from a detection dataset,
-     and returns a list of dataset dicts where the images
-     are augmented from the input image by the transformations defined in the config.
-     This is used for test-time augmentation.
-     """
-
-     def __init__(self, cfg):
-         self.min_sizes = cfg.TEST.AUG.MIN_SIZES
-         self.max_size = cfg.TEST.AUG.MAX_SIZE
-         self.flip = cfg.TEST.AUG.FLIP
-         self.image_format = cfg.INPUT.FORMAT
-
-     def __call__(self, dataset_dict):
-         """
-         Args:
-             dict: a detection dataset dict
-
-         Returns:
-             list[dict]:
-                 a list of dataset dicts, which contain augmented versions of the input image.
-                 The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``.
-         """
-         ret = []
-         if "image" not in dataset_dict:
-             numpy_image = read_image(dataset_dict["file_name"], self.image_format)
-         else:
-             numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy().astype("uint8")
-         for min_size in self.min_sizes:
-             image = np.copy(numpy_image)
-             tfm = ResizeShortestEdge(min_size, self.max_size).get_transform(image)
-             resized = tfm.apply_image(image)
-             resized = torch.as_tensor(resized.transpose(2, 0, 1).astype("float32"))
-
-             dic = copy.deepcopy(dataset_dict)
-             dic["horiz_flip"] = False
-             dic["image"] = resized
-             ret.append(dic)
-
-             if self.flip:
-                 dic = copy.deepcopy(dataset_dict)
-                 dic["horiz_flip"] = True
-                 dic["image"] = torch.flip(resized, dims=[2])
-                 ret.append(dic)
-         return ret
-
-
- class GeneralizedRCNNWithTTA(nn.Module):
-     """
-     A GeneralizedRCNN with test-time augmentation enabled.
-     Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`.
-     """
-
-     def __init__(self, cfg, model, tta_mapper=None, batch_size=3):
-         """
-         Args:
-             cfg (CfgNode):
-             model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
-             tta_mapper (callable): takes a dataset dict and returns a list of
-                 augmented versions of the dataset dict. Defaults to
-                 `DatasetMapperTTA(cfg)`.
-             batch_size (int): batch the augmented images into this batch size for inference.
-         """
-         super().__init__()
-         if isinstance(model, DistributedDataParallel):
-             model = model.module
-         assert isinstance(
-             model, GeneralizedRCNN
-         ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model))
-         self.cfg = cfg.clone()
-         assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet"
-         assert (
-             not self.cfg.MODEL.LOAD_PROPOSALS
-         ), "TTA for pre-computed proposals is not supported yet"
-
-         self.model = model
-
-         if tta_mapper is None:
-             tta_mapper = DatasetMapperTTA(cfg)
-         self.tta_mapper = tta_mapper
-         self.batch_size = batch_size
-
-     @contextmanager
-     def _turn_off_roi_heads(self, attrs):
-         """
-         Open a context where some heads in `model.roi_heads` are temporarily turned off.
-         Args:
-             attrs (list[str]): the attributes in `model.roi_heads` which can be used
-                 to turn off a specific head, e.g., "mask_on", "keypoint_on".
-         """
-         roi_heads = self.model.roi_heads
-         old = {}
-         for attr in attrs:
-             try:
-                 old[attr] = getattr(roi_heads, attr)
-             except AttributeError:
-                 # The head may not be implemented in certain ROIHeads
-                 pass
-
-         if len(old.keys()) == 0:
-             yield
-         else:
-             for attr in old.keys():
-                 setattr(roi_heads, attr, False)
-             yield
-             for attr in old.keys():
-                 setattr(roi_heads, attr, old[attr])
-
-     def _batch_inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
-         """
-         Execute inference on a list of inputs,
-         using batch size = self.batch_size, instead of the length of the list.
-
-         Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference`
-         """
-         if detected_instances is None:
-             detected_instances = [None] * len(batched_inputs)
-
-         outputs = []
-         inputs, instances = [], []
-         for idx, input, instance in zip(count(), batched_inputs, detected_instances):
-             inputs.append(input)
-             instances.append(instance)
-             if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:
-                 outputs.extend(
-                     self.model.inference(
-                         inputs,
-                         instances if instances[0] is not None else None,
-                         do_postprocess=do_postprocess,
-                     )
-                 )
-                 inputs, instances = [], []
-         return outputs
-
-     def __call__(self, batched_inputs):
-         """
-         Same input/output format as :meth:`GeneralizedRCNN.forward`
-         """
-         return [self._inference_one_image(x) for x in batched_inputs]
-
-     def _detector_postprocess(self, outputs, aug_vars):
-         return detector_postprocess(outputs, aug_vars["height"], aug_vars["width"])
-
-     def _inference_one_image(self, input):
-         """
-         Args:
-             input (dict): one dataset dict
-
-         Returns:
-             dict: one output dict
-         """
-
-         augmented_inputs, aug_vars = self._get_augmented_inputs(input)
-         # Detect boxes from all augmented versions
-         with self._turn_off_roi_heads(["mask_on", "keypoint_on"]):
-             # temporarily disable roi heads
-             all_boxes, all_scores, all_classes = self._get_augmented_boxes(
-                 augmented_inputs, aug_vars
-             )
-         merged_instances = self._merge_detections(
-             all_boxes, all_scores, all_classes, (aug_vars["height"], aug_vars["width"])
-         )
-
-         if self.cfg.MODEL.MASK_ON:
-             # Use the detected boxes to obtain new fields
-             augmented_instances = self._rescale_detected_boxes(
-                 augmented_inputs, merged_instances, aug_vars
-             )
-             # run forward on the detected boxes
-             outputs = self._batch_inference(
-                 augmented_inputs, augmented_instances, do_postprocess=False
-             )
-             # Delete now useless variables to avoid being out of memory
-             del augmented_inputs, augmented_instances, merged_instances
-             # average the predictions
-             outputs[0].pred_masks = self._reduce_pred_masks(outputs, aug_vars)
-             # postprocess
-             output = self._detector_postprocess(outputs[0], aug_vars)
-             return {"instances": output}
-         else:
-             return {"instances": merged_instances}
-
-     def _get_augmented_inputs(self, input):
-         augmented_inputs = self.tta_mapper(input)
-
-         do_hflip = [k.pop("horiz_flip", False) for k in augmented_inputs]
-         heights = [k["height"] for k in augmented_inputs]
-         widths = [k["width"] for k in augmented_inputs]
-         assert (
-             len(set(heights)) == 1 and len(set(widths)) == 1
-         ), "Augmented versions of the inputs should have the same original resolution!"
-         height = heights[0]
-         width = widths[0]
-         aug_vars = {"height": height, "width": width, "do_hflip": do_hflip}
-
-         return augmented_inputs, aug_vars
-
-     def _get_augmented_boxes(self, augmented_inputs, aug_vars):
-         # 1: forward with all augmented images
-         outputs = self._batch_inference(augmented_inputs, do_postprocess=False)
-         # 2: union the results
-         all_boxes = []
-         all_scores = []
-         all_classes = []
-         for idx, output in enumerate(outputs):
-             rescaled_output = self._detector_postprocess(output, aug_vars)
-             pred_boxes = rescaled_output.pred_boxes.tensor
-             if aug_vars["do_hflip"][idx]:
-                 pred_boxes[:, [0, 2]] = aug_vars["width"] - pred_boxes[:, [2, 0]]
-             all_boxes.append(pred_boxes)
-             all_scores.extend(rescaled_output.scores)
-             all_classes.extend(rescaled_output.pred_classes)
-         all_boxes = torch.cat(all_boxes, dim=0).cpu()
-         return all_boxes, all_scores, all_classes
-
-     def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):
-         # select from the union of all results
-         num_boxes = len(all_boxes)
-         num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES
-         # +1 because fast_rcnn_inference expects background scores as well
-         all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device)
-         for idx, cls, score in zip(count(), all_classes, all_scores):
-             all_scores_2d[idx, cls] = score
-
-         merged_instances, _ = fast_rcnn_inference_single_image(
-             all_boxes,
-             all_scores_2d,
-             shape_hw,
-             1e-8,
-             self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
-             self.cfg.TEST.DETECTIONS_PER_IMAGE,
-         )
-
-         return merged_instances
-
-     def _rescale_detected_boxes(self, augmented_inputs, merged_instances, aug_vars):
-         augmented_instances = []
-         for idx, input in enumerate(augmented_inputs):
-             actual_height, actual_width = input["image"].shape[1:3]
-             scale_x = actual_width * 1.0 / aug_vars["width"]
-             scale_y = actual_height * 1.0 / aug_vars["height"]
-             pred_boxes = merged_instances.pred_boxes.clone()
-             pred_boxes.tensor[:, 0::2] *= scale_x
-             pred_boxes.tensor[:, 1::2] *= scale_y
-             if aug_vars["do_hflip"][idx]:
-                 pred_boxes.tensor[:, [0, 2]] = actual_width - pred_boxes.tensor[:, [2, 0]]
-
-             aug_instances = Instances(
-                 image_size=(actual_height, actual_width),
-                 pred_boxes=pred_boxes,
-                 pred_classes=merged_instances.pred_classes,
-                 scores=merged_instances.scores,
-             )
-             augmented_instances.append(aug_instances)
-         return augmented_instances
-
-     def _reduce_pred_masks(self, outputs, aug_vars):
-         for idx, output in enumerate(outputs):
-             if aug_vars["do_hflip"][idx]:
-                 output.pred_masks = output.pred_masks.flip(dims=[3])
-         all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0)
-         avg_pred_masks = torch.mean(all_pred_masks, dim=0)
-         return avg_pred_masks
- return avg_pred_masks