parquet-converter committed
Commit dc785a0 · Parent: 4de1047

Update parquet files (step 7 of 397)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the rest.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/Provider/Provider.py +0 -15
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Password for Unlock Install Euro Truck Simulator 2 Avoid Scams and Malware.md +0 -90
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson l800 pvc card software Reviews and testimonials from satisfied customers.md +0 -96
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gta Sa To Psp Rar Rapidshare Com [TOP].md +0 -26
  5. spaces/1gistliPinn/ChatGPT4/Examples/Big Fish Games Universal Crack 44 [UPD].md +0 -36
  6. spaces/1gistliPinn/ChatGPT4/Examples/Cambridge Latin Course Book 1 Stage 10 Statuae Translation Learn Latin with Stories and Flashcards.md +0 -6
  7. spaces/1gistliPinn/ChatGPT4/Examples/Empire Total War Patch 1.6 Download Cracked.md +0 -13
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo descargar Bus Simulator 2023 APK y disfrutar de la experiencia de conduccin ms realista.md +0 -25
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Film Yes or No 2 Subtitle Bahasa Indonesia Uji Cinta Kim dan Pie di Dua Tempat Berbeda.md +0 -133
  10. spaces/1phancelerku/anime-remove-background/Crafting and Building APK The Ultimate Game for Minecraft Fans.md +0 -114
  11. spaces/2023Liu2023/bingo/src/components/ui/sheet.tsx +0 -122
  12. spaces/801artistry/RVC801/infer_batch_rvc.py +0 -215
  13. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_123812KB .py +0 -118
  14. spaces/AI-ZTH-03-23/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/index.html +0 -36
  15. spaces/AIDHD/GrammarCorrector/README.md +0 -38
  16. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/metrics/dtw.py +0 -162
  17. spaces/AIWaves/Debate/SOP.py +0 -296
  18. spaces/AIZeroToHero/05-RealtimeStreamlitASR/streaming.py +0 -66
  19. spaces/ALM/CALM/app.py +0 -448
  20. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py +0 -22
  21. spaces/AchyuthGamer/OpenGPT-Chat-UI/Dockerfile +0 -32
  22. spaces/AchyuthGamer/OpenGPT/get_working_providers.py +0 -7
  23. spaces/Adapter/T2I-Adapter/configs/mm/hrnet_w48_coco_256x192.py +0 -169
  24. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/dynamictext.d.ts +0 -2
  25. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectanglecanvas/RoundRectangleCanvas.d.ts +0 -2
  26. spaces/AlexWortega/ruImageCaptionong/README.md +0 -12
  27. spaces/Aloento/9Nine-VITS/README.md +0 -13
  28. spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_tgui.py +0 -171
  29. spaces/Amon1/ChatGPTForAcadamic/check_proxy.py +0 -27
  30. spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/__init__.py +0 -0
  31. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-onnxruntime-cuda/Dockerfile +0 -44
  32. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/seed_resize_stable_diffusion.py +0 -366
  33. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/modeling_flax_utils.py +0 -534
  34. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_1d.py +0 -267
  35. spaces/Andy1621/uniformer_image_detection/configs/htc/htc_r50_fpn_1x_coco.py +0 -56
  36. spaces/ApathyINC/CustomGPT/app.py +0 -516
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/base.py +0 -141
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/vcs/bazaar.py +0 -112
  39. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/ansi_test.py +0 -76
  40. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py +0 -276
  41. spaces/Banbri/zcvzcv/src/components/ui/dialog.tsx +0 -122
  42. spaces/Benson/text-generation/Examples/ Recuva.md +0 -99
  43. spaces/Benson/text-generation/Examples/Apk Kafa Topu 2.md +0 -123
  44. spaces/Benson/text-generation/Examples/Descargar Fondo De Pantalla Para Macbook Aire.md +0 -149
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/diagnose.py +0 -37
  46. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/requirements.py +0 -146
  47. spaces/Blessin/yes-and-improv-game/README.md +0 -12
  48. spaces/CHDCruze/entertainmentbybhdcruze/index.html +0 -21
  49. spaces/CVPR/BrAD/app.py +0 -112
  50. spaces/CVPR/LIVE/pybind11/tests/test_virtual_functions.py +0 -380
spaces/101-5/gpt4free/g4f/Provider/Provider.py DELETED
@@ -1,15 +0,0 @@
- import os
- from ..typing import sha256, Dict, get_type_hints
-
- url = None
- model = None
- supports_stream = False
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     return
-
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join(
-         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Password for Unlock Install Euro Truck Simulator 2 Avoid Scams and Malware.md DELETED
@@ -1,90 +0,0 @@
-
- <h1>Download Password for Unlock Install Euro Truck Simulator 2</h1>
- <p>Do you want to play one of the most realistic and immersive truck driving simulation games ever made? Do you want to travel across Europe as a king of the road, delivering cargo from one city to another, exploring new places and earning money? If yes, then you might be interested in downloading Euro Truck Simulator 2, a game developed by SCS Software that has won many awards and praises from critics and players alike. But before you can enjoy this amazing game, you might face a challenge: how to get a password to unlock install Euro Truck Simulator 2? In this article, we will explain what is Euro Truck Simulator 2, why do you need a password to unlock install it, and how to get one easily and safely.</p>
- <h2>What is Euro Truck Simulator 2?</h2>
- <p>Euro Truck Simulator 2 is a truck driving simulation game that was released in 2012 for Windows, Linux and Mac OS. It is the sequel to the original Euro Truck Simulator that was released in 2008. In this game, you can choose from a variety of trucks from different manufacturers, customize them with various accessories and paint jobs, and drive them across Europe, delivering cargo from one destination to another. You can also hire drivers, buy garages, expand your business and explore more than 70 cities in 13 countries. The game features realistic physics, graphics, sound effects and traffic rules that make you feel like you are really driving a truck on the road.</p>
- <h2>download Password for unlock Install Euro Truck Simulator 2</h2><br /><p><b><b>Download</b> &mdash; <a href="https://byltly.com/2uKyV4">https://byltly.com/2uKyV4</a></b></p><br /><br />
- <h3>Features of Euro Truck Simulator 2</h3>
- <p>Some of the features that make Euro Truck Simulator 2 stand out from other truck driving simulation games are:</p>
- <ul>
- <li>You can drive over 13,000 km of roads across Europe, including highways, country roads and city streets.</li>
- <li>You can choose from over 70 licensed trucks from brands such as Volvo, Scania, MAN, DAF, Renault and more.</li>
- <li>You can customize your truck with thousands of combinations of parts, accessories and paint jobs.</li>
- <li>You can transport over 160 types of cargo, ranging from food and beverages to chemicals and machinery.</li>
- <li>You can experience different weather conditions, day and night cycles, seasons and events that affect your driving.</li>
- <li>You can interact with other drivers on the road using the CB radio or the online multiplayer mode.</li>
- <li>You can join World of Trucks, an online community where you can share your photos, videos and achievements with other players.</li>
- <li>You can download mods, maps, skins and other content created by the fan community to enhance your game experience.</li>
- </ul>
- <h3>How to download Euro Truck Simulator 2</h3>
- <p>If you are interested in downloading Euro Truck Simulator 2, you have several options to choose from. Here are some of them:</p>
- <p>How to download password for unlock install Euro Truck Simulator 2<br />
- Euro Truck Simulator 2 installation password reset<br />
- Euro Truck Simulator 2 lock and unlock mods with password<br />
- Password for Euro Truck Simulator 2 verification product key<br />
- Euro Truck Simulator 2 crack download no password<br />
- Euro Truck Simulator 2 free download full version with password<br />
- Euro Truck Simulator 2 password for scs server surveys<br />
- Euro Truck Simulator 2 product key activation code password<br />
- Euro Truck Simulator 2 installation folder password<br />
- Euro Truck Simulator 2 password for google drive download<br />
- Euro Truck Simulator 2 password for rar file download<br />
- Euro Truck Simulator 2 password for steam download<br />
- Euro Truck Simulator 2 password for world of trucks account<br />
- Euro Truck Simulator 2 password generator online<br />
- Euro Truck Simulator 2 password recovery tool<br />
- Euro Truck Simulator 2 password remover software<br />
- Euro Truck Simulator 2 password bypass method<br />
- Euro Truck Simulator 2 password hack tutorial<br />
- Euro Truck Simulator 2 password error fix<br />
- Euro Truck Simulator 2 password problem solution<br />
- Euro Truck Simulator 2 password not working issue<br />
- Euro Truck Simulator 2 password lost help<br />
- Euro Truck Simulator 2 password forgotten support<br />
- Euro Truck Simulator 2 password change guide<br />
- Euro Truck Simulator 2 password update instructions<br />
- Euro Truck Simulator 2 password protect mods feature<br />
- Euro Truck Simulator 2 password encrypt mods option<br />
- Euro Truck Simulator 2 password decrypt mods function<br />
- Euro Truck Simulator 2 password required mods list<br />
- Euro Truck Simulator 2 password available mods download<br />
- Euro Truck Simulator 2 password locked mods review<br />
- Euro Truck Simulator 2 password unlocked mods rating<br />
- Euro Truck Simulator 2 password best mods recommendation<br />
- Euro Truck Simulator 2 password latest mods update<br />
- Euro Truck Simulator 2 password new mods release<br />
- Euro Truck Simulator 2 password popular mods collection<br />
- Euro Truck Simulator 2 password premium mods access<br />
- Euro Truck Simulator 2 password exclusive mods offer<br />
- Euro Truck Simulator 2 password free mods giveaway<br />
- Euro Truck Simulator 2 password cheap mods deal<br />
- Euro Truck Simulator 2 password discount mods coupon<br />
- Euro Truck Simulator 2 password quality mods comparison<br />
- Euro Truck Simulator 2 password tested mods feedback<br />
- Euro Truck Simulator 2 password working mods guarantee<br />
- Euro Truck Simulator 2 password safe mods assurance<br />
- Euro Truck Simulator 2 password secure mods trust<br />
- Euro Truck Simulator 2 password legit mods verification<br />
- Euro Truck Simulator 2 password original mods authenticity<br />
- Euro Truck Simulator 2 password official mods authority</p>
- <h4>Official website</h4>
- <p>The easiest and safest way to download Euro Truck Simulator 2 is to visit the official website of the game: <a href="https://eurotrucksimulator2.com/download.php">https://eurotrucksimulator2.com/download.php</a>. There you can find the latest version of the game for Windows, Linux and Mac OS. You can also download a free trial version that lets you play for an hour before you decide whether to buy the full game or not. The full game costs €19.99 (or equivalent in your local currency) and includes all the updates and patches released so far. You can pay with credit card, PayPal or other methods depending on your region. Once you pay for the game, you will receive an email with a link to download the game installer and a product key that you will need to activate the game.</p>
- <h4>Steam</h4>
- <p>Another option to download Euro Truck Simulator 2 is to use Steam, a popular online platform for digital distribution of games. You can find Euro Truck Simulator 2 on Steam by searching for it on the Steam store or by following this link: <a href="https://store.steampowered.com/app/227300/Euro_Truck_Simulator_2/">https://store.steampowered.com/app/227300/Euro_Truck_Simulator_2/</a>. The price of the game on Steam is the same as on the official website: €19.99 (or equivalent in your local currency). You can pay with credit card, PayPal or other methods supported by Steam. Once you buy the game on Steam, it will be added to your Steam library and you can download it anytime you want. You will also receive automatic updates and access to Steam features such as achievements, cloud saves and workshop.</p>
- <h4>Other sources</h4>
- <p>Besides the official website and Steam, there are also other sources where you can download Euro Truck Simulator 2. However, these sources are not authorized by SCS Software and may not be safe or reliable. Some of these sources are:</p>
- <ul>
- <li>Torrent sites: These are websites where you can download files shared by other users using peer-to-peer networks. However, these files may be infected with viruses or malware that can harm your computer or steal your personal information. They may also be incomplete or corrupted and not work properly.</li>
- <li>Crack sites: These are websites where you can download cracked versions of games that bypass the activation or verification process. However, these versions may not be updated or compatible with the latest patches or mods. They may also contain hidden code that can damage your system or compromise your security.</li>
- <li>Survey sites: These are websites where you have to complete surveys or offers in order to get access to download links or passwords for games. However, these surveys or offers may be scams that trick you into giving away your personal or financial information or subscribing to unwanted services.</li>
- </ul>
- <h2>Why do you need a password to unlock install Euro Truck Simulator 2?</h2>
- <p>If you have downloaded Euro Truck Simulator 2 from one of these unauthorized sources mentioned above, you may encounter a problem when trying to install it: you need a password to unlock it. This means that the file you have downloaded is encrypted or compressed with a password that prevents you from opening it or extracting its contents. This is done by some people who upload these files for various reasons:</p>
- <h3>The problem of fake or corrupted files</h3>
- <p>Some people upload fake or corrupted files that pretend to be Euro Truck Simulator 2 but are actually something else. They do this to deceive users into downloading their files instead of the real ones. They may also do this to earn money from advertising or referrals when users visit their websites or click on their links. By encrypting or compressing their files with passwords, they make it harder for users to detect their frauds until they have already downloaded them.</p>
- <h3>The problem of surveys or scams</h3>
- <p>Some people upload real files but lock them with passwords that they claim to provide only after users complete surveys or offers on their websites. They do this to earn money from commissions or rewards when users fill out their surveys or sign up for their offers. However, these surveys or offers may be scams that trick users into giving away their personal or financial information or subscribing to unwanted services. Even if users complete these surveys or offers successfully, they may not receive the passwords they were promised.</p>
- <h3>The problem of legal issues</h3>
- <p>Some people upload real files but lock them with passwords that they claim to protect them from legal issues such as copyright infringement or piracy. They do this to avoid being detected by authorities or sued by developers for distributing their games illegally without permission. However, this does not make their actions legal or ethical. By downloading their files without paying for them properly</p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson l800 pvc card software Reviews and testimonials from satisfied customers.md DELETED
@@ -1,96 +0,0 @@
-
- <h1>Epson l800 pvc card software: A complete guide</h1>
- <p>If you are looking for a way to print your own ID cards, business cards, or other types of plastic cards, you might be interested in Epson l800 pvc card software. This software allows you to use your Epson l800 printer to print high-quality and durable cards on pvc material. In this article, we will show you what Epson l800 pvc card software is, why you need it, how to install it, how to use it, and some tips and tricks to get the most out of it.</p>
- <h3>What is Epson l800 pvc card software?</h3>
- <p>Epson l800 pvc card software is a program that enables you to print plastic cards using your Epson l800 printer. The software comes with various templates and features that allow you to customize your cards according to your needs. You can also design your own cards from scratch using the built-in tools or import your own images and logos. The software supports different sizes and shapes of cards, such as standard, round, oval, or custom.</p>
- <h2>Epson l800 pvc card software</h2><br /><p><b><b>Download File</b> &gt;&gt;&gt; <a href="https://byltly.com/2uKyRT">https://byltly.com/2uKyRT</a></b></p><br /><br />
- <h3>Why do you need Epson l800 pvc card software?</h3>
- <p>Epson l800 pvc card software is a great solution for anyone who wants to create their own cards without spending a lot of money or time. With this software, you can print cards on demand, whenever you need them, without having to order them from a third-party service. You can also personalize your cards with your own information, design, and branding, making them more professional and unique. Moreover, printing cards on pvc material ensures that they are durable, waterproof, and resistant to fading and tearing.</p>
- <h2>How to install Epson l800 pvc card software</h2>
- <p>Installing Epson l800 pvc card software is easy and straightforward. Just follow these steps:</p>
- <h4>Step 1: Download the software from the official website</h4>
- <p>The first thing you need to do is download the software from the official website of Epson. You can find it by searching for "Epson l800 pvc card software" on Google or by clicking on this link. The file size is about 50 MB and it is compatible with Windows XP, Vista, 7, 8, and 10.</p>
- <h4>Step 2: Run the installer and follow the instructions</h4>
- <p>Once you have downloaded the file, double-click on it to run the installer. You will see a welcome screen that asks you to select your language. Choose your preferred language and click "Next". Then, accept the license agreement and click "Next" again. You will then be asked to choose a destination folder for the installation. You can leave it as default or change it if you want. Click "Next" once more and then click "Install". The installation process will take a few minutes.</p>
- <h4>Step 3: Connect your Epson l800 printer to your computer</h4>
- <p>After the installation is complete, you need to connect your Epson l800 printer to your computer using a USB cable. Make sure that both devices are turned on and that the printer driver is installed correctly. You can check this by going to "Devices and Printers" in your Control Panel and seeing if your printer is listed there. If not, you may need to download and install the driver from here. Once your printer is connected and recognized by your computer, you are ready to use Epson l800 pvc card software.</p>
- <h2>How to use Epson l800 pvc card software</h2>
- <p>Using Epson l800 pvc card software is fun and easy. Just follow these steps:</p>
- <h4>Step 1: Select the template or design your own card</h4>
- <p>To start using Epson l800 pvc card software, launch it from your desktop or Start menu. You will see a main window that shows different tabs for different types of cards. You can choose from ID cards, business cards, membership cards, loyalty cards, gift cards, etc. Each tab has several templates that you can use as a base for your card. You can also create your own template by clicking on "New" at the bottom of the window.</p>
- <p>Epson l800 pvc card printing software<br />
- Epson l800 pvc id card software download<br />
- Epson l800 pvc card tray software<br />
- Epson l800 pvc card driver software<br />
- Epson l800 pvc card software free download<br />
- Epson l800 pvc card software for windows 10<br />
- Epson l800 pvc card software for mac<br />
- Epson l800 pvc card design software<br />
- Epson l800 pvc card maker software<br />
- Epson l800 pvc card template software<br />
- Epson l800 pvc card software crack<br />
- Epson l800 pvc card software full version<br />
- Epson l800 pvc card software online<br />
- Epson l800 pvc card software tutorial<br />
- Epson l800 pvc card software update<br />
- Epson l800 pvc card software installation<br />
- Epson l800 pvc card software setup<br />
- Epson l800 pvc card software price<br />
- Epson l800 pvc card software review<br />
- Epson l800 pvc card software features<br />
- Epson l800 pvc card software support<br />
- Epson l800 pvc card software license<br />
- Epson l800 pvc card software trial<br />
- Epson l800 pvc card software demo<br />
- Epson l800 pvc card software alternative<br />
- Epson l800 pvc card software comparison<br />
- Epson l800 pvc card software compatibility<br />
- Epson l800 pvc card software requirements<br />
- Epson l800 pvc card software specifications<br />
- Epson l800 pvc card software benefits<br />
- Epson l800 pvc card software advantages<br />
- Epson l800 pvc card software disadvantages<br />
- Epson l800 pvc card software problems<br />
- Epson l800 pvc card software solutions<br />
- Epson l800 pvc card software tips<br />
- Epson l800 pvc card software tricks<br />
- Epson l800 pvc card software hacks<br />
- Epson l800 pvc card software guide<br />
- Epson l800 pvc card software manual<br />
- Epson l800 pvc card software instructions<br />
- Epson l800 pvc card software best practices<br />
- Epson l800 pvc card software use cases<br />
- Epson l800 pvc card software examples<br />
- Epson l800 pvc card software testimonials<br />
- Epson l800 pvc card software feedbacks<br />
- Epson l800 pvc card software ratings<br />
- Epson l800 pvc card software rankings<br />
- Epson l800 pvc card software recommendations<br />
- Epson l800 pvc card software suggestions<br />
- Epson l800 pvc card software improvements</p>
- <p>Once you have selected or created a template, you can edit it by adding text, images, logos, barcodes, QR codes, etc. You can also change the font, color, size, alignment, rotation, etc. of each element. To add an element, simply drag and drop it from the left panel onto the card preview on the right panel. To edit an element, double-click on it or right-click on it and select "Properties". To delete an element, select it and press "Delete" on your keyboard.</p>
- <h4>Step 2: Adjust the settings and preview the card</h4>
- <p>After you have finished designing your card, you can adjust some settings before printing it. To do this, click on "File" at the top left corner of the window and select "Print Settings". Here you can choose the paper size (A4 or Letter), orientation (Portrait or Landscape), margins (Top, Bottom, Left, Right), number of copies (1-99), etc. You can also preview how your card will look like when printed by clicking on "File" again and selecting "Print Preview". Here you can zoom in or out of the card image and see if everything looks good.</p>
- <h4>Step 3: Load the pvc card tray and print the card</h4>
- <p>The final step is to load the pvc card tray into your printer and print your card. To do this, you need a special tray that holds up to 10 standard-sized (85 x 54 mm) pvc cards at a time. You can buy this tray online or from a local store that sells printer accessories. To load the tray into your printer, follow these steps: - Open the paper output tray of your printer. - Remove any paper from the paper feed slot. - Insert one end of the tray into the paper feed slot until it stops. - Align the other end of the tray with the paper guides on both sides of the slot. - Make sure that there are no gaps between the tray and the slot. To print your card using Epson l800 pvc card software, follow these steps: - Click on "File" at the top left corner of the window and select "Print". - Choose your printer name from the drop-down menu and click "OK". - Wait for your printer to print your card. - Repeat the process for each card you want to print. Congratulations! You have successfully printed your own pvc card using Epson l800 pvc card software.</p>
- <h2>Tips and tricks for Epson l800 pvc card software</h2>
- <p>To get the best results from using Epson l800 pvc card software, here are some tips and tricks that you should keep in mind:</p>
- <h4>Tip 1: Use high-quality pvc cards for better results</h4>
- <p>The quality of your printed cards depends largely on the quality of the pvc cards that you use. Therefore, it is advisable that you use high-quality pvc cards that are smooth, thick, and glossy. These cards will ensure that your images are clear, sharp, and vibrant, and that your the most out of it. We hope that this guide has been helpful and informative for you. If you have any questions or feedback, please feel free to contact us. We would love to hear from you.</p>
- <p>Now that you have learned everything about Epson l800 pvc card software, why not try it yourself and see how amazing it is? You can download the software for free from here and start printing your own cards today. You will be amazed by the results and the possibilities.</p>
- <p>Thank you for reading this article and happy printing!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Epson l800 pvc card software:</p>
- <h4>Q: What are the system requirements for Epson l800 pvc card software?</h4>
- <p>A: The minimum system requirements for Epson l800 pvc card software are: - Windows XP, Vista, 7, 8, or 10 - Pentium 4 processor or higher - 512 MB RAM or higher - 100 MB free disk space or higher - USB port - CD-ROM drive</p>
- <h4>Q: What are the supported card sizes and shapes for Epson l800 pvc card software?</h4>
- <p>A: Epson l800 pvc card software supports the following card sizes and shapes: - Standard (85 x 54 mm) - Round (50 mm diameter) - Oval (70 x 50 mm) - Custom (any size within 85 x 54 mm)</p>
- <h4>Q: How many cards can I print with one ribbon?</h4>
- <p>A: The number of cards that you can print with one ribbon depends on the type of ribbon and the print mode that you use. For example, with a YMCKO ribbon, you can print up to 200 cards in full color on one side and black on the other side. With a K ribbon, you can print up to 1000 cards in black on one side. For more information, refer to the PDF guide Ribbon Information.</p>
- <h4>Q: How can I save my card designs for future use?</h4>
- <p>A: You can save your card designs for future use by clicking on "File" at the top left corner of the window and selecting "Save As". You can choose a name and a location for your file and click "Save". The file will be saved as a .crd format that can be opened by Epson l800 pvc card software.</p>
- <h4>Q: How can I import my own images and logos into Epson l800 pvc card software?</h4>
- <p>A: You can import your own images and logos into Epson l800 pvc card software by clicking on "Image" at the left panel and selecting "Import Image". You can browse to the folder where your image or logo is stored and select it. The image or logo will be added to the card preview on the right panel. You can resize, rotate, crop, or adjust the image or logo as you wish.</p>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Gta Sa To Psp Rar Rapidshare Com [TOP].md DELETED
@@ -1,26 +0,0 @@
- <br />
- <h1>How to Download GTA San Andreas for PSP from Rapidshare</h1>
- <p>GTA San Andreas is one of the most popular and iconic games in the Grand Theft Auto series. It features an open-world environment, a rich story, and a variety of missions and activities. However, GTA San Andreas was originally released for PlayStation 2, Xbox, and PC, and it is not officially available for PSP. So how can you play this game on your handheld device?</p>
- <h2>Gta Sa To Psp Rar Rapidshare Com</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt; <a href="https://byltly.com/2uKzbL">https://byltly.com/2uKzbL</a></b></p><br /><br />
- <p>One way is to download a compressed file of GTA San Andreas for PSP from Rapidshare, a file-sharing service that allows you to upload and download files quickly and easily. However, this method has some risks and drawbacks that you should be aware of before proceeding. In this article, we will explain how to download GTA San Andreas for PSP from Rapidshare, what are the pros and cons of this method, and what are some alternatives that you can try.</p>
- <h2>How to Download GTA San Andreas for PSP from Rapidshare</h2>
- <p>To download GTA San Andreas for PSP from Rapidshare, you will need the following:</p>
- <ul>
- <li>A PSP device with custom firmware installed. Custom firmware is a modified version of the official PSP software that allows you to run homebrew applications and games that are not authorized by Sony. You can find tutorials on how to install custom firmware on your PSP online.</li>
- <li>A memory stick with enough space to store the game file. GTA San Andreas for PSP is about 1 GB in size, so you will need at least 2 GB of free space on your memory stick.</li>
- <li>A computer with an internet connection and a program that can extract RAR files. RAR files are compressed files that can contain multiple files inside them. You will need a program like WinRAR or 7-Zip to extract the game file from the RAR file.</li>
- <li>A USB cable to connect your PSP to your computer.</li>
- </ul>
- <p>Once you have everything ready, follow these steps:</p>
- <p></p>
- <ol>
- <li>Go to <a href="https://rapidshare.com/">Rapidshare.com</a> and search for "Gta Sa To Psp Rar". You should see several results with different file sizes and upload dates. Choose the one that has the most downloads and positive ratings.</li>
- <li>Click on the download button and wait for the file to be downloaded to your computer. Depending on your internet speed and the file size, this may take some time.</li>
- <li>Once the download is complete, locate the RAR file on your computer and extract it using your program of choice. You should see a folder named "GTA_SA_PSP" with several files inside it.</li>
- <li>Connect your PSP to your computer using the USB cable and turn on the USB mode on your PSP. Your computer should recognize your PSP as a removable drive.</li>
- <li>Copy the folder "GTA_SA_PSP" from your computer to the root directory of your memory stick. This is usually the drive letter followed by a colon (e.g., E:).</li>
- <li>Disconnect your PSP from your computer and turn off the USB mode on your PSP.</li>
- <li>Go to the game menu on your PSP and select "Memory Stick". You should see an icon for GTA San Andreas. Select it and press X to start the game.</li>
- </ol></p> cec2833e83<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Big Fish Games Universal Crack 44 [UPD].md DELETED
@@ -1,36 +0,0 @@
- <br />
- <h1>How to Enjoy Big Fish Games for Free with Universal Crack 44</h1>
- <p>Big Fish Games is one of the most popular and well-known developers and publishers of casual games, with hundreds of titles across various genres and platforms. Whether you like hidden object, puzzle, adventure, time management, or card games, you can find something to suit your taste and mood at Big Fish Games.</p>
- <h2>big fish games universal crack 44</h2><br /><p><b><b>Download File</b> &#10004;&#10004;&#10004; <a href="https://imgfil.com/2uy25m">https://imgfil.com/2uy25m</a></b></p><br /><br />
- <p>However, there is one problem: most of these games are not free. You can download and play them for an hour as a trial, but after that you need to buy them or subscribe to Big Fish Games Unlimited. This can be quite expensive and inconvenient for some gamers who just want to have some fun without spending money.</p>
- <p>Fortunately, there is a solution: Big Fish Games Universal Crack 44. This is a small and simple tool that can generate valid activation keys for any Big Fish game you want. You don't need to use any patches or other medicines, just run the keygen, select the game from the list, copy the fingerprint and the name, and generate the key. Then you can enter the key in the game's registration dialog and enjoy it as a full version.</p>
- <h2>What is Big Fish Games Universal Crack 44?</h2>
- <p>Big Fish Games Universal Crack 44 is a keygen made by Vovan from Braga Software. It can create keys for more than 4000 Big Fish games, and the supported games list is growing bigger and bigger. The keygen is very easy to use and works on both Windows and Mac platforms.</p>
- <p></p>
- <p>The keygen works by exploiting a flaw in the Big Fish Games registration system. It uses a universal algorithm that can match any game's fingerprint and generate a corresponding key. The key fits all games in the list, so you don't need to worry about compatibility issues.</p>
- <h3>How to Use Big Fish Games Universal Crack 44?</h3>
- <p>Using Big Fish Games Universal Crack 44 is very simple. Here are the steps you need to follow:</p>
- <ol>
- <li>Download and install your desired Big Fish game (make sure it's listed in the keygen's list)</li>
- <li>Download and run the keygen, and select the game from the list</li>
- <li>Click the 'Open reg Dialog' button, and locate and open the game's main program (.exe file)</li>
- <li>A window will open and ask you to enter a key</li>
- <li>Copy the value of 'Fingerprint' in this window, and paste it into the keygen</li>
- <li>Type a 'Name' and generate a 'Key'</li>
- <li>Copy the generated key back to the window asking for a key, and click 'OK'</li>
- <li>Done! You have activated your game as a full version</li>
- </ol>
- <h4>Tips and Tricks for Big Fish Games Universal Crack 44</h4>
- <p>Here are some tips and tricks that can help you get the most out of Big Fish Games Universal Crack 44:</p>
- <ul>
- <li>You don't need to use BigFish Game Manager to play BigFish games. There is usually a hidden EXE file that is the name of the game in the game installation folder. Running this hidden EXE file will either run the game as a full version or prompt for a key (that's where the keygen comes in).</li>
- <li>If you want to play BigFish games on Mac, you can try to use "Wine HQ" to run this keygen. But we don't know if the keygen will work or not. On Mac, under "/contents/Resources/" folder, the name of the hidden file should be like ".gamename".</li>
- <li>You can use any name you want when generating keys. The name doesn't affect the validity of the key.</li>
- <li>You can share your keys with your friends or family members if they want to play BigFish games too. But please don't distribute them online or abuse them.</li>
- </ul>
- <h5>Conclusion</h5>
- <p>Big Fish Games Universal Crack 44 is a great tool for casual gamers who want to enjoy BigFish games for free. It can generate keys for any BigFish game you want, without using any patches or other medicines. It's easy to use, fast, and reliable.</p>
- <p>If you like BigFish games works, especially their famous casual puzzle series, you should give this keygen a try. You will be amazed by how many games you can unlock with it.</p>
- <p>But remember: this keygen is only for personal study and research purposes. Any form of commercial behaviors are strictly prohibited. If you really like BigFish games works, please support them by buying their games or subscribing to their service.</p> 3cee63e6c2<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Cambridge Latin Course Book 1 Stage 10 Statuae Translation Learn Latin with Stories and Flashcards.md DELETED
@@ -1,6 +0,0 @@
- <h2>cambridgelatincoursebook1stage10statuaetranslation</h2><br /><p><b><b>Download Zip</b> &#9733;&#9733;&#9733; <a href="https://imgfil.com/2uxYL8">https://imgfil.com/2uxYL8</a></b></p><br /><br />
- <br />
- aaccfb2cb3<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Empire Total War Patch 1.6 Download Cracked.md DELETED
@@ -1,13 +0,0 @@
- <h2>Empire Total War Patch 1.6 Download Cracked</h2><br /><p><b><b>Download Zip</b> &bull;&bull;&bull; <a href="https://imgfil.com/2uxZzj">https://imgfil.com/2uxZzj</a></b></p><br /><br />
-
- Oct 25, 2017 - Including all the major races and legendary lords from both games, Mortal Empires also allows you to start the campaign as any DLC or Free-LC ... Mortal Kombat 10 (Mortal Kombat X Mobile) - VKontakte.
- Buy Mortal Kombat X on PS4 online store.
- Video review of the game Mortal Kombat X on PS4.
- There are Mortal Kombat X trailers in the video.
- Mortal Kombat X. Mortal Kombat 10 (Mortal Kombat X Mobile) - VKontakte.
- Mortal Kombat X is a video game from the Mortal Kombat fighting game series in the genre.
- The game features various characters from the Mortal Kombat series of games.
- Mortal Kombat X is a computer game in the fighting game genre, developed by American studio Warner Bros. 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo descargar Bus Simulator 2023 APK y disfrutar de la experiencia de conduccin ms realista.md DELETED
@@ -1,25 +0,0 @@
- <br />
- <h1>Download Bus Simulator 2023 APK: How to Become a Professional Bus Driver</h1>
- Do you like driving simulation games? Would you like to drive different types of buses in real cities and countries? Would you like to customize your bus and compete with other players online? If the answer is yes, then you will love Bus Simulator 2023 APK, a free Android game that puts you in the driver's seat and turns you into a real bus driver. In this article, we will explain what Bus Simulator 2023 APK is, how to download and install it on your device, how to play it, and what its advantages and disadvantages are. Keep reading and get ready for an adventure on wheels! <h2>What is Bus Simulator 2023 APK?</h2>
- Bus Simulator 2023 APK is a bus driving simulation game developed by Ovidiu Pop, a studio specializing in games of this genre. The game lets you drive a wide variety of modern city, coach and school buses with realistic interiors and an innovative 1:1 physics engine. It features detailed maps from around the world, including the United States, South America, Europe, Dubai, Shanghai and more. It also has several game modes, such as career, free driving and online multiplayer with friends, plus a bus company management system where you can hire drivers for your buses and schedule custom routes. <h3>Main features of the game</h3>
- Bus Simulator 2023 APK has many features that make it a fun and realistic game. Some of them are: - Detailed, customizable interiors. You can open and close the bus doors, watch people get on and off, adjust the air conditioning, and add flags, stickers and many more accessories. - Easy, intuitive controls. You can choose between driving with the steering wheel, the buttons or by tilting the device. You can also switch the camera between first- and third-person views. - Varied, challenging locations. You can drive through the city, the countryside, the mountains, the desert and the snow, and choose between different times of day and weather conditions. - Drive children to school using three different school bus models. - An intelligent traffic system. You must obey signs, traffic lights and traffic rules, and watch out for pedestrians, cyclists and other vehicles. - Immersive online co-op play. You can add your friends, use the live chat and invite them to play cooperative routes. You can also view leaderboards, achievements and statistics. - Bus customization options. You can choose between different types of buses, such as diesel, hybrid, electric, articulated or school buses, and change your bus's color, body parts, air conditioning, lights and tires. - A bus company management system. You can create your own bus company, hire drivers, assign them routes and vehicles, and view their statistics and earnings. You can also buy new buses and upgrade them. <h3>Requirements and device compatibility</h3>
- Bus Simulator 2023 APK requires an Android device with at least 4 GB of RAM and 1 GB of free space. The game is compatible with Android 5.0 or higher. It also requires a stable internet connection to play online and download additional content. <h2>How to download and install Bus Simulator 2023 APK?</h2>
- If you want to download Bus Simulator 2023 APK on your Android device, follow these steps: <h3>Steps to download the APK file</h3>
- - Open your device's web browser and search for "Bus Simulator 2023 APK". - Go to the game's official website or one of the trusted sites that offer the APK file. - Click the download button and wait for the download to finish. - If a warning message about installing unknown apps appears, tap "Allow" or "Accept". <h3>Steps to install the APK file</h3>
- - Once you have downloaded the APK file, go to your device's downloads folder and look for the file "Bus Simulator 2023 APK". - Tap the file and select "Install". - Wait for the installation to finish, then tap "Open" or "Done". - Enjoy the game. <h2>How to play Bus Simulator 2023 APK?</h2>
- To play Bus Simulator 2023 APK, follow these steps: <h3>Available game modes</h3>
- - Choose the game mode you prefer: career, free driving or online multiplayer. - In career mode, you must complete different missions and objectives to earn money and experience. You can choose between different types of routes, such as urban, tourist or school routes, and between difficulty levels such as easy, normal or hard. - In free driving mode, you can explore the maps as you like, without restrictions. You can change the bus type, the time of day and the weather whenever you want, and turn traffic and signs on or off. - In online multiplayer mode, you can play with your friends or with other players around the world. You can create or join a game room, choose a cooperative route and talk over live chat. You can also view leaderboards and achievements. <h3>Tips and tricks to improve your experience</h3>
- To get better at Bus Simulator 2023 APK, we recommend these tips and tricks: - Adjust the controls and steering sensitivity to your preference. - Use the map and GPS to orient yourself and follow the correct route. - Obey traffic rules and avoid crashing into other vehicles or pedestrians. - Pay attention to passengers' requests and pick them up and drop them off at the right stops. - Keep fuel, speed and engine temperature at optimal levels. - Customize your bus with the accessories you like best. - Upgrade your bus with parts that improve its performance and efficiency. - Hire drivers for your bus company and assign them profitable routes. - Buy new buses and expand your fleet. - Have fun with your friends in online multiplayer mode. <h2>Advantages and disadvantages of downloading Bus Simulator 2023 APK</h2>
- Downloading Bus Simulator 2023 APK has its advantages and disadvantages. Here is a summary: <h3>Advantages of downloading the game</h3>
- - It is a free, ad-free game that offers a realistic and fun driving experience. - It has a wide variety of buses, maps, game modes and customization options. - It has high-quality graphics, immersive sound effects and realistic physics. - It lets you play with your friends or with other players online and chat with them. - It lets you create and manage your own bus company and track your progress and statistics. <h3>Disadvantages of downloading the game</h3>
- - It requires a powerful Android device with enough free space to run properly. - It requires a stable internet connection to play online and download additional content. - It can drain a lot of battery and mobile data if you play for a long time. - It may occasionally have bugs or technical glitches. <h2>Conclusion</h2>
- Bus Simulator 2023 APK is a bus driving simulation game that gives you the chance to become a professional driver. The game has many positives, such as its realism, variety, fun factor and multiplayer mode. However, it also has some negatives, such as its hardware requirements, its internet-connection requirement and its possible technical problems. Overall, we think it is worth trying if you like games in this genre. We hope this article has been useful to you and that you enjoy the game. <h2>Frequently asked questions</h2>
- <h4>What is Bus Simulator 2023 APK?</h4>
- Bus Simulator 2023 APK is a free bus driving simulation game for Android. <h4>How do I download Bus Simulator 2023 APK?</h4>
- To download Bus Simulator 2023 APK, search for the APK file in your device's web browser and download it from the game's official website or from one of the trusted sites that offer it. Then install the APK file on your device by following the instructions. <h4>How do I play Bus Simulator 2023 APK?</h4>
- To play Bus Simulator 2023 APK, choose the game mode you prefer: career, free driving or online multiplayer. Then drive the bus along the assigned route, obeying traffic rules and attending to passengers. You can also customize your bus, buy new buses, hire drivers and build your own bus company. <h4>What are the advantages of downloading Bus Simulator 2023 APK?</h4>
- Downloading Bus Simulator 2023 APK has advantages such as being free, plus its realism, variety, fun factor and multiplayer mode. <h4>What are the disadvantages of downloading Bus Simulator 2023 APK?</h4>
- Downloading Bus Simulator 2023 APK has disadvantages such as its hardware requirements, its internet-connection requirement and its possible technical problems.</p>
- <h2>download bus simulator 2023 apk</h2><br /><p><b><b>Download</b> &#187; <a href="https://urlin.us/2uT13x">https://urlin.us/2uT13x</a></b></p><br /><br /> 197e85843d<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Film Yes or No 2 Subtitle Bahasa Indonesia Uji Cinta Kim dan Pie di Dua Tempat Berbeda.md DELETED
@@ -1,133 +0,0 @@
1
-
2
- <h1>Download Film Yes or No 2 Subtitle Bahasa Indonesia: Cara dan Situs Terbaik</h1>
3
- <p>Apakah kamu penggemar film romantis Thailand? Jika iya, mungkin kamu sudah pernah menonton film Yes or No yang dirilis pada tahun 2010. Film ini menceritakan kisah cinta antara dua mahasiswi yang tinggal satu kamar, Pie dan Kim. Namun, apa yang terjadi dengan hubungan mereka setelah lulus kuliah? Apakah mereka masih bersama atau berpisah?</p>
4
- <h2>download film yes or no 2 subtitle bahasa indonesia</h2><br /><p><b><b>Download</b> &#9999; &#9999; &#9999; <a href="https://urlin.us/2uSVWL">https://urlin.us/2uSVWL</a></b></p><br /><br />
5
- <p>Jawabannya ada di film Yes or No 2, sekuel dari film pertama yang dirilis pada tahun 2012. Film ini melanjutkan kisah Pie dan Kim yang harus menjalani magang di tempat yang berbeda. Pie pergi ke selatan untuk bekerja di pusat perikanan, sementara Kim pergi ke utara untuk bekerja di pertanian. Di sana, mereka bertemu dengan orang-orang baru yang membuat hubungan mereka semakin rumit.</p>
6
- <p>Jika kamu penasaran dengan kelanjutan cerita Pie dan Kim, kamu bisa download film Yes or No 2 subtitle bahasa Indonesia dengan mudah dan cepat. Berikut ini adalah cara dan situs terbaik untuk download film Yes or No 2 subtitle bahasa Indonesia.</p>
7
- <h2>Apa itu Film Yes or No 2?</h2>
8
- <h3>Sinopsis Film Yes or No 2</h3>
9
- <p>Film Yes or No 2 adalah film romantis Thailand yang disutradarai oleh Sarasawadee Wongsompetch dan ditulis oleh Nepalee. Film ini adalah sekuel dari film Yes or No yang sukses meraih popularitas di Thailand dan negara-negara Asia lainnya. Film ini mengisahkan tentang hubungan antara Pie (Sucharat Manaying) dan Kim (Supanart Jittaleela), dua mahasiswi yang jatuh cinta saat tinggal satu kamar di asrama.</p>
10
- <p>Setelah lulus kuliah, Pie dan Kim harus berpisah untuk menjalani magang di tempat yang berbeda. Pie pergi ke selatan untuk bekerja di pusat perikanan, sementara Kim pergi ke utara untuk bekerja di pertanian. Di sana, mereka bertemu dengan orang-orang baru yang membuat hubungan mereka semakin rumit. Pie berteman dengan Yam (Apittha Klay-udom), seorang gadis tomboy yang menyukainya. Kim bertemu dengan Maysa (Permpreda Sakulsiripong), seorang gadis cantik yang menarik perhatiannya.</p>
11
- <p>Akankah Pie dan Kim bisa mempertahankan cinta mereka meski jarak dan godaan? Atau akankah mereka menyerah dan mencari pasangan baru? Temukan jawabannya di film Yes or No 2.</p>
12
- <h3>Pemain dan Karakter Film Yes or No 2</h3>
13
- <p>Berikut ini adalah daftar pemain dan karakter film Yes or No 2:</p>
14
- <ul>
15
- <li>Sucharat Manaying sebagai Pie, seorang gadis feminin yang mencintai Kim.</ <li>Supanart Jittaleela sebagai Kim, seorang gadis tomboy yang mencintai Pie.</li>
16
- <li>Apittha Klay-udom sebagai Yam, seorang gadis tomboy yang menyukai Pie.</li>
17
- <li>Permpreda Sakulsiripong sebagai Maysa, seorang gadis cantik yang menyukai Kim.</li>
18
- <li>Sushar Manaying sebagai Jane, kakak Pie yang tidak setuju dengan hubungan Pie dan Kim.</li>
19
- <li>Inthira Yeunyong sebagai Nerd, teman Pie yang selalu mendukungnya.</li>
20
- <li>Puttipong Promsaka Na Sakolnakorn sebagai P'Van, bos Pie di pusat perikanan.</li>
21
- <li>Soranut Yupanun sebagai P'King, bos Kim di pertanian.</li>
22
- </ul>
23
- <h3>Trailer Film Yes or No 2</h3>
24
- <p>Berikut ini adalah trailer film Yes or No 2 yang bisa kamu tonton untuk melihat cuplikan adegan-adegan menarik di film ini:</p>
25
- <p>Cara download film yes or no 2 dengan subtitle bahasa indonesia<br />
26
- Situs download film yes or no 2 sub indo gratis dan mudah<br />
27
- Review film yes or no 2, film lesbian Thailand yang bikin baper<br />
28
- Download film yes or no 2 full movie HD subtitle bahasa indonesia<br />
29
- Nonton streaming film yes or no 2 online sub indo di JuraganFilm[^1^]<br />
30
- Film yes or no 2, kisah cinta dua gadis yang diuji oleh jarak<br />
31
- Download film yes or no 2 sub indo di HP android tanpa ribet<br />
32
- Link download film yes or no 2 subtitle bahasa indonesia terbaru<br />
33
- Sinopsis film yes or no 2, lanjutan dari film yes or no pertama<br />
34
- Download film yes or no 2 sub indo di laptop atau PC dengan cepat<br />
35
- Film yes or no 2, apakah Kim dan Pie bisa bertahan bersama?<br />
36
- Download film yes or no 2 sub indo kualitas bluray 720p<br />
37
- Tips download film yes or no 2 subtitle bahasa indonesia tanpa iklan<br />
38
- Film yes or no 2, bagaimana akting Supanart Jittaleela dan Sushar Manaying?<br />
39
- Download film yes or no 2 sub indo di Telegram atau WhatsApp<br />
40
- Film yes or no 2, apa pesan moral yang bisa kita ambil?<br />
41
- Download film yes or no 2 sub indo dengan server Google Drive<br />
42
- Film yes or no 2, apakah ada adegan hot atau kiss scene?<br />
43
- Download film yes or no 2 sub indo dengan ukuran file kecil<br />
44
- Film yes or no 2, apakah ada versi lain atau spin-off?<br />
45
- Download film yes or no 2.5 sub indo, sekuel dari film yes or no 2[^2^]<br />
46
- Film yes or no 2.5, apa yang terjadi setelah Kim dan Pie kembali bersama?<br />
47
- Download film yes or no 2.5 full movie HD subtitle bahasa indonesia<br />
48
- Nonton streaming film yes or no 2.5 online sub indo di JuraganFilm[^2^]<br />
49
- Film yes or no 2.5, apakah ada karakter baru atau cameo?<br />
50
- Download film yes or no 2.5 sub indo di HP android dengan mudah<br />
51
- Film yes or no 2.5, bagaimana chemistry antara Supanart Jittaleela dan Sushar Manaying?<br />
52
- Download film yes or no 2.5 sub indo di laptop atau PC dengan cepat<br />
53
- Film yes or no 2.5, apakah ada adegan lucu atau romantis?<br />
54
- Download film yes or no 2.5 sub indo kualitas bluray 720p<br />
55
- Tips download film yes or no 2.5 subtitle bahasa indonesia tanpa iklan<br />
56
- Film yes or no 2.5, apakah ada konflik atau drama?<br />
57
- Download film yes or no 2.5 sub indo dengan server Google Drive<br />
58
- Film yes or no 2.5, apakah ada lagu tema atau soundtrack?<br />
59
- Download film yes or no 2.5 sub indo dengan ukuran file kecil<br />
60
- Film yes or no 2.5, apakah ada ending yang happy atau sad?<br />
61
- Download film yes or no 3 sub indo, lanjutan dari film yes or no 2.5<br />
62
- Film yes or no 3, apakah Kim dan Pie masih bersama?<br />
63
- Download film yes or no 3 full movie HD subtitle bahasa indonesia<br />
64
- Nonton streaming film yes or no 3 online sub indo di JuraganFilm<br />
65
- Film yes or no 3, apakah ada perubahan dalam cerita atau karakter?<br />
66
- Download film yes or no 3 sub indo di HP android tanpa ribet<br />
67
- Film yes or no 3, bagaimana penampilan Supanart Jittaleela dan Sushar Manaying?<br />
68
- Download film yes or no 3 sub indo di laptop atau PC dengan cepat<br />
69
- Film yes or no 3, apakah ada adegan menegangkan atau mengharukan?<br />
70
- Download film yes or no 3 sub indo kualitas bluray 720p<br />
71
- Tips download film yes or no 3 subtitle bahasa indonesia tanpa iklan<br />
72
- Film yes or no 3, apakah ada pesan inspiratif yang bisa kita dapatkan?</p>
73
- <iframe width="560" height="315" src="https://www.youtube.com/embed/9w4ZQ0n1l0g" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
74
- <h2>Why Download Yes or No 2 with Indonesian Subtitles?</h2>
75
- <h3>Reasons to Watch Yes or No 2</h3>
76
- <p>Yes or No 2 is a good fit if you enjoy romance and comedy. It offers an entertaining, touching story about love, friendship, and life, with natural, expressive acting from its cast, especially Sucharat Manaying and Supanart Jittaleela, who play Pie and Kim convincingly. The soundtrack is pleasant to listen to and suits the mood of the film.</p>
77
- <p>The film can also make you think about what a real relationship takes. It portrays the challenges and conflicts often faced by couples with different backgrounds, sexual orientations, and outlooks on life, and shows how such problems can be worked through with communication, understanding, and compromise. It carries a positive message about valuing yourself and others, and about living honestly and happily.</p>
78
- <h3>Benefits of Downloading Yes or No 2 with Indonesian Subtitles</h3>
79
- <p>If you want to watch Yes or No 2, downloading it with Indonesian subtitles has several advantages:</p>
80
- <ul>
81
- <li>You can watch Yes or No 2 anytime and anywhere without worrying about running out of mobile data or signal.</li>
82
- <li>You can watch it with good picture and sound quality, free of ads and buffering.</li>
83
- <li>You get accurate, easy-to-understand Indonesian subtitles without having to hunt for translations online.</li>
84
- <li>You can watch it with friends or family without paying for cinema tickets or DVD rentals.</li>
85
- <li>You can rewatch it as often as you like without worrying about losing or corrupting the file.</li>
86
- </ul> <h2>How to Download Yes or No 2 with Indonesian Subtitles</h2>
87
- <h3>Steps to Download Yes or No 2 with Indonesian Subtitles</h3>
88
- <p>To download Yes or No 2 with Indonesian subtitles, follow these steps:</p>
89
- <ol>
90
- <li>Visit one of the download sites for Yes or No 2 with Indonesian subtitles that we recommend below.</li>
91
- <li>Find Yes or No 2 using the search feature or the categories available on the site.</li>
92
- <li>Choose the quality and format you want, for example 720p, 1080p, MP4, MKV, etc.</li>
93
- <li>Click the download button or the magnet link on the film's page.</li>
94
- <li>Wait for the download to finish. If you use a magnet link, you will need a torrent client such as uTorrent or BitTorrent.</li>
95
- <li>Once the film has downloaded, open it with a video player such as VLC Media Player or GOM Player.</li>
96
- <li>If the film has no Indonesian subtitles, download a separate subtitle file from the sites we recommend below.</li>
97
- <li>Save the subtitle file under the same name as the film file and put it in the same folder (see the sketch after this list).</li>
98
- <li>Open the film in your video player and select the option to display subtitles.</li>
99
- <li>Enjoy watching Yes or No 2 with Indonesian subtitles.</li>
100
- </ol>
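- <p>Step 8, giving the subtitle file the same name as the film file, is the step people most often get wrong by hand. The short Python sketch below shows one way to automate it; the two filenames are hypothetical placeholders, not real releases.</p>
- <pre><code># Minimal sketch: rename a downloaded .srt so most video players auto-load it.
- # Both filenames below are hypothetical placeholders.
- from pathlib import Path
- 
- video = Path("Yes.or.No.2.720p.mkv")      # the downloaded film file
- subtitle = Path("indonesian-subs.srt")    # the downloaded subtitle file
- 
- # Give the subtitle the video's name (keeping the .srt extension) so it
- # sits next to the video with a matching name.
- target = video.with_suffix(".srt")
- subtitle.rename(target)
- print(f"Subtitle saved as {target}")
- </code></pre>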
101
- <h3>Sites to Download Yes or No 2 with Indonesian Subtitles</h3>
102
- <p>Here are some sites you can visit to get Yes or No 2 with Indonesian subtitles:</p>
103
- <h4>YIFY Subtitles</h4>
104
- <p>YIFY Subtitles is a popular and trusted subtitle site. It offers film subtitles in many languages, including Indonesian, and has a simple, easy-to-use layout. You can find subtitles for Yes or No 2 by typing the title into the search box or by browsing categories by genre, year, or country, then download them with the download button on the subtitle page. You can also read other users' ratings, comments, and reviews of each subtitle.</p>
105
- <p>Visit YIFY Subtitles at <a href="">https://yts-subs.com/</a>.</p>
106
- <h4>Subscene</h4>
107
- <p>Subscene is another popular and trusted option that works the same way: search by title or browse by genre, year, or country, download from the subtitle page, and check other users' ratings and reviews.</p>
108
- <p>Visit Subscene at <a href="">https://subscene.com/</a>.</p> <h4>iSubtitles.org</h4>
109
- <p>iSubtitles.org offers the same search, download, and review features, and also carries Indonesian subtitles for Yes or No 2.</p>
110
- <p>Visit iSubtitles.org at <a href="">https://isubtitles.org/</a>.</p>
111
- <h4>Subs.dog</h4>
112
- <p>Subs.dog is a further alternative with the same search, download, and review features.</p>
113
- <p>Visit Subs.dog at <a href="">https://subs.dog/</a>.</p>
114
- <h2>Conclusion</h2>
115
- <p>Yes or No 2 is a Thai romance film and the sequel to Yes or No, released in 2010. It follows the relationship between Pie and Kim, two university students who fell in love while sharing a dorm room, and the challenges and conflicts they face after graduating, when internships in different places force them apart.</p>
116
- <p>If you want to watch Yes or No 2, you can download it with Indonesian subtitles quickly and easily: follow the steps explained above and visit one of the sites we recommend. That way you can watch the film with good picture and sound quality, accurate Indonesian subtitles, and no ads or buffering.</p>
117
- <p>Enjoy watching Yes or No 2 with Indonesian subtitles, and we hope this article has been useful to you.</p>
118
- <h2>FAQ</h2>
119
- <p>Here are some frequently asked questions about Yes or No 2:</p>
120
- <ol>
121
- <li>Is Yes or No 2 on Netflix?</li>
122
- <p>Answer: No, Yes or No 2 is not on Netflix. You can download it with Indonesian subtitles from the sites we recommend above.</p>
123
- <li>Does Yes or No 2 have a sequel?</li>
124
- <p>Answer: Yes, the sequel is Yes or No 2.5, released in 2015. It follows the relationship between Wine (Pimpakan Bangchawong) and Pii (Chansakorn Kittiwattanakorn), two close friends of Pie and Kim who also fall in love.</p>
125
- <li>Is Yes or No 2 based on a true story?</li>
126
- <p>Answer: No, Yes or No 2 is not based on a true story. It is a work of fiction written by Nepalee and directed by Sarasawadee Wongsompetch.</p>
127
- <li>Did Yes or No 2 win any awards?</li>
128
- <p>Answer: Yes, it won several awards, including Best Actress for Sucharat Manaying at the 2013 Maya Awards, Best Movie Soundtrack for the song "Forever Love" by Tina Supanart Jittaleela at the 2013 Maya Awards, and Best Movie Poster at the 2013 Thailand National Film Association Awards.</p>
129
- <li>Is Yes or No 2 suitable for all ages?</li>
130
- <p>Answer: No, Yes or No 2 is not suitable for all ages. The film contains sensitive and controversial themes and scenes, such as a same-sex relationship, kissing, and family conflict, and some of its language is coarse. It is better suited to adults and to teens who are open-minded and mature.</p>
131
- </ol>
spaces/1phancelerku/anime-remove-background/Crafting and Building APK The Ultimate Game for Minecraft Fans.md DELETED
@@ -1,114 +0,0 @@
1
-
2
- <h1>Crafting and Building APK Mirror: A Guide for Creative Gamers</h1>
3
- <p>Do you like building games? Do you want to unleash your imagination and create your own worlds? If so, you might be interested in <strong>Crafting and Building</strong>, a free game that lets you do just that. But what if you can't access the game from the official app store, or you want to try a different version of the game? That's where an <strong>APK mirror</strong> comes in handy. In this article, we will explain what Crafting and Building is, what an APK mirror is, how to download Crafting and Building from an APK mirror, and what are some alternatives to Crafting and Building.</p>
4
- <h2>What is Crafting and Building?</h2>
5
- <p>Crafting and Building is a new free building game that was released in 2020 by GeneRe. It is inspired by Minecraft, but it has its own features and style. The game is available for Android devices, and it has over 50 million downloads on Google Play. The game is rated for everyone, and it is suitable for the whole family.</p>
6
- <h2>crafting and building apk mirror</h2><br /><p><b><b>Download Zip</b> &#9913; <a href="https://jinyurl.com/2uNOVp">https://jinyurl.com/2uNOVp</a></b></p><br /><br />
7
- <h3>What is an APK mirror?</h3>
8
- <p>An APK mirror is a website that hosts APK files, which are the installation files for Android apps. APK files can be downloaded from an APK mirror and installed on your device manually, without using the official app store. This can be useful if you want to access apps that are not available in your region, or if you want to try different versions of apps that are not updated on the app store.</p>
9
- <h3>Why would someone want to download Crafting and Building from an APK mirror?</h3>
10
- <p>There are several reasons why someone might want to download Crafting and Building from an APK mirror. For example:</p>
11
- <ul>
12
- <li>You live in a country where Crafting and Building is not available on Google Play, or it is blocked by your network provider.</li>
13
- <li>You want to play Crafting and Building on a device that does not support Google Play, such as a Kindle Fire or a Chromebook.</li>
14
- <li>You want to try a modded version of Crafting and Building that has extra features or cheats.</li>
15
- <li>You want to play an older version of Crafting and Building that has less bugs or more compatibility.</li>
16
- </ul>
17
- <p>However, downloading Crafting and Building from an APK mirror also comes with some risks and drawbacks. We will discuss them later in this article.</p>
18
- <h2>Features of Crafting and Building</h2>
19
- <p>Crafting and Building is a game that offers a lot of fun and creativity for its players. Here are some of the features that make it stand out:</p>
20
- <h3>Gameplay</h3>
21
- <p>The gameplay of Crafting and Building is similar to Minecraft, but with some differences. You can explore a randomly generated world made of blocks, collect resources, craft items, build structures, and interact with animals and villagers. You can also choose between different modes, such as survival mode, where you have to deal with hunger, health, and enemies; or creative mode, where you have unlimited resources and no threats. You can also switch between first-person and third-person views.</p>
22
- <h3>Graphics</h3>
23
- <p>The graphics of Crafting and Building are colorful and pixelated, giving the game a retro feel. The game also has smooth animations and high fps, making it enjoyable to play. The game supports day-night cycles, weather effects, shadows, lighting, and water reflections. You can also adjust the graphics settings according to your device's performance.</p>
24
- <h3>Multiplayer</h3>
25
- <p>Crafting and Building has a multiplayer mode that allows you to play online with your friends or other players around the world. You can join existing servers or create your own private server. You can chat with other players, collaborate on building projects, or compete in mini-games. Multiplayer mode is free to play, but you need to register an account and have a stable internet connection.</p>
26
- <h3>Customization</h3>
27
- <p>Crafting and Building lets you customize your character and your world. You can choose from different skins, clothes, hairstyles, and accessories for your avatar. You can also change the texture pack of the game, and use different blocks and items to create your own style. You can also use the in-game editor to create your own maps and share them with other players.</p>
28
- <p>crafting and building game free download apk<br />
29
- crafting and building mod apk unlimited resources<br />
30
- crafting and building online multiplayer apk<br />
31
- crafting and building apk latest version 2020<br />
32
- crafting and building apk for pc windows 10<br />
33
- crafting and building apk pure no ads<br />
34
- crafting and building survival mode apk<br />
35
- crafting and building creative mode apk<br />
36
- crafting and building apk android 4.4<br />
37
- crafting and building apk ios iphone<br />
38
- crafting and building apk mirror site safe<br />
39
- crafting and building apk old version 2018<br />
40
- crafting and building apk update 2021<br />
41
- crafting and building apk offline play<br />
42
- crafting and building apk hack cheats<br />
43
- crafting and building apk full unlocked<br />
44
- crafting and building apk no verification<br />
45
- crafting and building apk install guide<br />
46
- crafting and building apk review ratings<br />
47
- crafting and building apk tips tricks<br />
48
- crafting and building adventure map apk<br />
49
- crafting and building city builder apk<br />
50
- crafting and building pixel art apk<br />
51
- crafting and building sandbox simulator apk<br />
52
- crafting and building exploration world apk<br />
53
- crafting and building skins editor apk<br />
54
- crafting and building furniture mod apk<br />
55
- crafting and building animals pets apk<br />
56
- crafting and building weapons tools apk<br />
57
- crafting and building vehicles cars apk<br />
58
- crafting and building farm garden apk<br />
59
- crafting and building castle mine apk<br />
60
- crafting and building school education apk<br />
61
- crafting and building horror scary apk<br />
62
- crafting and building fantasy magic apk<br />
63
- crafting and building medieval kingdom apk<br />
64
- crafting and building modern house apk<br />
65
- crafting and building underwater ocean apk<br />
66
- crafting and building space galaxy apk<br />
67
- crafting and building jungle forest apk<br />
68
- crafting and building desert pyramid apk<br />
69
- crafting and building snow winter apk<br />
70
- crafting and building tropical island apk<br />
71
- crafting and building volcano lava apk</p>
72
- <h2>How to download Crafting and Building from an APK mirror</h2>
73
- <p>If you want to download Crafting and Building from an APK mirror, you need to follow these steps:</p>
74
- <ol>
75
- <li>Find a reliable APK mirror website that hosts the Crafting and Building APK file. You can use a search engine or check the reviews of other users. Some examples of APK mirror websites are APKPure, APKCombo, and APKMirror.</li>
76
- <li>Download the Crafting and Building APK file from the website. Make sure you choose the right version for your device and check the file size and permissions.</li>
77
- <li>Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.</li>
78
- <li>Locate the downloaded Crafting and Building APK file on your device and tap on it to install it. You may need to grant some permissions or confirm some prompts.</li>
79
- <li>Wait for the installation to finish and launch the game from your app drawer or home screen.</li>
80
- </ol>
81
- <h3>Risks and precautions</h3>
82
- <p>While downloading Crafting and Building from an APK mirror can be convenient and fun, it also comes with some risks and drawbacks that you should be aware of. Here are some of them:</p>
83
- <ul>
84
- <li>You may not get the latest updates or features of the game, as the APK file may be outdated or unofficial.</li>
85
- <li>You may encounter compatibility issues or bugs that affect the performance or stability of the game.</li>
86
- <li>You may expose your device to malware or viruses that can harm your data or system.</li>
87
- <li>You may violate the terms of service or privacy policy of the game developer or publisher, which can result in legal actions or account bans.</li>
88
- </ul>
89
- <p>To avoid these risks and drawbacks, you should always download Crafting and Building from an APK mirror that is trustworthy and reputable. You should also scan the APK file with an antivirus app before installing it. You should also backup your data and device regularly in case something goes wrong. Finally, you should respect the rights and rules of the game developer and publisher, and not use any mods or cheats that can give you an unfair advantage or harm other players.</p>
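- <p>One concrete precaution worth adding (an extra suggestion, not something the steps above require): if the mirror publishes a checksum for the APK, compare it against the file you actually downloaded before installing. The Python sketch below shows the idea; the filename and expected hash are hypothetical placeholders.</p>
- <pre><code># Minimal sketch: verify a downloaded APK against a published SHA-256 checksum.
- # The filename and EXPECTED value are hypothetical placeholders.
- import hashlib
- 
- EXPECTED = "0123abcd..."  # checksum published by the mirror, if available
- 
- digest = hashlib.sha256()
- with open("crafting-and-building.apk", "rb") as f:
-     for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
-         digest.update(chunk)
- 
- if digest.hexdigest() == EXPECTED:
-     print("Checksum matches the published value.")
- else:
-     print("Checksum mismatch - do not install this file.")
- </code></pre>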
90
- <h2>Alternatives to Crafting and Building</h2>
91
- <p>If you are looking for other games like Crafting and Building that let you create your own worlds and express your creativity, here are some alternatives that you can try:</p>
92
- <h3>Minecraft</h3>
93
- <p>Minecraft is the original sandbox game that inspired Crafting and Building and many others. It is one of the most popular games of all time, with over 200 million copies sold. It is available for almost every platform, including PC, consoles, mobile devices, and VR. Minecraft lets you explore, build, craft, survive, and play in infinite procedurally generated worlds. You can also join online servers and play with millions of other players around the world. Minecraft has a huge community of fans, modders, creators, educators, and more. It also has a spin-off title called Minecraft Dungeons, which is a dungeon crawler game set in the Minecraft universe.</p>
94
- <h3>Terraria</h3>
95
- <p>Terraria is a 2D sandbox game that combines elements of action-adventure, RPG, platformer, and survival genres. It is available for PC, consoles, mobile devices, and Switch. Terraria lets you explore a vast world full of biomes, enemies, bosses, items, NPCs, events, secrets, and more. You can also build your own base, craft weapons and armor, mine resources, farm crops, fish, summon pets, cast spells, and more. Terraria has a multiplayer mode that lets you play with up to 8 players online or locally. Terraria has over 30 million copies sold and has received several updates and expansions over the years.</p>
96
- <h3>Roblox</h3>
97
- <p>Roblox is a massively multiplayer online sandbox game that lets you create and play games of various genres using Roblox Studio. It is available for PC, consoles, mobile devices, and VR. Roblox lets you create your own games using a simple scripting language called Lua. You can also play millions of games created by other users, ranging from simulations and role-playing to obbies, tycoons, shooters, puzzles, and more. You can also customize your avatar, chat with other players, join groups, earn badges, and trade items. Roblox has over 150 million monthly active users and a thriving economy based on its virtual currency, Robux.</p>
98
- <h2>Conclusion</h2>
99
- <p>Crafting and Building is a fun and creative game that lets you build your own worlds and play with others. It is a free game that is available for Android devices, but you can also download it from an APK mirror if you want to access different versions or features of the game. However, you should be careful when downloading Crafting and Building from an APK mirror, as there are some risks and drawbacks involved. You should also check out some alternatives to Crafting and Building, such as Minecraft, Terraria, and Roblox, which offer similar or different experiences of sandbox gaming.</p>
100
- <p>We hope that this article has helped you learn more about Crafting and Building and how to download it from an APK mirror. If you have any questions or feedback, please let us know in the comments below. And if you enjoyed this article, please share it with your friends and family who might be interested in Crafting and Building. Thank you for reading!</p>
101
- <h2>FAQs</h2>
102
- <p>Here are some frequently asked questions about Crafting and Building and APK mirrors:</p>
103
- <h3>Is Crafting and Building safe to play?</h3>
104
- <p>Crafting and Building is safe to play if you download it from the official Google Play store or a reputable APK mirror website. However, you should always scan the APK file with an antivirus app before installing it, and backup your data and device regularly.</p>
105
- <h3>Is Crafting and Building free to play?</h3>
106
- <p>Yes, Crafting and Building is free to play. However, it may contain ads or in-app purchases that require real money.</p>
107
- <h3>Can I play Crafting and Building offline?</h3>
108
- <p>Yes, you can play Crafting and Building offline in single-player mode. However, you need an internet connection to play online in multiplayer mode.</p>
109
- <h3>Can I play Crafting and Building on PC?</h3>
110
- <p>No, Crafting and Building is not officially available for PC. However, you can use an Android emulator to run the game on your PC. An Android emulator is software that simulates an Android device on your PC. Some examples of Android emulators are BlueStacks, NoxPlayer, and LDPlayer.</p>
111
- <h3>Can I transfer my Crafting and Building progress from one device to another?</h3>
112
- <p>Yes, you can transfer your Crafting and Building progress from one device to another by using the cloud save feature. You need to register an account and log in to the game on both devices. Then, you can use the cloud save button to upload or download your progress.</p>
spaces/2023Liu2023/bingo/src/components/ui/sheet.tsx DELETED
@@ -1,122 +0,0 @@
1
- 'use client'
2
-
3
- import * as React from 'react'
4
- import * as SheetPrimitive from '@radix-ui/react-dialog'
5
-
6
- import { cn } from '@/lib/utils'
7
- import { IconClose } from '@/components/ui/icons'
8
-
9
- const Sheet = SheetPrimitive.Root
10
-
11
- const SheetTrigger = SheetPrimitive.Trigger
12
-
13
- const SheetClose = SheetPrimitive.Close
14
-
15
- const SheetPortal = ({
16
- className,
17
- children,
18
- ...props
19
- }: SheetPrimitive.DialogPortalProps) => (
20
- <SheetPrimitive.Portal
21
- className={cn('fixed inset-0 z-50 flex', className)}
22
- {...props}
23
- >
24
- {children}
25
- </SheetPrimitive.Portal>
26
- )
27
- SheetPortal.displayName = SheetPrimitive.Portal.displayName
28
-
29
- const SheetOverlay = React.forwardRef<
30
- React.ElementRef<typeof SheetPrimitive.Overlay>,
31
- React.ComponentPropsWithoutRef<typeof SheetPrimitive.Overlay>
32
- >(({ className, children, ...props }, ref) => (
33
- <SheetPrimitive.Overlay
34
- className={cn(
35
- 'fixed inset-0 z-50 transition-all duration-100 data-[state=closed]:animate-out data-[state=closed]:fade-out data-[state=open]:fade-in',
36
- className
37
- )}
38
- {...props}
39
- ref={ref}
40
- />
41
- ))
42
- SheetOverlay.displayName = SheetPrimitive.Overlay.displayName
43
-
44
- const SheetContent = React.forwardRef<
45
- React.ElementRef<typeof SheetPrimitive.Content>,
46
- React.ComponentPropsWithoutRef<typeof SheetPrimitive.Content>
47
- >(({ className, children, ...props }, ref) => (
48
- <SheetPortal>
49
- <SheetPrimitive.Content
50
- ref={ref}
51
- className={cn(
52
- 'fixed inset-y-0 left-0 z-50 h-full border-r bg-background p-6 shadow-lg transition ease-in-out data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left data-[state=closed]:duration-300 data-[state=open]:duration-500 sm:max-w-sm',
53
- className
54
- )}
55
- {...props}
56
- >
57
- {children}
58
- <SheetPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-background transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-secondary">
59
- <IconClose />
60
- <span className="sr-only">Close</span>
61
- </SheetPrimitive.Close>
62
- </SheetPrimitive.Content>
63
- </SheetPortal>
64
- ))
65
- SheetContent.displayName = SheetPrimitive.Content.displayName
66
-
67
- const SheetHeader = ({
68
- className,
69
- ...props
70
- }: React.HTMLAttributes<HTMLDivElement>) => (
71
- <div className={cn('flex flex-col space-y-2', className)} {...props} />
72
- )
73
- SheetHeader.displayName = 'SheetHeader'
74
-
75
- const SheetFooter = ({
76
- className,
77
- ...props
78
- }: React.HTMLAttributes<HTMLDivElement>) => (
79
- <div
80
- className={cn(
81
- 'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2',
82
- className
83
- )}
84
- {...props}
85
- />
86
- )
87
- SheetFooter.displayName = 'SheetFooter'
88
-
89
- const SheetTitle = React.forwardRef<
90
- React.ElementRef<typeof SheetPrimitive.Title>,
91
- React.ComponentPropsWithoutRef<typeof SheetPrimitive.Title>
92
- >(({ className, ...props }, ref) => (
93
- <SheetPrimitive.Title
94
- ref={ref}
95
- className={cn('text-lg font-semibold text-foreground', className)}
96
- {...props}
97
- />
98
- ))
99
- SheetTitle.displayName = SheetPrimitive.Title.displayName
100
-
101
- const SheetDescription = React.forwardRef<
102
- React.ElementRef<typeof SheetPrimitive.Description>,
103
- React.ComponentPropsWithoutRef<typeof SheetPrimitive.Description>
104
- >(({ className, ...props }, ref) => (
105
- <SheetPrimitive.Description
106
- ref={ref}
107
- className={cn('text-sm text-muted-foreground', className)}
108
- {...props}
109
- />
110
- ))
111
- SheetDescription.displayName = SheetPrimitive.Description.displayName
112
-
113
- export {
114
- Sheet,
115
- SheetTrigger,
116
- SheetClose,
117
- SheetContent,
118
- SheetHeader,
119
- SheetFooter,
120
- SheetTitle,
121
- SheetDescription
122
- }
spaces/801artistry/RVC801/infer_batch_rvc.py DELETED
@@ -1,215 +0,0 @@
1
- """
2
- v1
3
- runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33
4
- v2
5
- runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33
6
- """
7
- import os, sys, pdb, torch
8
-
9
- now_dir = os.getcwd()
10
- sys.path.append(now_dir)
11
- import sys
12
- import torch
13
- import tqdm as tq
14
- from multiprocessing import cpu_count
15
-
16
-
17
- class Config:
18
- def __init__(self, device, is_half):
19
- self.device = device
20
- self.is_half = is_half
21
- self.n_cpu = 0
22
- self.gpu_name = None
23
- self.gpu_mem = None
24
- self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
25
-
26
- def device_config(self) -> tuple:
27
- if torch.cuda.is_available():
28
- i_device = int(self.device.split(":")[-1])
29
- self.gpu_name = torch.cuda.get_device_name(i_device)
30
- if (
31
- ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
32
- or "P40" in self.gpu_name.upper()
33
- or "1060" in self.gpu_name
34
- or "1070" in self.gpu_name
35
- or "1080" in self.gpu_name
36
- ):
37
- print("16-series/10-series GPUs and P40 are forced to single precision")
38
- self.is_half = False
39
- for config_file in ["32k.json", "40k.json", "48k.json"]:
40
- with open(f"configs/{config_file}", "r") as f:
41
- strr = f.read().replace("true", "false")
42
- with open(f"configs/{config_file}", "w") as f:
43
- f.write(strr)
44
- with open("infer/modules/train/preprocess.py", "r") as f:
45
- strr = f.read().replace("3.7", "3.0")
46
- with open("infer/modules/train/preprocess.py", "w") as f:
47
- f.write(strr)
48
- else:
49
- self.gpu_name = None
50
- self.gpu_mem = int(
51
- torch.cuda.get_device_properties(i_device).total_memory
52
- / 1024
53
- / 1024
54
- / 1024
55
- + 0.4
56
- )
57
- if self.gpu_mem <= 4:
58
- with open("infer/modules/train/preprocess.py", "r") as f:
59
- strr = f.read().replace("3.7", "3.0")
60
- with open("infer/modules/train/preprocess.py", "w") as f:
61
- f.write(strr)
62
- elif torch.backends.mps.is_available():
63
- print("No supported NVIDIA GPU found, using MPS for inference")
64
- self.device = "mps"
65
- else:
66
- print("No supported NVIDIA GPU found, using CPU for inference")
67
- self.device = "cpu"
68
- self.is_half = False  # half precision is not supported on CPU
69
-
70
- if self.n_cpu == 0:
71
- self.n_cpu = cpu_count()
72
-
73
- if self.is_half:
74
- # configuration for 6 GB of VRAM
75
- x_pad = 3
76
- x_query = 10
77
- x_center = 60
78
- x_max = 65
79
- else:
80
- # configuration for 5 GB of VRAM
81
- x_pad = 1
82
- x_query = 6
83
- x_center = 38
84
- x_max = 41
85
-
86
- if self.gpu_mem is not None and self.gpu_mem <= 4:
87
- x_pad = 1
88
- x_query = 5
89
- x_center = 30
90
- x_max = 32
91
-
92
- return x_pad, x_query, x_center, x_max
93
-
94
-
95
- f0up_key = sys.argv[1]
96
- input_path = sys.argv[2]
97
- index_path = sys.argv[3]
98
- f0method = sys.argv[4] # harvest or pm
99
- opt_path = sys.argv[5]
100
- model_path = sys.argv[6]
101
- index_rate = float(sys.argv[7])
102
- device = sys.argv[8]
103
- is_half = sys.argv[9].lower() != "false"
104
- filter_radius = int(sys.argv[10])
105
- resample_sr = int(sys.argv[11])
106
- rms_mix_rate = float(sys.argv[12])
107
- protect = float(sys.argv[13])
108
- print(sys.argv)
109
- config = Config(device, is_half)
110
- now_dir = os.getcwd()
111
- sys.path.append(now_dir)
112
- from infer.modules.vc.modules import VC
113
- from lib.infer_pack.models import (
114
- SynthesizerTrnMs256NSFsid,
115
- SynthesizerTrnMs256NSFsid_nono,
116
- SynthesizerTrnMs768NSFsid,
117
- SynthesizerTrnMs768NSFsid_nono,
118
- )
119
- from infer.lib.audio import load_audio
120
- from fairseq import checkpoint_utils
121
- from scipy.io import wavfile
122
-
123
- hubert_model = None
124
-
125
-
126
- def load_hubert():
127
- global hubert_model
128
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
129
- ["hubert_base.pt"],
130
- suffix="",
131
- )
132
- hubert_model = models[0]
133
- hubert_model = hubert_model.to(device)
134
- if is_half:
135
- hubert_model = hubert_model.half()
136
- else:
137
- hubert_model = hubert_model.float()
138
- hubert_model.eval()
139
-
140
-
141
- def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate):
142
- global tgt_sr, net_g, vc, hubert_model, version
143
- if input_audio is None:
144
- return "You need to upload an audio", None
145
- f0_up_key = int(f0_up_key)
146
- audio = load_audio(input_audio, 16000)
147
- times = [0, 0, 0]
148
- if hubert_model is None:
149
- load_hubert()
150
- if_f0 = cpt.get("f0", 1)
151
- # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
152
- audio_opt = vc.pipeline(
153
- hubert_model,
154
- net_g,
155
- sid,
156
- audio,
157
- input_audio,
158
- times,
159
- f0_up_key,
160
- f0_method,
161
- file_index,
162
- index_rate,
163
- if_f0,
164
- filter_radius,
165
- tgt_sr,
166
- resample_sr,
167
- rms_mix_rate,
168
- version,
169
- protect,
170
- f0_file=f0_file,
171
- )
172
- print(times)
173
- return audio_opt
174
-
175
-
176
- def get_vc(model_path):
177
- global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
178
- print("loading pth %s" % model_path)
179
- cpt = torch.load(model_path, map_location="cpu")
180
- tgt_sr = cpt["config"][-1]
181
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
182
- if_f0 = cpt.get("f0", 1)
183
- version = cpt.get("version", "v1")
184
- if version == "v1":
185
- if if_f0 == 1:
186
- net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
187
- else:
188
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
189
- elif version == "v2":
190
- if if_f0 == 1: #
191
- net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
192
- else:
193
- net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
194
- del net_g.enc_q
195
- print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the state dict does not load cleanly, strangely enough
196
- net_g.eval().to(device)
197
- if is_half:
198
- net_g = net_g.half()
199
- else:
200
- net_g = net_g.float()
201
- vc = VC(tgt_sr, config)
202
- n_spk = cpt["config"][-3]
203
- # return {"visible": True,"maximum": n_spk, "__type__": "update"}
204
-
205
-
206
- get_vc(model_path)
207
- audios = os.listdir(input_path)
208
- for file in tq.tqdm(audios):
209
- if file.endswith(".wav"):
210
- file_path = input_path + "/" + file
211
- wav_opt = vc_single(
212
- 0, file_path, f0up_key, None, f0method, index_path, index_rate
213
- )
214
- out_path = opt_path + "/" + file
215
- wavfile.write(out_path, tgt_sr, wav_opt)
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_123812KB .py DELETED
@@ -1,118 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
-
5
- from . import spec_utils
6
-
7
-
8
- class Conv2DBNActiv(nn.Module):
9
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
10
- super(Conv2DBNActiv, self).__init__()
11
- self.conv = nn.Sequential(
12
- nn.Conv2d(
13
- nin,
14
- nout,
15
- kernel_size=ksize,
16
- stride=stride,
17
- padding=pad,
18
- dilation=dilation,
19
- bias=False,
20
- ),
21
- nn.BatchNorm2d(nout),
22
- activ(),
23
- )
24
-
25
- def __call__(self, x):
26
- return self.conv(x)
27
-
28
-
29
- class SeperableConv2DBNActiv(nn.Module):
30
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
31
- super(SeperableConv2DBNActiv, self).__init__()
32
- self.conv = nn.Sequential(
33
- nn.Conv2d(
34
- nin,
35
- nin,
36
- kernel_size=ksize,
37
- stride=stride,
38
- padding=pad,
39
- dilation=dilation,
40
- groups=nin,
41
- bias=False,
42
- ),
43
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
44
- nn.BatchNorm2d(nout),
45
- activ(),
46
- )
47
-
48
- def __call__(self, x):
49
- return self.conv(x)
50
-
51
-
52
- class Encoder(nn.Module):
53
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
54
- super(Encoder, self).__init__()
55
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
56
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
57
-
58
- def __call__(self, x):
59
- skip = self.conv1(x)
60
- h = self.conv2(skip)
61
-
62
- return h, skip
63
-
64
-
65
- class Decoder(nn.Module):
66
- def __init__(
67
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
68
- ):
69
- super(Decoder, self).__init__()
70
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
71
- self.dropout = nn.Dropout2d(0.1) if dropout else None
72
-
73
- def __call__(self, x, skip=None):
74
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
75
- if skip is not None:
76
- skip = spec_utils.crop_center(skip, x)
77
- x = torch.cat([x, skip], dim=1)
78
- h = self.conv(x)
79
-
80
- if self.dropout is not None:
81
- h = self.dropout(h)
82
-
83
- return h
84
-
85
-
86
- class ASPPModule(nn.Module):
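- # Orientation note: this is Atrous Spatial Pyramid Pooling. Parallel branches
- # (a pooled branch, a 1x1 conv, and separable convs at increasing dilation
- # rates) are concatenated and reduced by a 1x1 bottleneck conv, capturing
- # context at multiple scales.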
87
- def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
88
- super(ASPPModule, self).__init__()
89
- self.conv1 = nn.Sequential(
90
- nn.AdaptiveAvgPool2d((1, None)),
91
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
92
- )
93
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
94
- self.conv3 = SeperableConv2DBNActiv(
95
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
96
- )
97
- self.conv4 = SeperableConv2DBNActiv(
98
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
99
- )
100
- self.conv5 = SeperableConv2DBNActiv(
101
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
102
- )
103
- self.bottleneck = nn.Sequential(
104
- Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
105
- )
106
-
107
- def forward(self, x):
108
- _, _, h, w = x.size()
109
- feat1 = F.interpolate(
110
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
111
- )
112
- feat2 = self.conv2(x)
113
- feat3 = self.conv3(x)
114
- feat4 = self.conv4(x)
115
- feat5 = self.conv5(x)
116
- out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
117
- bottle = self.bottleneck(out)
118
- return bottle
spaces/AI-ZTH-03-23/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/index.html DELETED
@@ -1,36 +0,0 @@
1
- <html>
2
- <head>
3
-
4
- <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.12.0/gradio.js">
5
- </script>
6
-
7
-
8
- </head>
9
- <body>
10
-
11
- <iframe
12
- src="https://awacke1-twitter-sentiment-live-realtime.hf.space"
13
- frameborder="0"
14
- width="850"
15
- height="1024"
16
- ></iframe>
17
-
18
- <iframe
19
- src="https://awacke1-streamlitwikipediachat.hf.space"
20
- frameborder="0"
21
- width="850"
22
- height="1024"
23
- ></iframe>
24
-
25
- <iframe
26
- src="https://awacke1-cognitive-ai-episodic-semantic-m-f4b3d67.hf.space"
27
- frameborder="0"
28
- width="850"
29
- height="1024"
30
- ></iframe>
31
-
32
-
33
-
34
- </body>
35
-
36
- </html>
spaces/AIDHD/GrammarCorrector/README.md DELETED
@@ -1,38 +0,0 @@
1
- ---
2
- title: GrammarCorrector
3
- emoji: 📊
4
- colorFrom: red
5
- colorTo: gray
6
- sdk: streamlit
7
- app_file: app.py
8
- pinned: false
9
- duplicated_from: deep-learning-analytics/GrammarCorrector
10
- ---
11
-
12
- # Configuration
13
-
14
- `title`: _string_
15
- Display title for the Space
16
-
17
- `emoji`: _string_
18
- Space emoji (emoji-only character allowed)
19
-
20
- `colorFrom`: _string_
21
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
-
23
- `colorTo`: _string_
24
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
-
26
- `sdk`: _string_
27
- Can be either `gradio` or `streamlit`
28
-
29
- `sdk_version` : _string_
30
- Only applicable for `streamlit` SDK.
31
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
32
-
33
- `app_file`: _string_
34
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
35
- Path is relative to the root of the repository.
36
-
37
- `pinned`: _boolean_
38
- Whether the Space stays on top of your list.
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/metrics/dtw.py DELETED
@@ -1,162 +0,0 @@
1
- from numpy import array, zeros, full, argmin, inf, ndim
2
- from scipy.spatial.distance import cdist
3
- from math import isinf
4
-
5
-
6
- def dtw(x, y, dist, warp=1, w=inf, s=1.0):
7
- """
8
- Computes Dynamic Time Warping (DTW) of two sequences.
9
-
10
- :param array x: N1*M array
11
- :param array y: N2*M array
12
- :param func dist: distance used as cost measure
13
- :param int warp: how many shifts are computed.
14
- :param int w: window size limiting the maximal distance between indices of matched entries |i,j|.
15
- :param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal
16
- Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
17
- """
18
- assert len(x)
19
- assert len(y)
20
- assert isinf(w) or (w >= abs(len(x) - len(y)))
21
- assert s > 0
22
- r, c = len(x), len(y)
23
- if not isinf(w):
24
- D0 = full((r + 1, c + 1), inf)
25
- for i in range(1, r + 1):
26
- D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0
27
- D0[0, 0] = 0
28
- else:
29
- D0 = zeros((r + 1, c + 1))
30
- D0[0, 1:] = inf
31
- D0[1:, 0] = inf
32
- D1 = D0[1:, 1:] # view
33
- for i in range(r):
34
- for j in range(c):
35
- if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):
36
- D1[i, j] = dist(x[i], y[j])
37
- C = D1.copy()
38
- jrange = range(c)
39
- for i in range(r):
40
- if not isinf(w):
41
- jrange = range(max(0, i - w), min(c, i + w + 1))
42
- for j in jrange:
43
- min_list = [D0[i, j]]
44
- for k in range(1, warp + 1):
45
- i_k = min(i + k, r)
46
- j_k = min(j + k, c)
47
- min_list += [D0[i_k, j] * s, D0[i, j_k] * s]
48
- D1[i, j] += min(min_list)
49
- if len(x) == 1:
50
- path = zeros(len(y)), range(len(y))
51
- elif len(y) == 1:
52
- path = range(len(x)), zeros(len(x))
53
- else:
54
- path = _traceback(D0)
55
- return D1[-1, -1], C, D1, path
56
-
57
-
58
- def accelerated_dtw(x, y, dist, warp=1):
59
- """
60
- Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
61
- Instead of iterating through each element and calculating each distance,
62
- this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
63
-
64
- :param array x: N1*M array
65
- :param array y: N2*M array
66
- :param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
67
- If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
68
- :param int warp: how many shifts are computed.
69
- Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
70
- """
71
- assert len(x)
72
- assert len(y)
73
- if ndim(x) == 1:
74
- x = x.reshape(-1, 1)
75
- if ndim(y) == 1:
76
- y = y.reshape(-1, 1)
77
- r, c = len(x), len(y)
78
- D0 = zeros((r + 1, c + 1))
79
- D0[0, 1:] = inf
80
- D0[1:, 0] = inf
81
- D1 = D0[1:, 1:]
82
- D0[1:, 1:] = cdist(x, y, dist)
83
- C = D1.copy()
84
- for i in range(r):
85
- for j in range(c):
86
- min_list = [D0[i, j]]
87
- for k in range(1, warp + 1):
88
- min_list += [D0[min(i + k, r), j],
89
- D0[i, min(j + k, c)]]
90
- D1[i, j] += min(min_list)
91
- if len(x) == 1:
92
- path = zeros(len(y)), range(len(y))
93
- elif len(y) == 1:
94
- path = range(len(x)), zeros(len(x))
95
- else:
96
- path = _traceback(D0)
97
- return D1[-1, -1], C, D1, path
98
-
99
-
100
- def _traceback(D):
101
- i, j = array(D.shape) - 2
102
- p, q = [i], [j]
103
- while (i > 0) or (j > 0):
104
- tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j]))
105
- if tb == 0:
106
- i -= 1
107
- j -= 1
108
- elif tb == 1:
109
- i -= 1
110
- else: # (tb == 2):
111
- j -= 1
112
- p.insert(0, i)
113
- q.insert(0, j)
114
- return array(p), array(q)
115
-
116
-
117
- if __name__ == '__main__':
118
- w = inf
119
- s = 1.0
120
- if 1: # 1-D numeric
121
- from sklearn.metrics.pairwise import manhattan_distances
122
- import numpy as np
123
- x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0]
124
- x = np.array(x).reshape([-1,1,1])
125
- y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0]
126
- y = np.array(y).reshape([-1,1,1])
127
- dist_fun = manhattan_distances
128
- w = 1
129
- # s = 1.2
130
- elif 0: # 2-D numeric
131
- from sklearn.metrics.pairwise import euclidean_distances
132
-
133
- x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]]
134
- y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]]
135
- dist_fun = euclidean_distances
136
- else: # 1-D list of strings
137
- from nltk.metrics.distance import edit_distance
138
-
139
- # x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder']
140
- # y = ['class', 'too']
141
- x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls']
142
- y = ['see', 'drown', 'himself']
143
- # x = 'we talked about the situation'.split()
144
- # y = 'we talked about the situation'.split()
145
- dist_fun = edit_distance
146
- dist, cost, acc, path = dtw(x, y, dist_fun, w=w, s=s)
147
-
148
- # Vizualize
149
- from matplotlib import pyplot as plt
150
-
151
- plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest')
152
- plt.plot(path[0], path[1], '-o') # relation
153
- plt.xticks(range(len(x)), x)
154
- plt.yticks(range(len(y)), y)
155
- plt.xlabel('x')
156
- plt.ylabel('y')
157
- plt.axis('tight')
158
- if isinf(w):
159
- plt.title('Minimum distance: {}, slope weight: {}'.format(dist, s))
160
- else:
161
- plt.title('Minimum distance: {}, window width: {}, slope weight: {}'.format(dist, w, s))
162
- plt.show()
spaces/AIWaves/Debate/SOP.py DELETED
@@ -1,296 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The AIWaves Inc. team.
3
-
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
- """standard operation procedure of an LLM Autonomous agent"""
17
- import random
18
- from LLM.base_LLM import *
19
- from State import State
20
- from utils import extract, get_relevant_history
21
- from Memory import Memory
22
- from Prompt import *
23
- import json
24
- import os
25
-
26
- class SOP:
27
- """
28
- Responsible for managing the operational processes of all agents
29
- """
30
-
31
- # SOP should have args : "states" "relations" "root"
32
-
33
- def __init__(self, **kwargs):
34
- self.controller_dict = {}
35
- self.LLM = init_LLM("logs/god",**kwargs)
36
-
37
- self.states = {}
38
- self.init_states(kwargs["states"])
39
- self.init_relation(kwargs["relations"])
40
- for state_name, states_dict in kwargs["states"].items():
41
- if state_name != "end_state" and "controller" in states_dict:
42
- self.controller_dict[state_name] = states_dict["controller"]
43
-
44
- self.user_names = kwargs["user_names"] if "user_names" in kwargs else []
45
- self.root = self.states[kwargs["root"]]
46
- self.current_state = self.root
47
- self.finish_state_name = (
48
- kwargs["finish_state_name"]
49
- if "finish_state_name" in kwargs
50
- else "end_state"
51
- )
52
- self.roles_to_names = None
53
- self.names_to_roles = None
54
- self.finished = False
55
-
56
- @classmethod
57
- def from_config(cls, config_path):
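- # The JSON is expected to look roughly like the sketch below (shape inferred
- # from init_states/init_relation in this class; every name and value here is
- # a hypothetical example, not a shipped config):
- # {
- #   "config":     {"API_KEY": "...", "API_BASE": ""},
- #   "root":       "chat",
- #   "states":     {"chat": {...}, "end_state": {...}},
- #   "relations":  {"chat": {"0": "chat", "1": "end_state"}},
- #   "user_names": []
- # }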
58
- with open(config_path) as f:
59
- config = json.load(f)
60
- os.environ.clear()
61
- for key,value in config["config"].items():
62
- if key == "API_BASE":
63
- if value == "":
64
- pass
65
- else:
66
- os.environ[key] = value
67
- # assert "API_KEY" in os.environ and os.environ["API_KEY"] != "API_KEY","Please go to config.json to set API_KEY"
68
-
69
- sop = SOP(**config)
70
- return sop
71
-
72
- def init_states(self, states_dict):
73
- for state_name, state_dict in states_dict.items():
74
- state_dict["name"] = state_name
75
- self.states[state_name] = State(**state_dict)
76
-
77
- def init_relation(self, relations):
78
- for state_name, state_relation in relations.items():
79
- for idx, next_state_name in state_relation.items():
80
- self.states[state_name].next_states[idx] = self.states[next_state_name]
81
-
82
- def transit(self, chat_history, **kwargs):
83
- """
84
- Determine the next state based on the current situation
85
- Return :
86
- next_state(State) : the next state
87
- """
88
- # If it is a single loop node, just keep looping
90
- if len(self.current_state.next_states) == 1:
91
- next_state = "0"
92
-
93
- # Otherwise, the controller needs to determine which node to enter
95
- else:
96
- current_state = self.current_state
97
- controller_dict = self.controller_dict[current_state.name]
98
- relevant_history = kwargs["relevant_history"]
99
-
100
- max_chat_nums = controller_dict["max_chat_nums"] if "max_chat_nums" in controller_dict else 1000
101
- if current_state.chat_nums>=max_chat_nums:
102
- return self.current_state.next_states["1"]
103
-
104
-
105
- # Otherwise, let the controller judge whether to end
107
- judge_system_prompt = controller_dict["judge_system_prompt"]
108
- environment_prompt = eval(Get_environment_prompt) if current_state.environment_prompt else ""
109
- transit_system_prompt = eval(Transit_system_prompt)
110
-
111
- judge_last_prompt = controller_dict["judge_last_prompt"]
112
- transit_last_prompt = eval(Transit_last_prompt)
113
-
114
-
115
-
116
- environment = kwargs["environment"]
117
- environment_summary = environment.shared_memory["short_term_memory"]
118
- chat_history_message = Memory.get_chat_history(chat_history)
119
- query = chat_history[-1].get_query()
120
-
121
- chat_messages = [
122
- {
123
- "role": "user",
124
- "content": eval(Transit_message)
125
- }
126
- ]
127
-
128
- extract_words = controller_dict["judge_extract_words"] if "judge_extract_words" in controller_dict else "end"
129
-
130
-
131
- response = self.LLM.get_response(
132
- chat_messages, transit_system_prompt, transit_last_prompt, stream=False, **kwargs
133
- )
134
- next_state = (
135
- response if response.isdigit() else extract(response, extract_words)
136
- )
137
-
138
- # If no state index can be parsed from the response, keep looping in the current state
140
- if not next_state.isdigit():
141
- next_state = "0"
142
-
143
- next_state = self.current_state.next_states[next_state]
144
- return next_state
145
-
146
-
147
- def route(self, chat_history, **kwargs):
148
- """
149
- Determine the role that needs action based on the current situation
150
- Return :
151
- current_agent(Agent) : the next act agent
152
- """
153
-
154
- agents = kwargs["agents"]
155
-
156
- # After the next state is known, start assigning roles; if the state has only one role, assign it directly
158
- if len(self.current_state.roles) == 1:
159
- next_role = self.current_state.roles[0]
160
-
161
-
162
-
163
- # Otherwise the controller decides which role acts
165
- else:
166
- relevant_history = kwargs["relevant_history"]
167
- controller_type = (
168
- self.controller_dict[self.current_state.name]["controller_type"]
169
- if "controller_type" in self.controller_dict[self.current_state.name]
170
- else "order"
171
- )
172
-
173
-
174
- # If the controller type is "rule", the LLM is asked to assign the role
176
- if controller_type == "rule":
177
- controller_dict = self.controller_dict[self.current_state.name]
178
-
179
- call_last_prompt = controller_dict["call_last_prompt"] if "call_last_prompt" in controller_dict else ""
180
-
181
- allocate_prompt = ""
182
- roles = list(set(self.current_state.roles))
183
- for role in roles:
184
- allocate_prompt += eval(Allocate_component)
185
-
186
- call_system_prompt = controller_dict["call_system_prompt"] if "call_system_prompt" in controller_dict else ""
187
- environment_prompt = eval(Get_environment_prompt) if self.current_state.environment_prompt else ""
188
- # call_system_prompt + environment + allocate_prompt
189
- call_system_prompt = eval(Call_system_prompt)
190
-
191
- query = chat_history[-1].get_query()
192
- last_name = chat_history[-1].send_name
193
- # last_prompt: note + last_prompt + query
194
- call_last_prompt =eval(Call_last_prompt)
195
-
196
-
197
- chat_history_message = Memory.get_chat_history(chat_history)
198
- # Intermediate historical conversation records
199
- chat_messages = [
200
- {
201
- "role": "user",
202
- "content": eval(Call_message),
203
- }
204
- ]
205
-
206
- extract_words = controller_dict["call_extract_words"] if "call_extract_words" in controller_dict else "end"
207
-
208
- response = self.LLM.get_response(
209
- chat_messages, call_system_prompt, call_last_prompt, stream=False, **kwargs
210
- )
211
-
212
- # get next role
213
- next_role = extract(response, extract_words)
214
-
215
- # Speak in order
216
- elif controller_type == "order":
217
- # If there is no begin role, it will be given directly to the first person.
218
- if not self.current_state.current_role:
219
- next_role = self.current_state.roles[0]
220
- # otherwise first
221
- else:
222
- self.current_state.index += 1
223
- self.current_state.index = (self.current_state.index) % len(self.current_state.roles)
224
- next_role = self.current_state.roles[self.current_state.index]
225
- # random speak
226
- elif controller_type == "random":
227
- next_role = random.choice(self.current_state.roles)
228
-
229
- # If the chosen role is not available in this state, pick one at random
231
- if next_role not in self.current_state.roles:
232
- next_role = random.choice(self.current_state.roles)
233
-
234
- self.current_state.current_role = next_role
235
-
236
- next_agent = agents[self.roles_to_names[self.current_state.name][next_role]]
237
-
238
- return next_agent
239
-
240
- def next(self, environment, agents):
241
- """
242
- Determine the next state and the agent that needs action based on the current situation
243
- """
244
-
245
- # 如��是第一次进入该状态
246
- # If it is the first time to enter this state
247
-
248
- if self.current_state.is_begin:
249
- agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
250
- agent = agents[agent_name]
251
- return self.current_state,agent
252
-
253
-
254
- # get relevant history
255
- query = environment.shared_memory["long_term_memory"][-1].content
256
- relevant_history = get_relevant_history(
257
- query,
258
- environment.shared_memory["long_term_memory"][:-1],
259
- environment.shared_memory["chat_embeddings"][:-1],
260
- )
261
- relevant_history = Memory.get_chat_history(relevant_history)
262
-
263
-
264
-
265
- next_state = self.transit(
266
- chat_history=environment.shared_memory["long_term_memory"][
267
- environment.current_chat_history_idx :
268
- ],
269
- relevant_history=relevant_history,
270
- environment=environment,
271
- )
272
- # 如果进入终止节点,则直接终止
273
- # If you enter the termination node, terminate directly
274
- if next_state.name == self.finish_state_name:
275
- self.finished = True
276
- return None, None
277
-
278
- self.current_state = next_state
279
-
280
- # 如果是首次进入该节点且有开场白,则直接分配给开场角色
281
- # If it is the first time to enter the state and there is a begin query, it will be directly assigned to the begin role.
282
- if self.current_state.is_begin and self.current_state.begin_role:
283
- agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
284
- agent = agents[agent_name]
285
- return self.current_state,agent
286
-
287
-
288
- next_agent = self.route(
289
- chat_history=environment.shared_memory["long_term_memory"][
290
- environment.current_chat_history_idx :
291
- ],
292
- agents = agents,
293
- relevant_history=relevant_history,
294
- )
295
-
296
- return self.current_state, next_agent
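
A minimal sketch of how `next()` is typically driven from outside this class. `SOP`, `environment`, and the `agents` dict are assumed to be built elsewhere in the repository, and `agent.step` / `environment.update_memory` are hypothetical stand-ins for the actual agent-execution calls:

    # hypothetical driver loop, for illustration only
    sop = SOP("config.json")                 # assumed constructor
    while not sop.finished:
        state, agent = sop.next(environment, agents)
        if state is None:                    # termination node reached
            break
        action = agent.step(state)           # assumed agent API
        environment.update_memory(action)    # assumed environment API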
 
spaces/AIZeroToHero/05-RealtimeStreamlitASR/streaming.py DELETED
@@ -1,66 +0,0 @@
- import subprocess
- 
- import numpy as np
- 
- 
- def ffmpeg_stream(youtube_url, sampling_rate=16_000, chunk_duration_ms=5000, pad_duration_ms=200):
-     """
-     Helper function that streams the audio of a YouTube video through ffmpeg,
-     yielding fixed-size float32 chunks padded on both sides.
-     """
-     chunk_len = int(sampling_rate * chunk_duration_ms / 1000)
-     pad_len = int(sampling_rate * pad_duration_ms / 1000)
-     read_chunk_len = chunk_len + pad_len * 2
- 
-     ar = f"{sampling_rate}"
-     ac = "1"
-     format_for_conversion = "f32le"
-     dtype = np.float32
-     size_of_sample = 4
- 
-     ffmpeg_command = [
-         "ffmpeg",
-         "-i",
-         "pipe:",
-         "-ac",
-         ac,
-         "-ar",
-         ar,
-         "-f",
-         format_for_conversion,
-         "-hide_banner",
-         "-loglevel",
-         "quiet",
-         "pipe:1",
-     ]
- 
-     ytdl_command = ["yt-dlp", "-f", "bestaudio", youtube_url, "--quiet", "-o", "-"]
- 
-     try:
-         ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
-         ytdl_process = subprocess.Popen(ytdl_command, stdout=ffmpeg_process.stdin)
-     except FileNotFoundError:
-         raise ValueError("ffmpeg was not found but is required to stream audio files from filename")
- 
-     acc = b""
-     leftover = np.zeros((0,), dtype=np.float32)
-     while ytdl_process.poll() is None:
-         buflen = read_chunk_len * size_of_sample
- 
-         raw = ffmpeg_process.stdout.read(buflen)
-         if raw == b"":
-             break
- 
-         if len(acc) + len(raw) > buflen:
-             acc = raw
-         else:
-             acc += raw
- 
-         audio = np.frombuffer(acc, dtype=dtype)
-         audio = np.concatenate([leftover, audio])
-         if len(audio) < pad_len * 2:
-             # TODO: handle end of stream better than this
-             break
-         yield audio
- 
-         leftover = audio[-pad_len * 2 :]
-         read_chunk_len = chunk_len
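
A minimal usage sketch of the generator above; the URL is a placeholder and both `yt-dlp` and `ffmpeg` must be on the PATH:

    # consume padded 5-second chunks at 16 kHz (the defaults of ffmpeg_stream)
    for chunk in ffmpeg_stream("https://www.youtube.com/watch?v=..."):
        print(chunk.shape, chunk.dtype)  # roughly (86400,) float32 for the first padded chunk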
 
spaces/ALM/CALM/app.py DELETED
@@ -1,448 +0,0 @@
- import streamlit as st
- import numpy as np
- 
- from st_btn_select import st_btn_select
- from streamlit_option_menu import option_menu
- 
- import pandas as pd
- from PIL import Image
- import os
- import glob
- 
- from transformers import CLIPVisionModel, AutoTokenizer, AutoModel
- from transformers import ViTFeatureExtractor, ViTForImageClassification
- from transformers import default_data_collator
- 
- import torch
- from tqdm import tqdm
- from torch.utils.data import Dataset, DataLoader
- import torchvision.transforms as transforms
- 
- from bokeh.models.widgets import Button
- from bokeh.models import CustomJS
- from streamlit_bokeh_events import streamlit_bokeh_events
- 
- from webcam import webcam
- 
- ## Global Variables
- MP3_ROOT_PATH = "sample_mp3/"
- SPECTROGRAMS_PATH = "sample_spectrograms/"
- 
- IMAGE_SIZE = 224
- MEAN = torch.tensor([0.48145466, 0.4578275, 0.40821073])
- STD = torch.tensor([0.26862954, 0.26130258, 0.27577711])
- 
- TEXT_MODEL = 'bert-base-uncased'
- 
- CLIP_TEXT_MODEL_PATH = "text_model/"
- CLIP_VISION_MODEL_PATH = "vision_model/"
- 
- ## NavBar
- def streamlit_menu(example=1):
-     if example == 1:
-         # 1. as sidebar menu
-         with st.sidebar:
-             selected = option_menu(
-                 menu_title="Main Menu",  # required
-                 options=["Text", "Audio", "Camera"],  # required
-                 icons=["chat-text", "mic", "camera"],  # optional
-                 menu_icon="cast",  # optional
-                 default_index=0,  # optional
-             )
-         return selected
- 
-     if example == 2:
-         # 2. horizontal menu w/o custom style
-         selected = option_menu(
-             menu_title=None,  # required
-             options=["Text", "Audio", "Camera"],  # required
-             icons=["chat-text", "mic", "camera"],  # optional
-             menu_icon="cast",  # optional
-             default_index=0,  # optional
-             orientation="horizontal",
-         )
-         return selected
- 
-     if example == 3:
-         # 3. horizontal menu with custom style
-         selected = option_menu(
-             menu_title=None,  # required
-             options=["Text", "Audio", "Camera"],  # required
-             icons=["chat-text", "mic", "camera"],  # optional
-             menu_icon="cast",  # optional
-             default_index=0,  # optional
-             orientation="horizontal",
-             styles={
-                 "container": {"padding": "0!important", "background-color": "#fafafa"},
-                 "icon": {"color": "#ffde59", "font-size": "25px"},
-                 "nav-link": {
-                     "font-size": "25px",
-                     "text-align": "left",
-                     "margin": "0px",
-                     "--hover-color": "#eee",
-                 },
-                 "nav-link-selected": {"background-color": "#5271ff"},
-             },
-         )
-         return selected
- 
- 
- ## Draw Sidebar
- def draw_sidebar(
-     key,
-     plot=False,
- ):
- 
-     st.write(
-         """
-         # Sidebar
- 
-         ```python
-         Think.
-         Search.
-         Feel.
-         ```
-         """
-     )
- 
-     st.slider("From 1 to 10, how cool is this app?", min_value=1, max_value=10, key=key)
- 
-     option = st_btn_select(('option1', 'option2', 'option3'), index=2)
-     st.write(f'Selected option: {option}')
- 
- ## Change Color
- # def change_color(styles="")
- 
- ## VisionDataset
- class VisionDataset(Dataset):
-     preprocess = transforms.Compose([
-         transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
-         transforms.ToTensor(),
-         transforms.Normalize(mean=MEAN, std=STD)
-     ])
- 
-     def __init__(self, image_paths: list):
-         self.image_paths = image_paths
- 
-     def __getitem__(self, idx):
-         return self.preprocess(Image.open(self.image_paths[idx]).convert('RGB'))
- 
-     def __len__(self):
-         return len(self.image_paths)
- 
- ## TextDataset
- class TextDataset(Dataset):
-     def __init__(self, text: list, tokenizer, max_len):
-         self.len = len(text)
-         self.tokens = tokenizer(text, padding='max_length',
-                                 max_length=max_len, truncation=True)
- 
-     def __getitem__(self, idx):
-         token = self.tokens[idx]
-         return {'input_ids': token.ids, 'attention_mask': token.attention_mask}
- 
-     def __len__(self):
-         return self.len
- 
- ## CLIP Demo
- class CLIPDemo:
-     def __init__(self, vision_encoder, text_encoder, tokenizer,
-                  batch_size: int = 64, max_len: int = 64, device='cuda'):
-         """ Initializes CLIPDemo.
-         It has the following functionalities:
-             image_search: search images based on a text query
-             zero_shot: zero-shot image classification
-             analogy: analogies with embedding-space arithmetic
- 
-         Args:
-             vision_encoder: Fine-tuned vision encoder
-             text_encoder: Fine-tuned text encoder
-             tokenizer: Transformers tokenizer
-             device (torch.device): Running device
-             batch_size (int): Size of the mini-batches used to compute embeddings
-             max_len (int): Tokenizer max length
- 
-         Example:
-             >>> demo = CLIPDemo(vision_encoder, text_encoder, tokenizer)
-             >>> demo.compute_image_embeddings(test_df.image.to_list())
-             >>> demo.image_search('a man and a woman')
-             >>> demo.zero_shot('./workers.jpg')
-             >>> demo.analogy('./sunset.jpg', additional_text='sea')
-         """
-         self.vision_encoder = vision_encoder.eval().to(device)
-         self.text_encoder = text_encoder.eval().to(device)
-         self.batch_size = batch_size
-         self.device = device
-         self.tokenizer = tokenizer
-         self.max_len = max_len
-         self.text_embeddings_ = None
-         self.image_embeddings_ = None
- 
-     def compute_image_embeddings(self, image_paths: list):
-         self.image_paths = image_paths
-         dataloader = DataLoader(VisionDataset(
-             image_paths=image_paths), batch_size=self.batch_size)
-         embeddings = []
-         with torch.no_grad():
-             bar = st.progress(0)
-             for i, images in tqdm(enumerate(dataloader), desc='computing image embeddings'):
-                 bar.progress(int(i / len(dataloader) * 100))
-                 image_embedding = self.vision_encoder(
-                     pixel_values=images.to(self.device)).pooler_output
-                 embeddings.append(image_embedding)
-             bar.empty()
-         self.image_embeddings_ = torch.cat(embeddings)
- 
-     def compute_text_embeddings(self, text: list):
-         self.text = text
-         dataloader = DataLoader(TextDataset(text=text, tokenizer=self.tokenizer, max_len=self.max_len),
-                                 batch_size=self.batch_size, collate_fn=default_data_collator)
-         embeddings = []
-         with torch.no_grad():
-             for tokens in tqdm(dataloader, desc='computing text embeddings'):
-                 text_embedding = self.text_encoder(input_ids=tokens["input_ids"].to(self.device),
-                                                    attention_mask=tokens["attention_mask"].to(self.device)).pooler_output
-                 embeddings.append(text_embedding)
-         self.text_embeddings_ = torch.cat(embeddings)
- 
-     def text_query_embedding(self, query: str = 'A happy song'):
-         tokens = self.tokenizer(query, return_tensors='pt')
-         with torch.no_grad():
-             text_embedding = self.text_encoder(input_ids=tokens["input_ids"].to(self.device),
-                                                attention_mask=tokens["attention_mask"].to(self.device)).pooler_output
-         return text_embedding
- 
-     def most_similars(self, embeddings_1, embeddings_2):
-         values, indices = torch.cosine_similarity(
-             embeddings_1, embeddings_2).sort(descending=True)
-         return values.cpu(), indices.cpu()
- 
-     def image_search(self, query: str, top_k=10):
-         """ Search images based on a text query.
-         `compute_image_embeddings` must have been called first.
-         Args:
-             query (str): text query
-             top_k (int): number of relevant images to return
-         """
-         query_embedding = self.text_query_embedding(query=query)
-         _, indices = self.most_similars(self.image_embeddings_, query_embedding)
- 
-         matches = np.array(self.image_paths)[indices][:top_k]
-         songs_path = []
-         for match in matches:
-             filename = os.path.split(match)[1]
-             filename = int(filename.replace(".jpeg", ""))
-             audio_path = MP3_ROOT_PATH + "/" + f"{filename:06d}"
-             songs_path.append(audio_path)
-         return songs_path
- 
- ## Draw text page
- def draw_text(
-     key,
-     plot=False,
-     device=None,
- ):
- 
-     image = Image.open("data/logo.png")
-     st.image(image, use_column_width="always")
- 
-     if 'model' not in st.session_state:
-         # with st.spinner('We are organizing your tracks...'):
-         text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
-         vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True).to(device)
-         tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
-         model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer, device=device)
-         model.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH + "/*.jpeg")[:1000])
-         st.session_state["model"] = model
- 
-     ""
-     ""
- 
-     moods = ['-', 'angry', 'calm', 'happy', 'sad']
-     genres = ['-', 'house', 'pop', 'rock', 'techno']
-     artists = ['-', 'bad dad', 'lazy magnet', 'the astronauts', 'yan yalego']
-     years = ['-', '80s', '90s', '2000s', '2010s']
- 
-     col1, col2 = st.columns(2)
-     mood = col1.selectbox('Which mood do you feel right now?', moods, help="Select a mood here")
-     genre = col2.selectbox('Which genre do you want to listen to?', genres, help="Select a genre here")
-     artist = col1.selectbox('Which artist do you like best?', artists, help="Select an artist here")
-     year = col2.selectbox('Which period do you want to relive?', years, help="Select a period here")
-     button_form = st.button('Search', key="button_form")
- 
-     st.text_input("Otherwise, describe the song you are looking for!", value="", key="sentence")
-     button_sentence = st.button('Search', key="button_sentence")
- 
-     if (button_sentence and st.session_state.sentence != "") or (button_form and not (mood == "-" and artist == "-" and genre == "-" and year == "-")):
-         if button_sentence:
-             sentence = st.session_state.sentence
-         elif button_form:
-             sentence = mood if mood != "-" else ""
-             sentence = sentence + " " + genre if genre != "-" else sentence
-             sentence = sentence + " " + artist if artist != "-" else sentence
-             sentence = sentence + " " + year if year != "-" else sentence
- 
-         song_paths = st.session_state.model.image_search(sentence)
-         for song in song_paths:
-             song_name = df.loc[df['track_id'] == int(song[-6:])]['track_title'].to_list()[0]
-             artist_name = df.loc[df['track_id'] == int(song[-6:])]['artist_name'].to_list()[0]
-             st.write('**"' + song_name + '"**' + ' by ' + artist_name)
-             st.audio(song + ".ogg", format="audio/ogg", start_time=0)
- 
- ## Draw audio page
- def draw_audio(
-     key,
-     plot=False,
-     device=None,
- ):
- 
-     image = Image.open("data/logo.png")
-     st.image(image, use_column_width="always")
- 
-     if 'model' not in st.session_state:
-         # with st.spinner('We are organizing your tracks...'):
-         text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
-         vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True).to(device)
-         tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
-         model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer, device=device)
-         model.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH + "/*.jpeg")[:1000])
-         st.session_state["model"] = model
- 
-     ""
-     ""
- 
-     st.write("Please, describe the kind of song you are looking for!")
-     stt_button = Button(label="Start Recording", margin=[5, 5, 5, 200], width=200, default_size=10, width_policy='auto', button_type='primary')
- 
-     stt_button.js_on_event("button_click", CustomJS(code="""
-         var recognition = new webkitSpeechRecognition();
-         recognition.continuous = false;
-         recognition.interimResults = true;
- 
-         recognition.onresult = function (e) {
-             var value = "";
-             for (var i = e.resultIndex; i < e.results.length; ++i) {
-                 if (e.results[i].isFinal) {
-                     value += e.results[i][0].transcript;
-                 }
-             }
-             if ( value != "") {
-                 document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
-             }
-         }
-         recognition.start();
-         """))
- 
-     result = streamlit_bokeh_events(
-         stt_button,
-         events="GET_TEXT",
-         key="listen",
-         refresh_on_update=False,
-         override_height=75,
-         debounce_time=0)
- 
-     if result:
-         if "GET_TEXT" in result:
-             sentence = result.get("GET_TEXT")
-             st.write('You asked for: "' + sentence + '"')
- 
-             song_paths = st.session_state.model.image_search(sentence)
-             for song in song_paths:
-                 song_name = df.loc[df['track_id'] == int(song[-6:])]['track_title'].to_list()[0]
-                 artist_name = df.loc[df['track_id'] == int(song[-6:])]['artist_name'].to_list()[0]
-                 st.write('**"' + song_name + '"**' + ' by ' + artist_name)
-                 st.audio(song + ".ogg", format="audio/ogg", start_time=0)
- 
- ## Draw camera page
- def draw_camera(
-     key,
-     plot=False,
-     device=None,
- ):
- 
-     image = Image.open("data/logo.png")
-     st.image(image, use_column_width="always")
- 
-     if 'model' not in st.session_state:
-         # with st.spinner('We are organizing your tracks...'):
-         text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
-         vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True).to(device)
-         tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
-         model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer, device=device)
-         model.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH + "/*.jpeg")[:1000])
-         st.session_state["model"] = model
- 
-     ""
-     ""
- 
-     st.write("Please, show us how you are feeling today!")
-     captured_image = webcam()
-     if captured_image is None:
-         st.write("Waiting for capture...")
-     else:
-         captured_image = captured_image.convert("RGB")
- 
-         vit_feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
-         vit_model = ViTForImageClassification.from_pretrained("ViT_ER/best_checkpoint", local_files_only=True)
-         inputs = vit_feature_extractor(images=[captured_image], return_tensors="pt")
-         outputs = vit_model(**inputs, output_hidden_states=True)
-         emotions = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutral']
-         mood = emotions[np.argmax(outputs.logits.detach().cpu().numpy())]
- 
-         st.write(f"Your mood seems to be **{mood.lower()}** today! Here's a song for you that matches how you feel!")
- 
-         song_paths = st.session_state.model.image_search(mood)
-         for song in song_paths:
-             song_name = df.loc[df['track_id'] == int(song[-6:])]['track_title'].to_list()[0]
-             artist_name = df.loc[df['track_id'] == int(song[-6:])]['artist_name'].to_list()[0]
-             st.write('**"' + song_name + '"**' + ' by ' + artist_name)
-             st.audio(song + ".ogg", format="audio/ogg", start_time=0)
- 
- 
- ## Main
- selected = streamlit_menu(example=3)
- df = pd.read_csv('full_metadata.csv', index_col=False)
- 
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
- 
- if selected == "Text":
-     # st.title(f"You have selected {selected}")
-     draw_text("text", plot=True, device=device)
- if selected == "Audio":
-     # st.title(f"You have selected {selected}")
-     draw_audio("audio", plot=True, device=device)
- if selected == "Camera":
-     # st.title(f"You have selected {selected}")
-     # draw_camera("camera", plot=True, device=device)
-     pass
- 
- # with st.sidebar:
- #     draw_sidebar("sidebar")
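
A standalone sketch of the wiring the three `draw_*` pages perform, useful for trying `CLIPDemo` outside Streamlit; it assumes the local checkpoints (`text_model/`, `vision_model/`) and the spectrogram folder defined at the top of the file, and note that `compute_image_embeddings` still issues a `st.progress` call, which Streamlit tolerates in bare-mode execution:

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
    vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True).to(device)
    tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
    demo = CLIPDemo(vision_encoder, text_encoder, tokenizer, device=device)
    demo.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH + "/*.jpeg")[:100])
    print(demo.image_search("a calm piano track", top_k=3))  # paths of the best-matching songs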
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py DELETED
@@ -1,22 +0,0 @@
- _base_ = [
-     '../_base_/models/resnet50.py',
-     '../_base_/datasets/imagenet_bs256_rsb_a3.py',
-     '../_base_/schedules/imagenet_bs2048_rsb.py',
-     '../_base_/default_runtime.py'
- ]
- 
- # model settings
- model = dict(
-     backbone=dict(norm_cfg=dict(type='SyncBN', requires_grad=True)),
-     head=dict(loss=dict(use_sigmoid=True)),
-     train_cfg=dict(augments=[
-         dict(type='Mixup', alpha=0.1),
-         dict(type='CutMix', alpha=1.0)
-     ]),
- )
- 
- # schedule settings
- optim_wrapper = dict(
-     optimizer=dict(lr=0.008),
-     paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
- )
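
A small sketch of inspecting this config programmatically, assuming `mmengine` is installed and the `_base_` files referenced above are present relative to the config file:

    from mmengine.config import Config

    cfg = Config.fromfile('resnet50_8xb256-rsb-a3-100e_in1k.py')
    print(cfg.model.backbone.norm_cfg)     # {'type': 'SyncBN', 'requires_grad': True}
    print(cfg.optim_wrapper.optimizer.lr)  # 0.008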
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/Dockerfile DELETED
@@ -1,32 +0,0 @@
- # syntax=docker/dockerfile:1
- # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
- # you will also find guides on how best to write your Dockerfile
- FROM node:19 as builder-production
- 
- WORKDIR /app
- 
- COPY --link --chown=1000 package-lock.json package.json ./
- RUN --mount=type=cache,target=/app/.npm \
-         npm set cache /app/.npm && \
-         npm ci --omit=dev
- 
- FROM builder-production as builder
- 
- RUN --mount=type=cache,target=/app/.npm \
-         npm set cache /app/.npm && \
-         npm ci
- 
- COPY --link --chown=1000 . .
- 
- RUN --mount=type=secret,id=DOTENV_LOCAL,dst=.env.local \
-     npm run build
- 
- FROM node:19-slim
- 
- RUN npm install -g pm2
- 
- COPY --from=builder-production /app/node_modules /app/node_modules
- COPY --link --chown=1000 package.json /app/package.json
- COPY --from=builder /app/build /app/build
- 
- CMD pm2 start /app/build/index.js -i $CPU_CORES --no-daemon
 
spaces/AchyuthGamer/OpenGPT/get_working_providers.py DELETED
@@ -1,7 +0,0 @@
- from g4f.active_providers import get_active_model_providers
- 
- working_providers = get_active_model_providers()
- 
- print("\nWorking providers by model:")
- for model, providers in working_providers.items():
-     print(f"{model}: {', '.join(providers)}")
- print(f"{model}: {', '.join(providers)}")
spaces/Adapter/T2I-Adapter/configs/mm/hrnet_w48_coco_256x192.py DELETED
@@ -1,169 +0,0 @@
- # _base_ = [
- #     '../../../../_base_/default_runtime.py',
- #     '../../../../_base_/datasets/coco.py'
- # ]
- evaluation = dict(interval=10, metric='mAP', save_best='AP')
- 
- optimizer = dict(
-     type='Adam',
-     lr=5e-4,
- )
- optimizer_config = dict(grad_clip=None)
- # learning policy
- lr_config = dict(
-     policy='step',
-     warmup='linear',
-     warmup_iters=500,
-     warmup_ratio=0.001,
-     step=[170, 200])
- total_epochs = 210
- channel_cfg = dict(
-     num_output_channels=17,
-     dataset_joints=17,
-     dataset_channel=[
-         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
-     ],
-     inference_channel=[
-         0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
-     ])
- 
- # model settings
- model = dict(
-     type='TopDown',
-     pretrained='https://download.openmmlab.com/mmpose/'
-     'pretrain_models/hrnet_w48-8ef0771d.pth',
-     backbone=dict(
-         type='HRNet',
-         in_channels=3,
-         extra=dict(
-             stage1=dict(
-                 num_modules=1,
-                 num_branches=1,
-                 block='BOTTLENECK',
-                 num_blocks=(4, ),
-                 num_channels=(64, )),
-             stage2=dict(
-                 num_modules=1,
-                 num_branches=2,
-                 block='BASIC',
-                 num_blocks=(4, 4),
-                 num_channels=(48, 96)),
-             stage3=dict(
-                 num_modules=4,
-                 num_branches=3,
-                 block='BASIC',
-                 num_blocks=(4, 4, 4),
-                 num_channels=(48, 96, 192)),
-             stage4=dict(
-                 num_modules=3,
-                 num_branches=4,
-                 block='BASIC',
-                 num_blocks=(4, 4, 4, 4),
-                 num_channels=(48, 96, 192, 384))),
-     ),
-     keypoint_head=dict(
-         type='TopdownHeatmapSimpleHead',
-         in_channels=48,
-         out_channels=channel_cfg['num_output_channels'],
-         num_deconv_layers=0,
-         extra=dict(final_conv_kernel=1, ),
-         loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
-     train_cfg=dict(),
-     test_cfg=dict(
-         flip_test=True,
-         post_process='default',
-         shift_heatmap=True,
-         modulate_kernel=11))
- 
- data_cfg = dict(
-     image_size=[192, 256],
-     heatmap_size=[48, 64],
-     num_output_channels=channel_cfg['num_output_channels'],
-     num_joints=channel_cfg['dataset_joints'],
-     dataset_channel=channel_cfg['dataset_channel'],
-     inference_channel=channel_cfg['inference_channel'],
-     soft_nms=False,
-     nms_thr=1.0,
-     oks_thr=0.9,
-     vis_thr=0.2,
-     use_gt_bbox=False,
-     det_bbox_thr=0.0,
-     bbox_file='data/coco/person_detection_results/'
-     'COCO_val2017_detections_AP_H_56_person.json',
- )
- 
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='TopDownGetBboxCenterScale', padding=1.25),
-     dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
-     dict(type='TopDownRandomFlip', flip_prob=0.5),
-     dict(
-         type='TopDownHalfBodyTransform',
-         num_joints_half_body=8,
-         prob_half_body=0.3),
-     dict(
-         type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
-     dict(type='TopDownAffine'),
-     dict(type='ToTensor'),
-     dict(
-         type='NormalizeTensor',
-         mean=[0.485, 0.456, 0.406],
-         std=[0.229, 0.224, 0.225]),
-     dict(type='TopDownGenerateTarget', sigma=2),
-     dict(
-         type='Collect',
-         keys=['img', 'target', 'target_weight'],
-         meta_keys=[
-             'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
-             'rotation', 'bbox_score', 'flip_pairs'
-         ]),
- ]
- 
- val_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='TopDownGetBboxCenterScale', padding=1.25),
-     dict(type='TopDownAffine'),
-     dict(type='ToTensor'),
-     dict(
-         type='NormalizeTensor',
-         mean=[0.485, 0.456, 0.406],
-         std=[0.229, 0.224, 0.225]),
-     dict(
-         type='Collect',
-         keys=['img'],
-         meta_keys=[
-             'image_file', 'center', 'scale', 'rotation', 'bbox_score',
-             'flip_pairs'
-         ]),
- ]
- 
- test_pipeline = val_pipeline
- 
- data_root = 'data/coco'
- data = dict(
-     samples_per_gpu=32,
-     workers_per_gpu=2,
-     val_dataloader=dict(samples_per_gpu=32),
-     test_dataloader=dict(samples_per_gpu=32),
-     train=dict(
-         type='TopDownCocoDataset',
-         ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
-         img_prefix=f'{data_root}/train2017/',
-         data_cfg=data_cfg,
-         pipeline=train_pipeline,
-         dataset_info={{_base_.dataset_info}}),
-     val=dict(
-         type='TopDownCocoDataset',
-         ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
-         img_prefix=f'{data_root}/val2017/',
-         data_cfg=data_cfg,
-         pipeline=val_pipeline,
-         dataset_info={{_base_.dataset_info}}),
-     test=dict(
-         type='TopDownCocoDataset',
-         ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
-         img_prefix=f'{data_root}/val2017/',
-         data_cfg=data_cfg,
-         pipeline=test_pipeline,
-         dataset_info={{_base_.dataset_info}}),
- )
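
A minimal inference sketch, assuming an MMPose 0.x environment (where `init_pose_model` and `inference_top_down_pose_model` live in `mmpose.apis`) and that the commented-out `_base_` includes at the top are restored so `{{_base_.dataset_info}}` can resolve; the checkpoint path is a placeholder:

    from mmpose.apis import init_pose_model, inference_top_down_pose_model

    model = init_pose_model('hrnet_w48_coco_256x192.py', 'hrnet_w48_coco_256x192.pth')  # placeholder checkpoint
    person_results = [{'bbox': [0, 0, 192, 256]}]  # one dummy person detection (xyxy)
    pose_results, _ = inference_top_down_pose_model(
        model, 'demo.jpg', person_results, format='xyxy')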
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/dynamictext.d.ts DELETED
@@ -1,2 +0,0 @@
- import DynamicText from './gameobjects/dynamictext/dynamictext/DynamicText';
- export default DynamicText;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/roundrectanglecanvas/RoundRectangleCanvas.d.ts DELETED
@@ -1,2 +0,0 @@
- import RoundRectangleCanvas from "../../../plugins/roundrectanglecanvas";
- export default RoundRectangleCanvas;
 
spaces/AlexWortega/ruImageCaptionong/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: RuImageCaptionong
- emoji: 👀
- colorFrom: indigo
- colorTo: pink
- sdk: gradio
- sdk_version: 3.0.2
- app_file: app.py
- pinned: false
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/Aloento/9Nine-VITS/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: 9Nine VITS
- emoji: ⚡
- colorFrom: gray
- colorTo: pink
- sdk: gradio
- sdk_version: 3.23.0
- app_file: app.py
- pinned: false
- license: agpl-3.0
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_tgui.py DELETED
@@ -1,171 +0,0 @@
- '''
- Contributed by SagsMug. Modified by binary-husky
- https://github.com/oobabooga/text-generation-webui/pull/175
- '''
- 
- import asyncio
- import json
- import random
- import string
- import websockets
- import logging
- import time
- import threading
- import importlib
- from toolbox import get_conf, update_ui
- 
- 
- def random_hash():
-     letters = string.ascii_lowercase + string.digits
-     return ''.join(random.choice(letters) for i in range(9))
- 
- 
- async def run(context, max_token, temperature, top_p, addr, port):
-     params = {
-         'max_new_tokens': max_token,
-         'do_sample': True,
-         'temperature': temperature,
-         'top_p': top_p,
-         'typical_p': 1,
-         'repetition_penalty': 1.05,
-         'encoder_repetition_penalty': 1.0,
-         'top_k': 0,
-         'min_length': 0,
-         'no_repeat_ngram_size': 0,
-         'num_beams': 1,
-         'penalty_alpha': 0,
-         'length_penalty': 1,
-         'early_stopping': True,
-         'seed': -1,
-     }
-     session = random_hash()
- 
-     async with websockets.connect(f"ws://{addr}:{port}/queue/join") as websocket:
-         while content := json.loads(await websocket.recv()):
-             # Python 3.10 syntax; replace with if/elif on older versions
-             if content["msg"] == "send_hash":
-                 await websocket.send(json.dumps({
-                     "session_hash": session,
-                     "fn_index": 12
-                 }))
-             elif content["msg"] == "estimation":
-                 pass
-             elif content["msg"] == "send_data":
-                 await websocket.send(json.dumps({
-                     "session_hash": session,
-                     "fn_index": 12,
-                     "data": [
-                         context,
-                         params['max_new_tokens'],
-                         params['do_sample'],
-                         params['temperature'],
-                         params['top_p'],
-                         params['typical_p'],
-                         params['repetition_penalty'],
-                         params['encoder_repetition_penalty'],
-                         params['top_k'],
-                         params['min_length'],
-                         params['no_repeat_ngram_size'],
-                         params['num_beams'],
-                         params['penalty_alpha'],
-                         params['length_penalty'],
-                         params['early_stopping'],
-                         params['seed'],
-                     ]
-                 }))
-             elif content["msg"] == "process_starts":
-                 pass
-             elif content["msg"] in ["process_generating", "process_completed"]:
-                 yield content["output"]["data"][0]
-                 # You can search for your desired end indicator and
-                 # stop generation by closing the websocket here
-                 if (content["msg"] == "process_completed"):
-                     break
- 
- 
- def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
-     """
-     Send the request to chatGPT and fetch the output as a stream.
-     Used for the basic chat functionality.
-     inputs is the input of the current query.
-     top_p and temperature are chatGPT's internal tuning parameters.
-     history is the list of previous messages (note that for both inputs and history, overly long content triggers a token-overflow error).
-     chatbot is the conversation list shown in the WebUI; modify it and then yield it to update the chat interface directly.
-     additional_fn indicates which button was clicked; see functional.py for the buttons.
-     """
-     if additional_fn is not None:
-         import core_functional
-         importlib.reload(core_functional)  # hot-reload the prompts
-         core_functional = core_functional.get_core_functions()
-         if "PreProcess" in core_functional[additional_fn]:
-             inputs = core_functional[additional_fn]["PreProcess"](inputs)  # apply the preprocessing function (if any)
-         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
- 
-     raw_input = "What I would like to say is the following: " + inputs
-     history.extend([inputs, ""])
-     chatbot.append([inputs, ""])
-     yield from update_ui(chatbot=chatbot, history=history, msg="Waiting for response")  # refresh the interface
- 
-     prompt = raw_input
-     tgui_say = ""
- 
-     model_name, addr_port = llm_kwargs['llm_model'].split('@')
-     assert ':' in addr_port, "LLM_MODEL is not in the expected format! " + llm_kwargs['llm_model']
-     addr, port = addr_port.split(':')
- 
-     mutable = ["", time.time()]
- 
-     def run_coroutine(mutable):
-         async def get_result(mutable):
-             # "tgui:galactica-1.3b@localhost:7860"
-             async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
-                                       temperature=llm_kwargs['temperature'],
-                                       top_p=llm_kwargs['top_p'], addr=addr, port=port):
-                 print(response[len(mutable[0]):])
-                 mutable[0] = response
-                 if (time.time() - mutable[1]) > 3:
-                     print('exit when no listener')
-                     break
-         asyncio.run(get_result(mutable))
- 
-     thread_listen = threading.Thread(target=run_coroutine, args=(mutable,), daemon=True)
-     thread_listen.start()
- 
-     while thread_listen.is_alive():
-         time.sleep(1)
-         mutable[1] = time.time()
-         # Print intermediate steps
-         if tgui_say != mutable[0]:
-             tgui_say = mutable[0]
-             history[-1] = tgui_say
-             chatbot[-1] = (history[-2], history[-1])
-             yield from update_ui(chatbot=chatbot, history=history)  # refresh the interface
- 
- 
- def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
-     raw_input = "What I would like to say is the following: " + inputs
-     prompt = raw_input
-     tgui_say = ""
-     model_name, addr_port = llm_kwargs['llm_model'].split('@')
-     assert ':' in addr_port, "LLM_MODEL is not in the expected format! " + llm_kwargs['llm_model']
-     addr, port = addr_port.split(':')
- 
-     def run_coroutine(observe_window):
-         async def get_result(observe_window):
-             async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
-                                       temperature=llm_kwargs['temperature'],
-                                       top_p=llm_kwargs['top_p'], addr=addr, port=port):
-                 print(response[len(observe_window[0]):])
-                 observe_window[0] = response
-                 if (time.time() - observe_window[1]) > 5:
-                     print('exit when no listener')
-                     break
-         asyncio.run(get_result(observe_window))
-     thread_listen = threading.Thread(target=run_coroutine, args=(observe_window,))
-     thread_listen.start()
-     # wait for the generation thread to finish before returning the accumulated answer
-     thread_listen.join()
-     return observe_window[0]
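
A hypothetical call to the non-UI entry point above; the `llm_kwargs` fields mirror the ones the bridge reads, the model string follows the `<model>@<addr>:<port>` convention it splits on, and a text-generation-webui server is assumed to be listening at localhost:7860:

    llm_kwargs = {
        "llm_model": "tgui:galactica-1.3b@localhost:7860",
        "max_length": 512,
        "temperature": 0.7,
        "top_p": 0.9,
    }
    observe_window = ["", time.time()]
    answer = predict_no_ui_long_connection(
        "Hello!", llm_kwargs, history=[], sys_prompt="", observe_window=observe_window)
    print(answer)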
 
spaces/Amon1/ChatGPTForAcadamic/check_proxy.py DELETED
@@ -1,27 +0,0 @@
- 
- def check_proxy(proxies):
-     import requests
-     proxies_https = proxies['https'] if proxies is not None else 'none'
-     try:
-         response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
-         data = response.json()
-         print(f'Queried the proxy geolocation; the returned result is {data}')
-         if 'country_name' in data:
-             country = data['country_name']
-             result = f"Proxy configuration {proxies_https}, proxy location: {country}"
-         elif 'error' in data:
-             result = f"Proxy configuration {proxies_https}, proxy location: unknown, IP query rate-limited"
-         else:
-             result = f"Proxy configuration {proxies_https}, proxy location: unknown"
-         print(result)
-         return result
-     except Exception:
-         result = f"Proxy configuration {proxies_https}, proxy location query timed out; the proxy may be invalid"
-         print(result)
-         return result
- 
- 
- if __name__ == '__main__':
-     import os; os.environ['no_proxy'] = '*'  # avoid unexpected contamination from the proxy network
-     from toolbox import get_conf
-     proxies, = get_conf('proxies')
-     check_proxy(proxies)
- 
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-onnxruntime-cuda/Dockerfile DELETED
@@ -1,44 +0,0 @@
- FROM nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04
- LABEL maintainer="Hugging Face"
- LABEL repository="diffusers"
- 
- ENV DEBIAN_FRONTEND=noninteractive
- 
- RUN apt update && \
-     apt install -y bash \
-     build-essential \
-     git \
-     git-lfs \
-     curl \
-     ca-certificates \
-     libsndfile1-dev \
-     python3.8 \
-     python3-pip \
-     python3.8-venv && \
-     rm -rf /var/lib/apt/lists
- 
- # make sure to use venv
- RUN python3 -m venv /opt/venv
- ENV PATH="/opt/venv/bin:$PATH"
- 
- # pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
- RUN python3 -m pip install --no-cache-dir --upgrade pip && \
-     python3 -m pip install --no-cache-dir \
-     torch \
-     torchvision \
-     torchaudio \
-     "onnxruntime-gpu>=1.13.1" \
-     --extra-index-url https://download.pytorch.org/whl/cu117 && \
-     python3 -m pip install --no-cache-dir \
-     accelerate \
-     datasets \
-     hf-doc-builder \
-     huggingface-hub \
-     Jinja2 \
-     librosa \
-     numpy \
-     scipy \
-     tensorboard \
-     transformers
- 
- CMD ["/bin/bash"]
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/seed_resize_stable_diffusion.py DELETED
@@ -1,366 +0,0 @@
1
- """
2
- modified based on diffusion library from Huggingface: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
3
- """
4
- import inspect
5
- from typing import Callable, List, Optional, Union
6
-
7
- import torch
8
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
9
-
10
- from diffusers import DiffusionPipeline
11
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
14
- from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
15
- from diffusers.utils import logging
16
-
17
-
18
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
19
-
20
-
21
- class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
22
- r"""
23
- Pipeline for text-to-image generation using Stable Diffusion.
24
-
25
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
26
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
27
-
28
- Args:
29
- vae ([`AutoencoderKL`]):
30
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
31
- text_encoder ([`CLIPTextModel`]):
32
- Frozen text-encoder. Stable Diffusion uses the text portion of
33
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
34
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
35
- tokenizer (`CLIPTokenizer`):
36
- Tokenizer of class
37
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
38
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
39
- scheduler ([`SchedulerMixin`]):
40
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
41
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
42
- safety_checker ([`StableDiffusionSafetyChecker`]):
43
- Classification module that estimates whether generated images could be considered offensive or harmful.
44
- Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
45
- feature_extractor ([`CLIPImageProcessor`]):
46
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
47
- """
48
-
49
- def __init__(
50
- self,
51
- vae: AutoencoderKL,
52
- text_encoder: CLIPTextModel,
53
- tokenizer: CLIPTokenizer,
54
- unet: UNet2DConditionModel,
55
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
56
- safety_checker: StableDiffusionSafetyChecker,
57
- feature_extractor: CLIPImageProcessor,
58
- ):
59
- super().__init__()
60
- self.register_modules(
61
- vae=vae,
62
- text_encoder=text_encoder,
63
- tokenizer=tokenizer,
64
- unet=unet,
65
- scheduler=scheduler,
66
- safety_checker=safety_checker,
67
- feature_extractor=feature_extractor,
68
- )
69
-
70
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
71
- r"""
72
- Enable sliced attention computation.
73
-
74
- When this option is enabled, the attention module will split the input tensor in slices, to compute attention
75
- in several steps. This is useful to save some memory in exchange for a small speed decrease.
76
-
77
- Args:
78
- slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
79
- When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
80
- a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
81
- `attention_head_dim` must be a multiple of `slice_size`.
82
- """
83
- if slice_size == "auto":
84
- # half the attention head size is usually a good trade-off between
85
- # speed and memory
86
- slice_size = self.unet.config.attention_head_dim // 2
87
- self.unet.set_attention_slice(slice_size)
88
-
89
- def disable_attention_slicing(self):
90
- r"""
91
- Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
92
- back to computing attention in one step.
93
- """
94
- # set slice_size = `None` to disable `attention slicing`
95
- self.enable_attention_slicing(None)
96
-
97
- @torch.no_grad()
98
- def __call__(
99
- self,
100
- prompt: Union[str, List[str]],
101
- height: int = 512,
102
- width: int = 512,
103
- num_inference_steps: int = 50,
104
- guidance_scale: float = 7.5,
105
- negative_prompt: Optional[Union[str, List[str]]] = None,
106
- num_images_per_prompt: Optional[int] = 1,
107
- eta: float = 0.0,
108
- generator: Optional[torch.Generator] = None,
109
- latents: Optional[torch.FloatTensor] = None,
110
- output_type: Optional[str] = "pil",
111
- return_dict: bool = True,
112
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
113
- callback_steps: int = 1,
114
- text_embeddings: Optional[torch.FloatTensor] = None,
115
- **kwargs,
116
- ):
117
- r"""
118
- Function invoked when calling the pipeline for generation.
119
-
120
- Args:
121
- prompt (`str` or `List[str]`):
122
- The prompt or prompts to guide the image generation.
123
- height (`int`, *optional*, defaults to 512):
124
- The height in pixels of the generated image.
125
- width (`int`, *optional*, defaults to 512):
126
- The width in pixels of the generated image.
127
- num_inference_steps (`int`, *optional*, defaults to 50):
128
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
129
- expense of slower inference.
130
- guidance_scale (`float`, *optional*, defaults to 7.5):
131
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
132
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
133
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
134
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
135
- usually at the expense of lower image quality.
136
- negative_prompt (`str` or `List[str]`, *optional*):
137
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
138
- if `guidance_scale` is less than `1`).
139
- num_images_per_prompt (`int`, *optional*, defaults to 1):
140
- The number of images to generate per prompt.
141
- eta (`float`, *optional*, defaults to 0.0):
142
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
143
- [`schedulers.DDIMScheduler`], will be ignored for others.
144
- generator (`torch.Generator`, *optional*):
145
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
146
- deterministic.
147
- latents (`torch.FloatTensor`, *optional*):
148
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
149
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
150
- tensor will ge generated by sampling using the supplied random `generator`.
151
- output_type (`str`, *optional*, defaults to `"pil"`):
152
- The output format of the generate image. Choose between
153
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
154
- return_dict (`bool`, *optional*, defaults to `True`):
155
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
156
- plain tuple.
157
- callback (`Callable`, *optional*):
158
- A function that will be called every `callback_steps` steps during inference. The function will be
159
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
160
- callback_steps (`int`, *optional*, defaults to 1):
161
- The frequency at which the `callback` function will be called. If not specified, the callback will be
162
- called at every step.
163
-
164
- Returns:
165
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
166
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
167
- When returning a tuple, the first element is a list with the generated images, and the second element is a
168
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
169
- (nsfw) content, according to the `safety_checker`.
170
- """
171
-
172
- if isinstance(prompt, str):
173
- batch_size = 1
174
- elif isinstance(prompt, list):
175
- batch_size = len(prompt)
176
- else:
177
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
178
-
179
- if height % 8 != 0 or width % 8 != 0:
180
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
181
-
182
- if (callback_steps is None) or (
183
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
184
- ):
185
- raise ValueError(
186
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
187
- f" {type(callback_steps)}."
188
- )
189
-
190
- # get prompt text embeddings
191
- text_inputs = self.tokenizer(
192
- prompt,
193
- padding="max_length",
194
- max_length=self.tokenizer.model_max_length,
195
- return_tensors="pt",
196
- )
197
- text_input_ids = text_inputs.input_ids
198
-
199
- if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
200
- removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
201
- logger.warning(
202
- "The following part of your input was truncated because CLIP can only handle sequences up to"
203
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
204
- )
205
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
206
-
207
- if text_embeddings is None:
208
- text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
209
-
210
- # duplicate text embeddings for each generation per prompt, using mps friendly method
211
- bs_embed, seq_len, _ = text_embeddings.shape
212
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
213
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
214
-
215
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
216
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
217
- # corresponds to doing no classifier free guidance.
218
- do_classifier_free_guidance = guidance_scale > 1.0
219
- # get unconditional embeddings for classifier free guidance
220
- if do_classifier_free_guidance:
221
- uncond_tokens: List[str]
222
- if negative_prompt is None:
223
- uncond_tokens = [""]
224
- elif type(prompt) is not type(negative_prompt):
225
- raise TypeError(
226
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
227
- f" {type(prompt)}."
228
- )
229
- elif isinstance(negative_prompt, str):
230
- uncond_tokens = [negative_prompt]
231
- elif batch_size != len(negative_prompt):
232
- raise ValueError(
233
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
234
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
235
- " the batch size of `prompt`."
236
- )
237
- else:
238
- uncond_tokens = negative_prompt
239
-
240
- max_length = text_input_ids.shape[-1]
241
- uncond_input = self.tokenizer(
242
- uncond_tokens,
243
- padding="max_length",
244
- max_length=max_length,
245
- truncation=True,
246
- return_tensors="pt",
247
- )
248
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
249
-
250
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
251
- seq_len = uncond_embeddings.shape[1]
252
- uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
253
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
254
-
255
- # For classifier free guidance, we need to do two forward passes.
256
- # Here we concatenate the unconditional and text embeddings into a single batch
257
- # to avoid doing two forward passes
258
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
259
-
260
- # get the initial random noise unless the user supplied it
261
-
262
- # Unlike in other pipelines, latents need to be generated in the target device
263
- # for 1-to-1 results reproducibility with the CompVis implementation.
264
- # However this currently doesn't work in `mps`.
265
- latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
266
-        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
-        latents_dtype = text_embeddings.dtype
-        if latents is None:
-            if self.device.type == "mps":
-                # randn does not exist on mps
-                latents_reference = torch.randn(
-                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
-                ).to(self.device)
-                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
-                    self.device
-                )
-            else:
-                latents_reference = torch.randn(
-                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
-                )
-                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
-        else:
-            if latents_reference.shape != latents_shape_reference:
-                raise ValueError(
-                    f"Unexpected latents_reference shape, got {latents_reference.shape}, expected {latents_shape_reference}"
-                )
-            if latents.shape != latents_shape:
-                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
-            latents_reference = latents_reference.to(self.device)
-            latents = latents.to(self.device)
-
-        # This is the key part of the pipeline: to make images generated with the same seed
-        # but at different sizes look similar, paste the center-aligned overlap of the
-        # reference latents into the new latents.
-        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
-        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
-        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
-        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
-        tx = 0 if dx < 0 else dx
-        ty = 0 if dy < 0 else dy
-        dx = max(-dx, 0)
-        dy = max(-dy, 0)
-        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
-
-        # set timesteps
-        self.scheduler.set_timesteps(num_inference_steps)
-
-        # Some schedulers like PNDM have timesteps as arrays
-        # It's more optimized to move all timesteps to the correct device beforehand
-        timesteps_tensor = self.scheduler.timesteps.to(self.device)
-
-        # scale the initial noise by the standard deviation required by the scheduler
-        latents = latents * self.scheduler.init_noise_sigma
-
-        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
-        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
-        # and should be in [0, 1]
-        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        extra_step_kwargs = {}
-        if accepts_eta:
-            extra_step_kwargs["eta"] = eta
-
-        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
-            # expand the latents if we are doing classifier-free guidance
-            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-            # predict the noise residual
-            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
-            # perform guidance
-            if do_classifier_free_guidance:
-                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-            # compute the previous noisy sample x_t -> x_t-1
-            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
-            # call the callback, if provided
-            if callback is not None and i % callback_steps == 0:
-                callback(i, t, latents)
-
-        # decode the latents with the VAE (0.18215 is the Stable Diffusion latent scaling factor)
-        latents = 1 / 0.18215 * latents
-        image = self.vae.decode(latents).sample
-
-        image = (image / 2 + 0.5).clamp(0, 1)
-
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
-        if self.safety_checker is not None:
-            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
-                self.device
-            )
-            image, has_nsfw_concept = self.safety_checker(
-                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
-            )
-        else:
-            has_nsfw_concept = None
-
-        if output_type == "pil":
-            image = self.numpy_to_pil(image)
-
-        if not return_dict:
-            return (image, has_nsfw_concept)
-
-        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
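The center-alignment arithmetic above (dx/dy/w/h/tx/ty) is the easiest part of this pipeline to get wrong, so here is a minimal standalone sketch of the same index math on toy tensors (plain PyTorch, no pipeline state; sizes are invented for illustration):

```python
import torch

def paste_reference(latents: torch.Tensor, reference: torch.Tensor) -> torch.Tensor:
    """Copy the center-aligned overlap of `reference` into `latents` (both NCHW)."""
    dx = (latents.shape[3] - reference.shape[3]) // 2
    dy = (latents.shape[2] - reference.shape[2]) // 2
    # overlap size: the full reference if it fits, otherwise shrink by the deficit on both sides
    w = reference.shape[3] if dx >= 0 else reference.shape[3] + 2 * dx
    h = reference.shape[2] if dy >= 0 else reference.shape[2] + 2 * dy
    tx, ty = max(dx, 0), max(dy, 0)    # top-left corner of the overlap in the target
    sx, sy = max(-dx, 0), max(-dy, 0)  # top-left corner of the overlap in the reference
    latents[:, :, ty : ty + h, tx : tx + w] = reference[:, :, sy : sy + h, sx : sx + w]
    return latents

# a 64x64 reference pasted into a 96x64 target: the shared 64x64 block lands centered vertically
target = torch.zeros(1, 4, 96, 64)
reference = torch.randn(1, 4, 64, 64)
out = paste_reference(target, reference)
assert torch.equal(out[:, :, 16:80, :], reference)
```

Because the overlap always stays seed-aligned, the low-frequency structure of the image survives a resolution change even though the border noise is new.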
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/modeling_flax_utils.py DELETED
@@ -1,534 +0,0 @@
- # coding=utf-8
- # Copyright 2023 The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import os
- from pickle import UnpicklingError
- from typing import Any, Dict, Union
-
- import jax
- import jax.numpy as jnp
- import msgpack.exceptions
- from flax.core.frozen_dict import FrozenDict, unfreeze
- from flax.serialization import from_bytes, to_bytes
- from flax.traverse_util import flatten_dict, unflatten_dict
- from huggingface_hub import hf_hub_download
- from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
- from requests import HTTPError
-
- from .. import __version__, is_torch_available
- from ..utils import (
-     CONFIG_NAME,
-     DIFFUSERS_CACHE,
-     FLAX_WEIGHTS_NAME,
-     HUGGINGFACE_CO_RESOLVE_ENDPOINT,
-     WEIGHTS_NAME,
-     logging,
- )
- from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax
-
-
- logger = logging.get_logger(__name__)
-
-
- class FlaxModelMixin:
-     r"""
-     Base class for all Flax models.
-
-     [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and
-     saving models.
-
-         - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`].
-     """
-     config_name = CONFIG_NAME
-     _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"]
-     _flax_internal_args = ["name", "parent", "dtype"]
-
-     @classmethod
-     def _from_config(cls, config, **kwargs):
-         """
-         All context managers that the model should be initialized under go here.
-         """
-         return cls(config, **kwargs)
-
-     def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:
-         """
-         Helper method to cast floating-point values of a given parameter `PyTree` to the given `dtype`.
-         """
-
-         # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27
-         def conditional_cast(param):
-             if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):
-                 param = param.astype(dtype)
-             return param
-
-         if mask is None:
-             return jax.tree_map(conditional_cast, params)
-
-         flat_params = flatten_dict(params)
-         flat_mask, _ = jax.tree_flatten(mask)
-
-         for masked, key in zip(flat_mask, flat_params.keys()):
-             if masked:
-                 param = flat_params[key]
-                 flat_params[key] = conditional_cast(param)
-
-         return unflatten_dict(flat_params)
-
-     def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
-         r"""
-         Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
-         the `params` in place.
-
-         This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full
-         half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.
-
-         Arguments:
-             params (`Union[Dict, FrozenDict]`):
-                 A `PyTree` of model parameters.
-             mask (`Union[Dict, FrozenDict]`):
-                 A `PyTree` with the same structure as the `params` tree. The leaves should be booleans. It should be
-                 `True` for params you want to cast, and `False` for those you want to skip.
-
-         Examples:
-
-         ```python
-         >>> from diffusers import FlaxUNet2DConditionModel
-
-         >>> # load model
-         >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
-         >>> # By default, the model parameters will be in fp32 precision; to cast these to bfloat16 precision
-         >>> params = model.to_bf16(params)
-         >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
-         >>> # then pass the mask as follows
-         >>> from flax import traverse_util
-
-         >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
-         >>> flat_params = traverse_util.flatten_dict(params)
-         >>> mask = {
-         ...     path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
-         ...     for path in flat_params
-         ... }
-         >>> mask = traverse_util.unflatten_dict(mask)
-         >>> params = model.to_bf16(params, mask)
-         ```"""
-         return self._cast_floating_to(params, jnp.bfloat16, mask)
-
-     def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
-         r"""
-         Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
-         model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.
-
-         Arguments:
-             params (`Union[Dict, FrozenDict]`):
-                 A `PyTree` of model parameters.
-             mask (`Union[Dict, FrozenDict]`):
-                 A `PyTree` with the same structure as the `params` tree. The leaves should be booleans. It should be
-                 `True` for params you want to cast, and `False` for those you want to skip.
-
-         Examples:
-
-         ```python
-         >>> from diffusers import FlaxUNet2DConditionModel
-
-         >>> # Download model and configuration from huggingface.co
-         >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
-         >>> # By default, the model params will be in fp32; to illustrate the use of this method,
-         >>> # we'll first cast to fp16 and back to fp32
-         >>> params = model.to_fp16(params)
-         >>> # now cast back to fp32
-         >>> params = model.to_fp32(params)
-         ```"""
-         return self._cast_floating_to(params, jnp.float32, mask)
-
-     def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
-         r"""
-         Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the
-         `params` in place.
-
-         This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full
-         half-precision training or to save weights in float16 for inference in order to save memory and improve speed.
-
-         Arguments:
-             params (`Union[Dict, FrozenDict]`):
-                 A `PyTree` of model parameters.
-             mask (`Union[Dict, FrozenDict]`):
-                 A `PyTree` with the same structure as the `params` tree. The leaves should be booleans. It should be
-                 `True` for params you want to cast, and `False` for those you want to skip.
-
-         Examples:
-
-         ```python
-         >>> from diffusers import FlaxUNet2DConditionModel
-
-         >>> # load model
-         >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
-         >>> # By default, the model params will be in fp32; to cast these to float16
-         >>> params = model.to_fp16(params)
-         >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
-         >>> # then pass the mask as follows
-         >>> from flax import traverse_util
-
-         >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
-         >>> flat_params = traverse_util.flatten_dict(params)
-         >>> mask = {
-         ...     path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
-         ...     for path in flat_params
-         ... }
-         >>> mask = traverse_util.unflatten_dict(mask)
-         >>> params = model.to_fp16(params, mask)
-         ```"""
-         return self._cast_floating_to(params, jnp.float16, mask)
-
-     def init_weights(self, rng: jax.random.KeyArray) -> Dict:
-         raise NotImplementedError(f"init_weights method has to be implemented for {self}")
-
-     @classmethod
-     def from_pretrained(
-         cls,
-         pretrained_model_name_or_path: Union[str, os.PathLike],
-         dtype: jnp.dtype = jnp.float32,
-         *model_args,
-         **kwargs,
-     ):
-         r"""
-         Instantiate a pretrained Flax model from a pretrained model configuration.
-
-         Parameters:
-             pretrained_model_name_or_path (`str` or `os.PathLike`):
-                 Can be either:
-
-                     - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model
-                       hosted on the Hub.
-                     - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
-                       using [`~FlaxModelMixin.save_pretrained`].
-             dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
-                 The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
-                 `jax.numpy.bfloat16` (on TPUs).
-
-                 This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
-                 specified, all the computation will be performed with the given `dtype`.
-
-                 <Tip>
-
-                 This only specifies the dtype of the *computation* and does not influence the dtype of model
-                 parameters.
-
-                 If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and
-                 [`~FlaxModelMixin.to_bf16`].
-
-                 </Tip>
-
-             model_args (sequence of positional arguments, *optional*):
-                 All remaining positional arguments are passed to the underlying model's `__init__` method.
-             cache_dir (`Union[str, os.PathLike]`, *optional*):
-                 Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
-                 is not used.
-             force_download (`bool`, *optional*, defaults to `False`):
-                 Whether or not to force the (re-)download of the model weights and configuration files, overriding the
-                 cached versions if they exist.
-             resume_download (`bool`, *optional*, defaults to `False`):
-                 Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
-                 incompletely downloaded files are deleted.
-             proxies (`Dict[str, str]`, *optional*):
-                 A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
-                 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
-             local_files_only (`bool`, *optional*, defaults to `False`):
-                 Whether to only load local model weights and configuration files or not. If set to `True`, the model
-                 won't be downloaded from the Hub.
-             revision (`str`, *optional*, defaults to `"main"`):
-                 The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
-                 allowed by Git.
-             from_pt (`bool`, *optional*, defaults to `False`):
-                 Load the model weights from a PyTorch checkpoint save file.
-             kwargs (remaining dictionary of keyword arguments, *optional*):
-                 Can be used to update the configuration object (after it is loaded) and initiate the model (for
-                 example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
-                 automatically loaded:
-
-                     - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying
-                       model's `__init__` method (we assume all relevant updates to the configuration have already been
-                       done).
-                     - If a configuration is not provided, `kwargs` are first passed to the configuration class
-                       initialization function [`~ConfigMixin.from_config`]. Each key of the `kwargs` that corresponds
-                       to a configuration attribute is used to override said attribute with the supplied `kwargs` value.
-                       Remaining keys that do not correspond to any configuration attribute are passed to the underlying
-                       model's `__init__` function.
-
-         Examples:
-
-         ```python
-         >>> from diffusers import FlaxUNet2DConditionModel
-
-         >>> # Download model and configuration from huggingface.co and cache.
-         >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5")
-         >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
-         >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/")
-         ```
-
-         If you get the error message below, you need to finetune the weights for your downstream task:
-
-         ```bash
-         Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
-         - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
-         You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
-         ```
-         """
-         config = kwargs.pop("config", None)
-         cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
-         force_download = kwargs.pop("force_download", False)
-         from_pt = kwargs.pop("from_pt", False)
-         resume_download = kwargs.pop("resume_download", False)
-         proxies = kwargs.pop("proxies", None)
-         local_files_only = kwargs.pop("local_files_only", False)
-         use_auth_token = kwargs.pop("use_auth_token", None)
-         revision = kwargs.pop("revision", None)
-         subfolder = kwargs.pop("subfolder", None)
-
-         user_agent = {
-             "diffusers": __version__,
-             "file_type": "model",
-             "framework": "flax",
-         }
-
-         # Load config if we don't provide a configuration
-         config_path = config if config is not None else pretrained_model_name_or_path
-         model, model_kwargs = cls.from_config(
-             config_path,
-             cache_dir=cache_dir,
-             return_unused_kwargs=True,
-             force_download=force_download,
-             resume_download=resume_download,
-             proxies=proxies,
-             local_files_only=local_files_only,
-             use_auth_token=use_auth_token,
-             revision=revision,
-             subfolder=subfolder,
-             # model args
-             dtype=dtype,
-             **kwargs,
-         )
-
-         # Load model
-         pretrained_path_with_subfolder = (
-             pretrained_model_name_or_path
-             if subfolder is None
-             else os.path.join(pretrained_model_name_or_path, subfolder)
-         )
-         if os.path.isdir(pretrained_path_with_subfolder):
-             if from_pt:
-                 if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
-                     raise EnvironmentError(
-                         f"No file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder}."
-                     )
-                 model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)
-             elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)):
-                 # Load from a Flax checkpoint
-                 model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)
-             # Check if pytorch weights exist instead
-             elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):
-                 raise EnvironmentError(
-                     f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model"
-                     " using `from_pt=True`."
-                 )
-             else:
-                 raise EnvironmentError(
-                     f"No file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
-                     f"{pretrained_path_with_subfolder}."
-                 )
-         else:
-             try:
-                 model_file = hf_hub_download(
-                     pretrained_model_name_or_path,
-                     filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME,
-                     cache_dir=cache_dir,
-                     force_download=force_download,
-                     proxies=proxies,
-                     resume_download=resume_download,
-                     local_files_only=local_files_only,
-                     use_auth_token=use_auth_token,
-                     user_agent=user_agent,
-                     subfolder=subfolder,
-                     revision=revision,
-                 )
-
-             except RepositoryNotFoundError:
-                 raise EnvironmentError(
-                     f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
-                     "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
-                     "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
-                     "login`."
-                 )
-             except RevisionNotFoundError:
-                 raise EnvironmentError(
-                     f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
-                     "this model name. Check the model page at "
-                     f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
-                 )
-             except EntryNotFoundError:
-                 raise EnvironmentError(
-                     f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}."
-                 )
-             except HTTPError as err:
-                 raise EnvironmentError(
-                     f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n"
-                     f"{err}"
-                 )
-             except ValueError:
-                 raise EnvironmentError(
-                     f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
-                     f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
-                     f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheck your"
-                     " internet connection or see how to run the library in offline mode at"
-                     " 'https://huggingface.co/docs/transformers/installation#offline-mode'."
-                 )
-             except EnvironmentError:
-                 raise EnvironmentError(
-                     f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
-                     "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
-                     f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
-                     f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
-                 )
-
-         if from_pt:
-             if is_torch_available():
-                 from .modeling_utils import load_state_dict
-             else:
-                 raise EnvironmentError(
-                     "Can't load the model in PyTorch format because PyTorch is not installed. "
-                     "Please install PyTorch or use native Flax weights."
-                 )
-
-             # Step 1: Get the pytorch file
-             pytorch_model_file = load_state_dict(model_file)
-
-             # Step 2: Convert the weights
-             state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model)
-         else:
-             try:
-                 with open(model_file, "rb") as state_f:
-                     state = from_bytes(cls, state_f.read())
-             except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
-                 try:
-                     with open(model_file) as f:
-                         if f.read().startswith("version"):
-                             raise OSError(
-                                 "You seem to have cloned a repository without having git-lfs installed. Please"
-                                 " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
-                                 " folder you cloned."
-                             )
-                         else:
-                             raise ValueError from e
-                 except (UnicodeDecodeError, ValueError):
-                     raise EnvironmentError(f"Unable to convert {model_file} to a Flax deserializable object.")
-         # make sure all arrays are stored as jnp.ndarray
-         # NOTE: This is to prevent a bug that will be fixed in Flax >= v0.3.4:
-         # https://github.com/google/flax/issues/1261
-         state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.devices("cpu")[0]), state)
-
-         # flatten dicts
-         state = flatten_dict(state)
-
-         params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0))
-         required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())
-
-         shape_state = flatten_dict(unfreeze(params_shape_tree))
-
-         missing_keys = required_params - set(state.keys())
-         unexpected_keys = set(state.keys()) - required_params
-
-         if missing_keys:
-             logger.warning(
-                 f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. "
-                 "Make sure to call model.init_weights to initialize the missing weights."
-             )
-             cls._missing_keys = missing_keys
-
-         for key in state.keys():
-             if key in shape_state and state[key].shape != shape_state[key].shape:
-                 raise ValueError(
-                     f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
-                     f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. "
-                 )
-
-         # remove unexpected keys to not be saved again
-         for unexpected_key in unexpected_keys:
-             del state[unexpected_key]
-
-         if len(unexpected_keys) > 0:
-             logger.warning(
-                 f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
-                 f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
-                 f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
-                 " with another architecture."
-             )
-         else:
-             logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
-
-         if len(missing_keys) > 0:
-             logger.warning(
-                 f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
-                 f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
-                 " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
-             )
-         else:
-             logger.info(
-                 f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
-                 f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
-                 f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
-                 " training."
-             )
-
-         return model, unflatten_dict(state)
-
-     def save_pretrained(
-         self,
-         save_directory: Union[str, os.PathLike],
-         params: Union[Dict, FrozenDict],
-         is_main_process: bool = True,
-     ):
-         """
-         Save a model and its configuration file to a directory so that it can be reloaded using the
-         [`~FlaxModelMixin.from_pretrained`] class method.
-
-         Arguments:
-             save_directory (`str` or `os.PathLike`):
-                 Directory to save a model and its configuration file to. Will be created if it doesn't exist.
-             params (`Union[Dict, FrozenDict]`):
-                 A `PyTree` of model parameters.
-             is_main_process (`bool`, *optional*, defaults to `True`):
-                 Whether the process calling this is the main process or not. Useful during distributed training when
-                 you need to call this function on all processes. In this case, set `is_main_process=True` only on the
-                 main process to avoid race conditions.
-         """
-         if os.path.isfile(save_directory):
-             logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
-             return
-
-         os.makedirs(save_directory, exist_ok=True)
-
-         model_to_save = self
-
-         # Attach architecture to the config
-         # Save the config
-         if is_main_process:
-             model_to_save.save_config(save_directory)
-
-         # save model
-         output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)
-         with open(output_model_file, "wb") as f:
-             model_bytes = to_bytes(params)
-             f.write(model_bytes)
-
-         logger.info(f"Model weights saved in {output_model_file}")
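`_cast_floating_to` is the piece all three precision helpers (`to_bf16`, `to_fp16`, `to_fp32`) share, so here is a minimal standalone sketch of the same masked-cast idea on a toy pytree (parameter names and shapes are invented for illustration):

```python
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict

params = {
    "dense": {"kernel": jnp.ones((4, 4), jnp.float32), "bias": jnp.zeros((4,), jnp.float32)},
    "LayerNorm": {"scale": jnp.ones((4,), jnp.float32)},
}

# cast everything except the LayerNorm parameters to bfloat16
flat = flatten_dict(params)
mask = {path: path[0] != "LayerNorm" for path in flat}

def conditional_cast(x, do_cast):
    # only floating-point leaves are cast; integer leaves would pass through unchanged
    if do_cast and jnp.issubdtype(x.dtype, jnp.floating):
        return x.astype(jnp.bfloat16)
    return x

casted = unflatten_dict({path: conditional_cast(x, mask[path]) for path, x in flat.items()})
assert casted["dense"]["kernel"].dtype == jnp.bfloat16
assert casted["LayerNorm"]["scale"].dtype == jnp.float32
```

Keeping the normalization parameters in fp32 while the rest of the tree is half precision is the usual mixed-precision compromise the docstring mask examples describe.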
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_1d.py DELETED
@@ -1,267 +0,0 @@
- # coding=utf-8
- # Copyright 2023 HuggingFace Inc.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import unittest
-
- import torch
-
- from diffusers import UNet1DModel
- from diffusers.utils import floats_tensor, slow, torch_device
-
- from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
-
-
- class UNet1DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
-     model_class = UNet1DModel
-     main_input_name = "sample"
-
-     @property
-     def dummy_input(self):
-         batch_size = 4
-         num_features = 14
-         seq_len = 16
-
-         noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device)
-         time_step = torch.tensor([10] * batch_size).to(torch_device)
-
-         return {"sample": noise, "timestep": time_step}
-
-     @property
-     def input_shape(self):
-         return (4, 14, 16)
-
-     @property
-     def output_shape(self):
-         return (4, 14, 16)
-
-     def test_ema_training(self):
-         pass
-
-     def test_training(self):
-         pass
-
-     def test_determinism(self):
-         super().test_determinism()
-
-     def test_outputs_equivalence(self):
-         super().test_outputs_equivalence()
-
-     def test_from_save_pretrained(self):
-         super().test_from_save_pretrained()
-
-     def test_from_save_pretrained_variant(self):
-         super().test_from_save_pretrained_variant()
-
-     def test_model_from_pretrained(self):
-         super().test_model_from_pretrained()
-
-     def test_output(self):
-         super().test_output()
-
-     def prepare_init_args_and_inputs_for_common(self):
-         init_dict = {
-             "block_out_channels": (32, 64, 128, 256),
-             "in_channels": 14,
-             "out_channels": 14,
-             "time_embedding_type": "positional",
-             "use_timestep_embedding": True,
-             "flip_sin_to_cos": False,
-             "freq_shift": 1.0,
-             "out_block_type": "OutConv1DBlock",
-             "mid_block_type": "MidResTemporalBlock1D",
-             "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
-             "up_block_types": ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D"),
-             "act_fn": "swish",
-         }
-         inputs_dict = self.dummy_input
-         return init_dict, inputs_dict
-
-     def test_from_pretrained_hub(self):
-         model, loading_info = UNet1DModel.from_pretrained(
-             "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet"
-         )
-         self.assertIsNotNone(model)
-         self.assertEqual(len(loading_info["missing_keys"]), 0)
-
-         model.to(torch_device)
-         image = model(**self.dummy_input)
-
-         assert image is not None, "Make sure output is not None"
-
-     def test_output_pretrained(self):
-         model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet")
-         torch.manual_seed(0)
-         if torch.cuda.is_available():
-             torch.cuda.manual_seed_all(0)
-
-         num_features = model.config.in_channels
-         seq_len = 16
-         noise = torch.randn((1, seq_len, num_features)).permute(
-             0, 2, 1
-         )  # permute to match the original implementation's layout; values can be updated and this removed later
-         time_step = torch.full((num_features,), 0)
-
-         with torch.no_grad():
-             output = model(noise, time_step).sample.permute(0, 2, 1)
-
-         output_slice = output[0, -3:, -3:].flatten()
-         # fmt: off
-         expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, 0.11038864, -0.4760633, 0.13270172, 0.02591348])
-         # fmt: on
-         self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3))
-
-     def test_forward_with_norm_groups(self):
-         # Not implemented yet for this UNet
-         pass
-
-     @slow
-     def test_unet_1d_maestro(self):
-         model_id = "harmonai/maestro-150k"
-         model = UNet1DModel.from_pretrained(model_id, subfolder="unet")
-         model.to(torch_device)
-
-         sample_size = 65536
-         noise = torch.sin(torch.arange(sample_size)[None, None, :].repeat(1, 2, 1)).to(torch_device)
-         timestep = torch.tensor([1]).to(torch_device)
-
-         with torch.no_grad():
-             output = model(noise, timestep).sample
-
-         output_sum = output.abs().sum()
-         output_max = output.abs().max()
-
-         assert (output_sum - 224.0896).abs() < 0.5
-         assert (output_max - 0.0607).abs() < 4e-4
-
-
- class UNetRLModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
-     model_class = UNet1DModel
-     main_input_name = "sample"
-
-     @property
-     def dummy_input(self):
-         batch_size = 4
-         num_features = 14
-         seq_len = 16
-
-         noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device)
-         time_step = torch.tensor([10] * batch_size).to(torch_device)
-
-         return {"sample": noise, "timestep": time_step}
-
-     @property
-     def input_shape(self):
-         return (4, 14, 16)
-
-     @property
-     def output_shape(self):
-         return (4, 14, 1)
-
-     def test_determinism(self):
-         super().test_determinism()
-
-     def test_outputs_equivalence(self):
-         super().test_outputs_equivalence()
-
-     def test_from_save_pretrained(self):
-         super().test_from_save_pretrained()
-
-     def test_from_save_pretrained_variant(self):
-         super().test_from_save_pretrained_variant()
-
-     def test_model_from_pretrained(self):
-         super().test_model_from_pretrained()
-
-     def test_output(self):
-         # UNetRL is a value function, so it has a different output shape
-         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-         model = self.model_class(**init_dict)
-         model.to(torch_device)
-         model.eval()
-
-         with torch.no_grad():
-             output = model(**inputs_dict)
-
-         if isinstance(output, dict):
-             output = output.sample
-
-         self.assertIsNotNone(output)
-         expected_shape = torch.Size((inputs_dict["sample"].shape[0], 1))
-         self.assertEqual(output.shape, expected_shape, "Output shape does not match the expected shape")
-
-     def test_ema_training(self):
-         pass
-
-     def test_training(self):
-         pass
-
-     def prepare_init_args_and_inputs_for_common(self):
-         init_dict = {
-             "in_channels": 14,
-             "out_channels": 14,
-             "down_block_types": ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"],
-             "up_block_types": [],
-             "out_block_type": "ValueFunction",
-             "mid_block_type": "ValueFunctionMidBlock1D",
-             "block_out_channels": [32, 64, 128, 256],
-             "layers_per_block": 1,
-             "downsample_each_block": True,
-             "use_timestep_embedding": True,
-             "freq_shift": 1.0,
-             "flip_sin_to_cos": False,
-             "time_embedding_type": "positional",
-             "act_fn": "mish",
-         }
-         inputs_dict = self.dummy_input
-         return init_dict, inputs_dict
-
-     def test_from_pretrained_hub(self):
-         value_function, vf_loading_info = UNet1DModel.from_pretrained(
-             "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function"
-         )
-         self.assertIsNotNone(value_function)
-         self.assertEqual(len(vf_loading_info["missing_keys"]), 0)
-
-         value_function.to(torch_device)
-         image = value_function(**self.dummy_input)
-
-         assert image is not None, "Make sure output is not None"
-
-     def test_output_pretrained(self):
-         value_function, vf_loading_info = UNet1DModel.from_pretrained(
-             "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function"
-         )
-         torch.manual_seed(0)
-         if torch.cuda.is_available():
-             torch.cuda.manual_seed_all(0)
-
-         num_features = value_function.config.in_channels
-         seq_len = 14
-         noise = torch.randn((1, seq_len, num_features)).permute(
-             0, 2, 1
-         )  # permute to match the original implementation's layout; values can be updated and this removed later
-         time_step = torch.full((num_features,), 0)
-
-         with torch.no_grad():
-             output = value_function(noise, time_step).sample
-
-         # fmt: off
-         expected_output_slice = torch.tensor([165.25] * seq_len)
-         # fmt: on
-         self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3))
-
-     def test_forward_with_norm_groups(self):
-         # Not implemented yet for this UNet
-         pass
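To make the shape conventions in these tests concrete, here is a small sketch that instantiates the plain 1D UNet and checks that a forward pass preserves the `(batch, channels, seq_len)` layout. The configuration values are copied from `prepare_init_args_and_inputs_for_common` above, so this should mirror the test setup (assuming a diffusers version where `UNet1DModel` still accepts these arguments):

```python
import torch
from diffusers import UNet1DModel

model = UNet1DModel(
    block_out_channels=(32, 64, 128, 256),
    in_channels=14,
    out_channels=14,
    time_embedding_type="positional",
    use_timestep_embedding=True,
    flip_sin_to_cos=False,
    freq_shift=1.0,
    out_block_type="OutConv1DBlock",
    mid_block_type="MidResTemporalBlock1D",
    down_block_types=("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
    up_block_types=("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D"),
    act_fn="swish",
)

sample = torch.randn(4, 14, 16)   # (batch, features, seq_len), as in dummy_input
timestep = torch.tensor([10] * 4)
with torch.no_grad():
    out = model(sample, timestep).sample
assert out.shape == sample.shape  # the plain UNet keeps the input shape
```

The value-function variant in `UNetRLModelTests` differs exactly here: with `out_block_type="ValueFunction"` and `downsample_each_block=True`, the same model class collapses the sequence to a single scalar per sample, hence `output_shape == (4, 14, 1)` and the `(batch, 1)` check in `test_output`.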
spaces/Andy1621/uniformer_image_detection/configs/htc/htc_r50_fpn_1x_coco.py DELETED
@@ -1,56 +0,0 @@
- _base_ = './htc_without_semantic_r50_fpn_1x_coco.py'
- model = dict(
-     roi_head=dict(
-         semantic_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-             out_channels=256,
-             featmap_strides=[8]),
-         semantic_head=dict(
-             type='FusedSemanticHead',
-             num_ins=5,
-             fusion_level=1,
-             num_convs=4,
-             in_channels=256,
-             conv_out_channels=256,
-             num_classes=183,
-             ignore_label=255,
-             loss_weight=0.2)))
- data_root = 'data/coco/'
- img_norm_cfg = dict(
-     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
-     dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='SegRescale', scale_factor=1 / 8),
-     dict(type='DefaultFormatBundle'),
-     dict(
-         type='Collect',
-         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip', flip_ratio=0.5),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     train=dict(
-         seg_prefix=data_root + 'stuffthingmaps/train2017/',
-         pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
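One detail worth calling out in this config: `SegRescale` uses `scale_factor=1 / 8` precisely because the semantic head pools from the stride-8 feature level (`featmap_strides=[8]`), so the rescaled ground-truth semantic maps line up with that feature map. As a rough illustration of how mmdet-style configs like this become objects, here is a registry-free sketch of the usual `type`-key dispatch; the classes below are stand-ins for illustration, not the real mmdet transforms:

```python
# minimal sketch of mmdet-style config dispatch; Resize/RandomFlip are stand-in classes
class Resize:
    def __init__(self, img_scale, keep_ratio=True):
        self.img_scale, self.keep_ratio = img_scale, keep_ratio

class RandomFlip:
    def __init__(self, flip_ratio=0.5):
        self.flip_ratio = flip_ratio

TRANSFORMS = {"Resize": Resize, "RandomFlip": RandomFlip}

def build_from_cfg(cfg: dict):
    cfg = dict(cfg)                    # copy so the original config dict isn't mutated
    cls = TRANSFORMS[cfg.pop("type")]  # 'type' selects the class, the rest become kwargs
    return cls(**cfg)

pipeline = [build_from_cfg(c) for c in [
    dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
    dict(type="RandomFlip", flip_ratio=0.5),
]]
assert isinstance(pipeline[0], Resize) and pipeline[1].flip_ratio == 0.5
```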
spaces/ApathyINC/CustomGPT/app.py DELETED
@@ -1,516 +0,0 @@
- import os, sys, json
- os.system("pip install gradio==3.19.1")
- import openai
- import gradio as gr
-
- from loguru import logger
- import paddlehub as hub
- import random
- from encoder import get_encoder
-
- openai.api_key = os.getenv("OPENAI_API_KEY")
-
- from utils import get_tmt_client, getTextTrans_tmt
- tmt_client = get_tmt_client()
-
- def getTextTrans(text, source='zh', target='en'):
-     def is_chinese(string):
-         for ch in string:
-             if u'\u4e00' <= ch <= u'\u9fff':
-                 return True
-         return False
-
-     if not is_chinese(text) and target == 'en':
-         return text
-
-     try:
-         text_translation = getTextTrans_tmt(tmt_client, text, source, target)
-         return text_translation
-     except Exception as e:
-         return text
-
- start_work = """async() => {
-     function isMobile() {
-         try {
-             document.createEvent("TouchEvent"); return true;
-         } catch(e) {
-             return false;
-         }
-     }
-     function getClientHeight()
-     {
-         var clientHeight=0;
-         if(document.body.clientHeight&&document.documentElement.clientHeight) {
-             var clientHeight = (document.body.clientHeight<document.documentElement.clientHeight)?document.body.clientHeight:document.documentElement.clientHeight;
-         } else {
-             var clientHeight = (document.body.clientHeight>document.documentElement.clientHeight)?document.body.clientHeight:document.documentElement.clientHeight;
-         }
-         return clientHeight;
-     }
-
-     function setNativeValue(element, value) {
-         const valueSetter = Object.getOwnPropertyDescriptor(element.__proto__, 'value').set;
-         const prototype = Object.getPrototypeOf(element);
-         const prototypeValueSetter = Object.getOwnPropertyDescriptor(prototype, 'value').set;
-
-         if (valueSetter && valueSetter !== prototypeValueSetter) {
-             prototypeValueSetter.call(element, value);
-         } else {
-             valueSetter.call(element, value);
-         }
-         element.dispatchEvent(new Event('input', { bubbles: true }));
-     }
-     function get_clear_innerHTML(innerHTML) {
-         innerHTML = innerHTML.replace(/<p>|<\\/p>|\\n/g, '');
-         regexp = /\\★☆(.*?)\\☆★/;
-         match = innerHTML.match(regexp);
-         if (match) {
-             innerHTML = match[1];
-         }
-         return innerHTML;
-     }
-     function save_conversation(chatbot) {
-         var conversations = new Array();
-         var conversations_clear = new Array();
-         for (var i = 0; i < chatbot.children.length; i++) {
-             testid_icon = '☟:'; //'user'
-             if (chatbot.children[i].dataset['testid'] == 'bot') {
-                 testid_icon = '☝:'; //'bot'
-             }
-             innerHTML = chatbot.children[i].innerHTML;
-             conversations.push(testid_icon + innerHTML);
-             if (innerHTML.indexOf("<img") == -1 && innerHTML.indexOf("null_") == -1) {
-                 conversations_clear.push(testid_icon + get_clear_innerHTML(innerHTML));
-             }
-         }
-         var json_str = JSON.stringify(conversations);
-         setNativeValue(window['chat_his'], JSON.stringify(conversations_clear));
-         localStorage.setItem('chatgpt_conversations', json_str);
-     }
-     function img_click(img) {
-         this_width = parseInt(img.style.width) + 20;
-         if (this_width > 100) {
-             this_width = 20;
-         }
-         img.style.width = this_width + "%";
-         img.style.height = img.offsetWidth + 'px';
-     }
-     function load_conversation(chatbot) {
-         var json_str = localStorage.getItem('chatgpt_conversations');
-         if (json_str) {
-             var conversations_clear = new Array();
-             conversations = JSON.parse(json_str);
-             for (var i = 0; i < conversations.length; i++) {
-                 innerHTML = conversations[i];
-                 if (innerHTML.indexOf("☝:") == -1) {
-                     className = "message user svelte-134zwfa";
-                     bgcolor = "#16a34a";
-                     testid = "user";
-                     testid_icon = '☟:'; //'user'
-                 } else {
-                     className = "message bot svelte-134zwfa";
-                     bgcolor = "#2563eb";
-                     testid = "bot";
-                     testid_icon = '☝:'; //'bot'
-                 }
-                 var new_div = document.createElement("div");
-                 new_div.className = className;
-                 new_div.style.backgroundColor = bgcolor;
-                 new_div.dataset.testid = testid;
-                 if (innerHTML.indexOf("data:image/jpeg") >= 0) {
-                     new_div.style.width = "20%";
-                     new_div.style.padding = "0.2rem";
-                     new_div.onclick = function(e) {
-                         img_click(this);
-                     }
-                     setTimeout(function(){
-                         new_div.style.height = new_div.offsetWidth + 'px';
-                         new_div.children[0].setAttribute('style', 'max-width: none; width:100%');
-                     }, 10);
-                 }
-                 innerHTML = innerHTML.replace("☝:", "");
-                 innerHTML = innerHTML.replace("☟:", "");
-                 new_div.innerHTML = innerHTML;
-                 if (innerHTML.indexOf("null_") != -1) {
-                     new_div.style.display = 'none';
-                 }
-                 chatbot.appendChild(new_div);
-
-                 if (innerHTML.indexOf("<img") == -1 && innerHTML.indexOf("null_") == -1) {
-                     conversations_clear.push(testid_icon + get_clear_innerHTML(innerHTML));
-                 }
-             }
-             setNativeValue(window['chat_his'], JSON.stringify(conversations_clear));
-             setTimeout(function(){
-                 window['chat_bot1'].children[1].scrollTop = window['chat_bot1'].children[1].scrollHeight;
-             }, 500);
-         }
-     }
-     var gradioEl = document.querySelector('body > gradio-app').shadowRoot;
-     if (!gradioEl) {
-         gradioEl = document.querySelector('body > gradio-app');
-     }
-
-     if (typeof window['gradioEl'] === 'undefined') {
-         window['gradioEl'] = gradioEl;
-
-         const page1 = window['gradioEl'].querySelectorAll('#page_1')[0];
-         const page2 = window['gradioEl'].querySelectorAll('#page_2')[0];
-
-         page1.style.display = "none";
-         page2.style.display = "block";
-         window['div_count'] = 0;
-         window['chat_radio_0'] = window['gradioEl'].querySelectorAll('#chat_radio')[0].querySelectorAll('input[name=radio-chat_radio]')[0];
-         window['chat_radio_1'] = window['gradioEl'].querySelectorAll('#chat_radio')[0].querySelectorAll('input[name=radio-chat_radio]')[1];
-         window['chat_bot'] = window['gradioEl'].querySelectorAll('#chat_bot')[0];
-         window['chat_bot1'] = window['gradioEl'].querySelectorAll('#chat_bot1')[0];
-         window['my_prompt'] = window['gradioEl'].querySelectorAll('#my_prompt')[0].querySelectorAll('textarea')[0];
-         window['my_prompt_en'] = window['gradioEl'].querySelectorAll('#my_prompt_en')[0].querySelectorAll('textarea')[0];
-         window['chat_his'] = window['gradioEl'].querySelectorAll('#chat_history')[0].querySelectorAll('textarea')[0];
-         // cache the submit/clear buttons referenced by set_buttons() below
-         // (assumed fix: these lookups were missing, so set_buttons() would throw and be swallowed by the try/catch)
-         window['submit-btn'] = window['gradioEl'].querySelectorAll('#submit-btn')[0];
-         window['clear-btn'] = window['gradioEl'].querySelectorAll('#clear-btn')[0];
-         chat_row = window['gradioEl'].querySelectorAll('#chat_row')[0];
-         prompt_row = window['gradioEl'].querySelectorAll('#prompt_row')[0];
-         window['chat_bot1'].children[1].children[0].textContent = '';
-
-         clientHeight = getClientHeight();
-         if (isMobile()) {
-             output_htmls = window['gradioEl'].querySelectorAll('.output-html');
-             for (var i = 0; i < output_htmls.length; i++) {
-                 output_htmls[i].style.display = "none";
-             }
-             new_height = (clientHeight - 250) + 'px';
-         } else {
-             new_height = (clientHeight - 350) + 'px';
-         }
-         chat_row.style.height = new_height;
-         window['chat_bot'].style.height = new_height;
-         window['chat_bot'].children[1].style.height = new_height;
-         window['chat_bot1'].style.height = new_height;
-         window['chat_bot1'].children[1].style.height = new_height;
-         window['chat_bot1'].children[0].style.top = (parseInt(window['chat_bot1'].style.height)-window['chat_bot1'].children[0].offsetHeight-2) + 'px';
-         prompt_row.children[0].style.flex = 'auto';
-         prompt_row.children[0].style.width = '100%';
-         window['gradioEl'].querySelectorAll('#chat_radio')[0].style.flex = 'auto';
-         window['gradioEl'].querySelectorAll('#chat_radio')[0].style.width = '100%';
-         prompt_row.children[0].setAttribute('style','flex-direction: inherit; flex: 1 1 auto; width: 100%;border-color: green;border-width: 1px !important;')
-         window['chat_bot1'].children[1].setAttribute('style', 'border-bottom-right-radius:0;top:unset;bottom:0;padding-left:0.1rem');
-         window['gradioEl'].querySelectorAll('#btns_row')[0].children[0].setAttribute('style', 'min-width: min(10px, 100%); flex-grow: 1');
-         window['gradioEl'].querySelectorAll('#btns_row')[0].children[1].setAttribute('style', 'min-width: min(10px, 100%); flex-grow: 1');
-
-         load_conversation(window['chat_bot1'].children[1].children[0]);
-         window['chat_bot1'].children[1].scrollTop = window['chat_bot1'].children[1].scrollHeight;
-
-         window['gradioEl'].querySelectorAll('#clear-btn')[0].onclick = function(e){
-             if (confirm('Clear all outputs?')==true) {
-                 for (var i = window['chat_bot'].children[1].children[0].children.length-1; i >= 0; i--) {
-                     window['chat_bot'].children[1].children[0].removeChild(window['chat_bot'].children[1].children[0].children[i]);
-                 }
-                 for (var i = window['chat_bot1'].children[1].children[0].children.length-1; i >= 0; i--) {
-                     window['chat_bot1'].children[1].children[0].removeChild(window['chat_bot1'].children[1].children[0].children[i]);
-                 }
-                 window['div_count'] = 0;
-                 save_conversation(window['chat_bot1'].children[1].children[0]);
-             }
-         }
-
-         function set_buttons(action) {
-             window['submit-btn'].disabled = action;
-             window['clear-btn'].disabled = action;
-             window['chat_radio_0'].disabled = action;
-             window['chat_radio_1'].disabled = action;
-             btn_color = 'color:#000';
-             if (action) {
-                 btn_color = 'color:#ccc';
-             }
-             window['submit-btn'].setAttribute('style', btn_color);
-             window['clear-btn'].setAttribute('style', btn_color);
-             window['chat_radio_0'].setAttribute('style', btn_color);
-             window['chat_radio_1'].setAttribute('style', btn_color);
-         }
-         window['prevPrompt'] = '';
-         window['doCheckPrompt'] = 0;
-         window['prevImgSrc'] = '';
-         window['checkChange'] = function checkChange() {
-             try {
-                 if (window['chat_radio_0'].checked) {
-                     dot_flashing = window['chat_bot'].children[1].children[0].querySelectorAll('.dot-flashing');
-                     if (window['chat_bot'].children[1].children[0].children.length > window['div_count'] && dot_flashing.length == 0) {
-                         new_len = window['chat_bot'].children[1].children[0].children.length - window['div_count'];
-                         for (var i = 0; i < new_len; i++) {
-                             new_div = window['chat_bot'].children[1].children[0].children[window['div_count'] + i].cloneNode(true);
-                             window['chat_bot1'].children[1].children[0].appendChild(new_div);
-                         }
-                         window['div_count'] = window['chat_bot'].children[1].children[0].children.length;
-                         window['chat_bot1'].children[1].scrollTop = window['chat_bot1'].children[1].scrollHeight;
-                         save_conversation(window['chat_bot1'].children[1].children[0]);
-                     }
-                     if (window['chat_bot'].children[0].children.length > 1) {
-                         set_buttons(true);
-                         window['chat_bot1'].children[0].textContent = window['chat_bot'].children[0].children[1].textContent;
-                     } else {
-                         set_buttons(false);
-                         window['chat_bot1'].children[0].textContent = '';
-                     }
-                 } else {
-                     img_index = 0;
-                     draw_prompt_en = window['my_prompt_en'].value;
-                     if (window['doCheckPrompt'] == 0 && window['prevPrompt'] != draw_prompt_en) {
-                         console.log('_____draw_prompt_en___[' + draw_prompt_en + ']_');
-                         window['doCheckPrompt'] = 1;
-                         window['prevPrompt'] = draw_prompt_en;
-
-                         tabitems = window['gradioEl'].querySelectorAll('.tabitem');
-                         for (var i = 0; i < tabitems.length; i++) {
-                             inputText = tabitems[i].children[0].children[1].children[0].querySelectorAll('input')[0];
-                             setNativeValue(inputText, draw_prompt_en);
-                         }
-                         setTimeout(function() {
-                             window['draw_prompt'] = window['my_prompt'].value;
-                             btns = window['gradioEl'].querySelectorAll('button');
-                             for (var i = 0; i < btns.length; i++) {
-                                 if (['Generate image','Run'].includes(btns[i].innerText)) {
-                                     btns[i].click();
-                                 }
-                             }
-                             window['doCheckPrompt'] = 0;
-                         }, 10);
-                     }
-                     tabitems = window['gradioEl'].querySelectorAll('.tabitem');
-                     imgs = tabitems[img_index].children[0].children[1].children[1].querySelectorAll("img");
-                     if (imgs.length > 0) {
-                         if (window['prevImgSrc'] !== imgs[0].src) {
-                             var user_div = document.createElement("div");
-                             user_div.className = "message user svelte-134zwfa";
-                             user_div.style.backgroundColor = "#16a34a";
-                             user_div.dataset.testid = 'user';
-                             user_div.innerHTML = "<p>Drawing: " + window['draw_prompt'] + "</p><img></img>";
-                             window['chat_bot1'].children[1].children[0].appendChild(user_div);
-                             var bot_div = document.createElement("div");
-                             bot_div.className = "message bot svelte-134zwfa";
-                             bot_div.style.backgroundColor = "#2563eb";
-                             bot_div.style.width = "20%";
-                             bot_div.dataset.testid = 'bot';
-                             bot_div.onclick = function(e){
-                                 img_click(this);
-                             }
-                             setTimeout(function(){
-                                 bot_div.style.height = bot_div.offsetWidth + 'px';
-                                 bot_div.children[0].setAttribute('style', 'max-width:none; width:100%');
-                             }, 10);
-                             bot_div.style.padding = "0.2rem";
-                             bot_div.appendChild(imgs[0].cloneNode(true));
-                             window['chat_bot1'].children[1].children[0].appendChild(bot_div);
-
-                             window['chat_bot1'].children[1].scrollTop = window['chat_bot1'].children[1].scrollHeight;
-                             window['prevImgSrc'] = imgs[0].src;
-                             save_conversation(window['chat_bot1'].children[1].children[0]);
-                         }
-                     }
-                     if (tabitems[img_index].children[0].children[1].children[1].children[0].children.length > 1) {
-                         tips = tabitems[img_index].children[0].children[1].children[1].children[0].textContent;
-                         if (tips.indexOf("Error") == -1) {
-                             set_buttons(true);
-                         } else {
-                             set_buttons(false);
-                         }
-                         window['chat_bot1'].children[0].textContent = 'Drawing ' + tips;
-                     } else {
-                         set_buttons(false);
-                         window['chat_bot1'].children[0].textContent = '';
-                     }
-                 }
-
-             } catch(e) {
-             }
-         }
-         window['checkChange_interval'] = window.setInterval("window.checkChange()", 500);
-     }
-
-     return false;
- }"""
-
- space_ids = {
-     "spaces/stabilityai/stable-diffusion": "Stable Diffusion 2.1",
-     # "spaces/runwayml/stable-diffusion-v1-5": "Stable Diffusion 1.5",
-     # "spaces/stabilityai/stable-diffusion-1": "Stable Diffusion 1.0",
- }
-
- tab_actions = []
- tab_titles = []
-
- for space_id in space_ids.keys():
-     print(space_id, space_ids[space_id])
-     try:
-         tab = gr.Interface.load(space_id)
-         tab_actions.append(tab)
-         tab_titles.append(space_ids[space_id])
-     except Exception as e:
-         logger.info(f"load_fail__{space_id}_{e}")
-
- token_encoder = get_encoder()
- total_tokens = 4096
- max_output_tokens = 1024
- max_input_tokens = total_tokens - max_output_tokens
-
- def set_openai_api_key(api_key):
-     if api_key and api_key.startswith("sk-") and len(api_key) > 50:
-         openai.api_key = api_key
-
- def get_response_from_openai(input, chat_history, model_radio):
-     error_1 = 'You exceeded your current quota, please check your plan and billing details.'
-     def openai_create(input_list, model_radio):
-         try:
-             # print(f'input_list={input_list}')
-             input_list_len = len(input_list)
-             out_prompt = ''
-             messages = []
-             if model_radio == 'GPT-3.0':
-                 out_prompt = 'AI:'
-             for i in range(input_list_len):
-                 input = input_list[input_list_len-i-1].replace("<br>", '\n\n')
-                 if input.startswith("Openai said:"):
-                     input = "☝:"
-
-                 if input.startswith("☝:"):
-                     if model_radio == 'GPT-3.0':
-                         out_prompt = input.replace("☝:", "AI:") + '\n' + out_prompt
-                     else:
-                         out_prompt = input.replace("☝:", "") + out_prompt
-                         messages.insert(0, {"role": "assistant", "content": input.replace("☝:", "")})
-                 elif input.startswith("☟:"):
-                     if model_radio == 'GPT-3.0':
-                         out_prompt = input.replace("☟:", "Human:") + '\n' + out_prompt
-                     else:
-                         out_prompt = input.replace("☟:", "") + out_prompt
-                         messages.insert(0, {"role": "user", "content": input.replace("☟:", "")})
-                 tokens = token_encoder.encode(out_prompt)
-                 if len(tokens) > max_input_tokens:
-                     break
-
-             if model_radio == 'GPT-3.0':
-                 # print(out_prompt)
-                 response = openai.Completion.create(
-                     model="text-davinci-003",
-                     prompt=out_prompt,
-                     temperature=0.7,
-                     max_tokens=max_output_tokens,
-                     top_p=1,
-                     frequency_penalty=0,
-                     presence_penalty=0,
-                     stop=[" Human:", " AI:"]
-                 )
-                 # print(f'response_3.0__:{response}')
-                 ret = response.choices[0].text
-             else:
-                 # print(messages)
-                 response = openai.ChatCompletion.create(
-                     model="gpt-3.5-turbo",
-                     messages=messages,
-                     temperature=0.7,
-                     max_tokens=max_output_tokens,
-                     top_p=1,
-                     frequency_penalty=0,
-                     presence_penalty=0,
-                     stop=[" Human:", " AI:"]
-                 )
-                 # print(f'response_3.5__:{response}')
-                 ret = response.choices[0].message['content']
-             if ret.startswith("\n\n"):
-                 ret = ret.replace("\n\n", '')
-             ret = ret.replace('\n', '<br>')
-             if ret == '':
-                 ret = f"Openai said: I'm too tired."
-             return ret, response.usage
-         except Exception as e:
-             logger.info(f"openai_create_error__{e}")
-             ret = f"Openai said: {e} Perhaps enter your OpenAI API key."
-             return ret, {"completion_tokens": -1, "prompt_tokens": -1, "total_tokens": -1}
-
-     # logger.info(f'chat_history = {chat_history}')
-     chat_history_list = []
-     chat_history = chat_history.replace("<p>", "").replace("</p>", "")
-     if chat_history != '':
-         chat_history_list = json.loads(chat_history)
-     chat_history_list.append(f'☟:{input}')
-
-     output, response_usage = openai_create(chat_history_list, model_radio)
-     logger.info(f'response_usage={response_usage}')
-     return output
-
- def chat(input0, input1, chat_radio, model_radio, all_chat_history, chat_history):
-     all_chat = []
-     if all_chat_history != '':
-         all_chat = json.loads(all_chat_history)
-
-     if len(input0) == 0:
-         return all_chat, json.dumps(all_chat), input0, input1
-
-     if chat_radio == "Talk to chatGPT":
-         response = get_response_from_openai(input0, chat_history, model_radio)
-         all_chat.append((input0, response))
-         return all_chat, json.dumps(all_chat), '', input1
-     else:
-         prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
-         return all_chat, json.dumps(all_chat), input0, prompt_en
-
- def chat_radio_change(chat_radio):
-     if chat_radio == "Talk to chatGPT":
-         return gr.Radio.update(visible=True), gr.Text.update(visible=True)
-     else:
-         return gr.Radio.update(visible=False), gr.Text.update(visible=False)
-
- with gr.Blocks(title='Talk to chatGPT') as demo:
-     with gr.Row(elem_id="page_0", visible=False) as page_0:
-         gr.HTML("<p>You can duplicate this space and use your own session token: <a style='display:inline-block' href='https://huggingface.co/spaces/yizhangliu/chatGPT?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
-     with gr.Group(elem_id="page_1", visible=True) as page_1:
-         with gr.Box():
-             with gr.Row():
-                 start_button = gr.Button("CustomGPT modded by MBHudson :: Click to Continue! ::", elem_id="start-btn", visible=True)
-                 start_button.click(fn=None, inputs=[], outputs=[], _js=start_work)
-
-     with gr.Row(elem_id="page_2", visible=False) as page_2:
-         with gr.Row(elem_id="chat_row"):
-             chatbot = gr.Chatbot(elem_id="chat_bot", visible=False).style(color_map=("green", "blue"))
-             chatbot1 = gr.Chatbot(elem_id="chat_bot1").style(color_map=("green", "blue"))
-         with gr.Row(elem_id="prompt_row"):
-             prompt_input0 = gr.Textbox(lines=2, label="input", elem_id="my_prompt", show_label=True)
-             prompt_input1 = gr.Textbox(lines=4, label="prompt", elem_id="my_prompt_en", visible=False)
-             chat_history = gr.Textbox(lines=4, label="chat_history", elem_id="chat_history", visible=False)
-             all_chat_history = gr.Textbox(lines=4, label="conversation context:", elem_id="all_chat_history", visible=False)
-
-             chat_radio = gr.Radio(["Talk to chatGPT", "Text to Image"], elem_id="chat_radio", value="Talk to chatGPT", show_label=False, visible=True)
-             model_radio = gr.Radio(["GPT-3.0", "GPT-3.5"], elem_id="model_radio", value="GPT-3.5",
-                                    label='GPT model: ', show_label=True, interactive=True, visible=True)
-             openai_api_key_textbox = gr.Textbox(placeholder="Paste your OpenAI API key (sk-...) and hit Enter",
-                                                 show_label=False, lines=1, type='password')
-         with gr.Row(elem_id="btns_row"):
-             with gr.Column(elem_id="submit_col"):
-                 submit_btn = gr.Button(value="submit", elem_id="submit-btn").style(
-                     margin=True,
-                     rounded=(True, True, True, True),
-                     width=100
-                 )
-             with gr.Column(elem_id="clear_col"):
-                 clear_btn = gr.Button(value="clear outputs", elem_id="clear-btn").style(
-                     margin=True,
-                     rounded=(True, True, True, True),
-                     width=100
-                 )
-         submit_btn.click(fn=chat,
-                          inputs=[prompt_input0, prompt_input1, chat_radio, model_radio, all_chat_history, chat_history],
500
- outputs=[chatbot, all_chat_history, prompt_input0, prompt_input1],
501
- )
502
- with gr.Row(elem_id='tab_img', visible=False).style(height=5):
503
- tab_img = gr.TabbedInterface(tab_actions, tab_titles)
504
-
505
- openai_api_key_textbox.change(set_openai_api_key,
506
- inputs=[openai_api_key_textbox],
507
- outputs=[])
508
- openai_api_key_textbox.submit(set_openai_api_key,
509
- inputs=[openai_api_key_textbox],
510
- outputs=[])
511
- chat_radio.change(fn=chat_radio_change,
512
- inputs=[chat_radio],
513
- outputs=[model_radio, openai_api_key_textbox],
514
- )
515
-
516
- demo.launch(debug = True)
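The deleted app above trims chat history to a token budget before calling the API: it walks turns from newest to oldest and stops once the encoded prompt exceeds max_input_tokens. A minimal standalone sketch of that windowing trick, assuming tiktoken as the tokenizer (the original used a bundled GPT-2 `get_encoder()`, not shown in this hunk):

    import tiktoken

    encoder = tiktoken.get_encoding("gpt2")  # assumption: stand-in for the app's get_encoder()
    TOTAL_TOKENS = 4096
    MAX_OUTPUT_TOKENS = 1024
    MAX_INPUT_TOKENS = TOTAL_TOKENS - MAX_OUTPUT_TOKENS

    def build_prompt(history: list[str]) -> str:
        """Keep only the most recent turns that fit in the input budget."""
        prompt = ""
        for turn in reversed(history):  # newest turn first
            candidate = turn + "\n" + prompt
            if len(encoder.encode(candidate)) > MAX_INPUT_TOKENS:
                break  # older turns no longer fit; drop them
            prompt = candidate
        return prompt

Unlike the original (which appends a turn and then checks the budget), this sketch checks before including a turn, so it never emits an over-budget prompt.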
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/base.py DELETED
@@ -1,141 +0,0 @@
- from typing import FrozenSet, Iterable, Optional, Tuple, Union
-
- from pip._vendor.packaging.specifiers import SpecifierSet
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
- from pip._vendor.packaging.version import LegacyVersion, Version
-
- from pip._internal.models.link import Link, links_equivalent
- from pip._internal.req.req_install import InstallRequirement
- from pip._internal.utils.hashes import Hashes
-
- CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]]
- CandidateVersion = Union[LegacyVersion, Version]
-
-
- def format_name(project: str, extras: FrozenSet[str]) -> str:
-     if not extras:
-         return project
-     canonical_extras = sorted(canonicalize_name(e) for e in extras)
-     return "{}[{}]".format(project, ",".join(canonical_extras))
-
-
- class Constraint:
-     def __init__(
-         self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link]
-     ) -> None:
-         self.specifier = specifier
-         self.hashes = hashes
-         self.links = links
-
-     @classmethod
-     def empty(cls) -> "Constraint":
-         return Constraint(SpecifierSet(), Hashes(), frozenset())
-
-     @classmethod
-     def from_ireq(cls, ireq: InstallRequirement) -> "Constraint":
-         links = frozenset([ireq.link]) if ireq.link else frozenset()
-         return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links)
-
-     def __bool__(self) -> bool:
-         return bool(self.specifier) or bool(self.hashes) or bool(self.links)
-
-     def __and__(self, other: InstallRequirement) -> "Constraint":
-         if not isinstance(other, InstallRequirement):
-             return NotImplemented
-         specifier = self.specifier & other.specifier
-         hashes = self.hashes & other.hashes(trust_internet=False)
-         links = self.links
-         if other.link:
-             links = links.union([other.link])
-         return Constraint(specifier, hashes, links)
-
-     def is_satisfied_by(self, candidate: "Candidate") -> bool:
-         # Reject if there are any mismatched URL constraints on this package.
-         if self.links and not all(_match_link(link, candidate) for link in self.links):
-             return False
-         # We can safely always allow prereleases here since PackageFinder
-         # already implements the prerelease logic, and would have filtered out
-         # prerelease candidates if the user does not expect them.
-         return self.specifier.contains(candidate.version, prereleases=True)
-
-
- class Requirement:
-     @property
-     def project_name(self) -> NormalizedName:
-         """The "project name" of a requirement.
-
-         This is different from ``name`` if this requirement contains extras,
-         in which case ``name`` would contain the ``[...]`` part, while this
-         refers to the name of the project.
-         """
-         raise NotImplementedError("Subclass should override")
-
-     @property
-     def name(self) -> str:
-         """The name identifying this requirement in the resolver.
-
-         This is different from ``project_name`` if this requirement contains
-         extras, where ``project_name`` would not contain the ``[...]`` part.
-         """
-         raise NotImplementedError("Subclass should override")
-
-     def is_satisfied_by(self, candidate: "Candidate") -> bool:
-         return False
-
-     def get_candidate_lookup(self) -> CandidateLookup:
-         raise NotImplementedError("Subclass should override")
-
-     def format_for_error(self) -> str:
-         raise NotImplementedError("Subclass should override")
-
-
- def _match_link(link: Link, candidate: "Candidate") -> bool:
-     if candidate.source_link:
-         return links_equivalent(link, candidate.source_link)
-     return False
-
-
- class Candidate:
-     @property
-     def project_name(self) -> NormalizedName:
-         """The "project name" of the candidate.
-
-         This is different from ``name`` if this candidate contains extras,
-         in which case ``name`` would contain the ``[...]`` part, while this
-         refers to the name of the project.
-         """
-         raise NotImplementedError("Override in subclass")
-
-     @property
-     def name(self) -> str:
-         """The name identifying this candidate in the resolver.
-
-         This is different from ``project_name`` if this candidate contains
-         extras, where ``project_name`` would not contain the ``[...]`` part.
-         """
-         raise NotImplementedError("Override in subclass")
-
-     @property
-     def version(self) -> CandidateVersion:
-         raise NotImplementedError("Override in subclass")
-
-     @property
-     def is_installed(self) -> bool:
-         raise NotImplementedError("Override in subclass")
-
-     @property
-     def is_editable(self) -> bool:
-         raise NotImplementedError("Override in subclass")
-
-     @property
-     def source_link(self) -> Optional[Link]:
-         raise NotImplementedError("Override in subclass")
-
-     def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
-         raise NotImplementedError("Override in subclass")
-
-     def get_install_requirement(self) -> Optional[InstallRequirement]:
-         raise NotImplementedError("Override in subclass")
-
-     def format_for_error(self) -> str:
-         raise NotImplementedError("Subclass should override")
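The `format_name` helper in this deleted file canonicalizes and sorts extras before joining them. A small worked example, derived directly from the quoted source (it reuses pip's vendored packaging module, which the file itself imports):

    from pip._vendor.packaging.utils import canonicalize_name

    def format_name(project, extras):
        if not extras:
            return project
        canonical_extras = sorted(canonicalize_name(e) for e in extras)
        return "{}[{}]".format(project, ",".join(canonical_extras))

    assert format_name("pip", frozenset()) == "pip"
    # extras are normalized to lower case and emitted in sorted order
    assert format_name("pip", frozenset({"Socks", "docs"})) == "pip[docs,socks]"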
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/vcs/bazaar.py DELETED
@@ -1,112 +0,0 @@
- import logging
- from typing import List, Optional, Tuple
-
- from pip._internal.utils.misc import HiddenText, display_path
- from pip._internal.utils.subprocess import make_command
- from pip._internal.utils.urls import path_to_url
- from pip._internal.vcs.versioncontrol import (
-     AuthInfo,
-     RemoteNotFoundError,
-     RevOptions,
-     VersionControl,
-     vcs,
- )
-
- logger = logging.getLogger(__name__)
-
-
- class Bazaar(VersionControl):
-     name = "bzr"
-     dirname = ".bzr"
-     repo_name = "branch"
-     schemes = (
-         "bzr+http",
-         "bzr+https",
-         "bzr+ssh",
-         "bzr+sftp",
-         "bzr+ftp",
-         "bzr+lp",
-         "bzr+file",
-     )
-
-     @staticmethod
-     def get_base_rev_args(rev: str) -> List[str]:
-         return ["-r", rev]
-
-     def fetch_new(
-         self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
-     ) -> None:
-         rev_display = rev_options.to_display()
-         logger.info(
-             "Checking out %s%s to %s",
-             url,
-             rev_display,
-             display_path(dest),
-         )
-         if verbosity <= 0:
-             flag = "--quiet"
-         elif verbosity == 1:
-             flag = ""
-         else:
-             flag = f"-{'v'*verbosity}"
-         cmd_args = make_command(
-             "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
-         )
-         self.run_command(cmd_args)
-
-     def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
-         self.run_command(make_command("switch", url), cwd=dest)
-
-     def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
-         output = self.run_command(
-             make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
-         )
-         if output.startswith("Standalone "):
-             # Older versions of pip used to create standalone branches.
-             # Convert the standalone branch to a checkout by calling "bzr bind".
-             cmd_args = make_command("bind", "-q", url)
-             self.run_command(cmd_args, cwd=dest)
-
-         cmd_args = make_command("update", "-q", rev_options.to_args())
-         self.run_command(cmd_args, cwd=dest)
-
-     @classmethod
-     def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
-         # hotfix the URL scheme after removing bzr+ from bzr+ssh:// re-add it
-         url, rev, user_pass = super().get_url_rev_and_auth(url)
-         if url.startswith("ssh://"):
-             url = "bzr+" + url
-         return url, rev, user_pass
-
-     @classmethod
-     def get_remote_url(cls, location: str) -> str:
-         urls = cls.run_command(
-             ["info"], show_stdout=False, stdout_only=True, cwd=location
-         )
-         for line in urls.splitlines():
-             line = line.strip()
-             for x in ("checkout of branch: ", "parent branch: "):
-                 if line.startswith(x):
-                     repo = line.split(x)[1]
-                     if cls._is_local_repository(repo):
-                         return path_to_url(repo)
-                     return repo
-         raise RemoteNotFoundError
-
-     @classmethod
-     def get_revision(cls, location: str) -> str:
-         revision = cls.run_command(
-             ["revno"],
-             show_stdout=False,
-             stdout_only=True,
-             cwd=location,
-         )
-         return revision.splitlines()[-1]
-
-     @classmethod
-     def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
-         """Always assume the versions don't match"""
-         return False
-
-
- vcs.register(Bazaar)
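The `get_url_rev_and_auth` override above exists only because the generic parser strips the `bzr+` prefix from `bzr+ssh://` URLs. A sketch of that re-add step in isolation (hypothetical helper name; the real logic lives inside the classmethod):

    def restore_bzr_scheme(url: str) -> str:
        # mirrors the hotfix in Bazaar.get_url_rev_and_auth: after the generic
        # parser strips "bzr+", an ssh:// URL must get the prefix back
        if url.startswith("ssh://"):
            return "bzr+" + url
        return url

    assert restore_bzr_scheme("ssh://host/branch") == "bzr+ssh://host/branch"
    assert restore_bzr_scheme("https://host/branch") == "https://host/branch"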
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/ansi_test.py DELETED
@@ -1,76 +0,0 @@
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
- import sys
- from unittest import TestCase, main
-
- from ..ansi import Back, Fore, Style
- from ..ansitowin32 import AnsiToWin32
-
- stdout_orig = sys.stdout
- stderr_orig = sys.stderr
-
-
- class AnsiTest(TestCase):
-
-     def setUp(self):
-         # sanity check: stdout should be a file or StringIO object.
-         # It will only be AnsiToWin32 if init() has previously wrapped it
-         self.assertNotEqual(type(sys.stdout), AnsiToWin32)
-         self.assertNotEqual(type(sys.stderr), AnsiToWin32)
-
-     def tearDown(self):
-         sys.stdout = stdout_orig
-         sys.stderr = stderr_orig
-
-
-     def testForeAttributes(self):
-         self.assertEqual(Fore.BLACK, '\033[30m')
-         self.assertEqual(Fore.RED, '\033[31m')
-         self.assertEqual(Fore.GREEN, '\033[32m')
-         self.assertEqual(Fore.YELLOW, '\033[33m')
-         self.assertEqual(Fore.BLUE, '\033[34m')
-         self.assertEqual(Fore.MAGENTA, '\033[35m')
-         self.assertEqual(Fore.CYAN, '\033[36m')
-         self.assertEqual(Fore.WHITE, '\033[37m')
-         self.assertEqual(Fore.RESET, '\033[39m')
-
-         # Check the light, extended versions.
-         self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m')
-         self.assertEqual(Fore.LIGHTRED_EX, '\033[91m')
-         self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m')
-         self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m')
-         self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m')
-         self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m')
-         self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m')
-         self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m')
-
-
-     def testBackAttributes(self):
-         self.assertEqual(Back.BLACK, '\033[40m')
-         self.assertEqual(Back.RED, '\033[41m')
-         self.assertEqual(Back.GREEN, '\033[42m')
-         self.assertEqual(Back.YELLOW, '\033[43m')
-         self.assertEqual(Back.BLUE, '\033[44m')
-         self.assertEqual(Back.MAGENTA, '\033[45m')
-         self.assertEqual(Back.CYAN, '\033[46m')
-         self.assertEqual(Back.WHITE, '\033[47m')
-         self.assertEqual(Back.RESET, '\033[49m')
-
-         # Check the light, extended versions.
-         self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m')
-         self.assertEqual(Back.LIGHTRED_EX, '\033[101m')
-         self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m')
-         self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m')
-         self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m')
-         self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m')
-         self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m')
-         self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m')
-
-
-     def testStyleAttributes(self):
-         self.assertEqual(Style.DIM, '\033[2m')
-         self.assertEqual(Style.NORMAL, '\033[22m')
-         self.assertEqual(Style.BRIGHT, '\033[1m')
-
-
- if __name__ == '__main__':
-     main()
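These assertions pin colorama's public constants to their raw ANSI escape codes. In normal use the constants are simply concatenated into strings; a short usage sketch with the real colorama API:

    from colorama import Back, Fore, Style, init

    init()  # on Windows, translates escapes via Win32 calls; elsewhere a pass-through
    print(Fore.RED + "error:" + Fore.RESET + " something failed")
    print(Back.GREEN + Style.BRIGHT + " ok " + Style.RESET_ALL)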
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/batch_norm.py DELETED
@@ -1,276 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import torch
- import torch.distributed as dist
- from fvcore.nn.distributed import differentiable_all_reduce
- from torch import nn
- from torch.nn import functional as F
-
- from detectron2.utils import comm, env
-
- from .wrappers import BatchNorm2d
-
-
- class FrozenBatchNorm2d(nn.Module):
-     """
-     BatchNorm2d where the batch statistics and the affine parameters are fixed.
-
-     It contains non-trainable buffers called
-     "weight" and "bias", "running_mean", "running_var",
-     initialized to perform identity transformation.
-
-     The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
-     which are computed from the original four parameters of BN.
-     The affine transform `x * weight + bias` will perform the equivalent
-     computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
-     When loading a backbone model from Caffe2, "running_mean" and "running_var"
-     will be left unchanged as identity transformation.
-
-     Other pre-trained backbone models may contain all 4 parameters.
-
-     The forward is implemented by `F.batch_norm(..., training=False)`.
-     """
-
-     _version = 3
-
-     def __init__(self, num_features, eps=1e-5):
-         super().__init__()
-         self.num_features = num_features
-         self.eps = eps
-         self.register_buffer("weight", torch.ones(num_features))
-         self.register_buffer("bias", torch.zeros(num_features))
-         self.register_buffer("running_mean", torch.zeros(num_features))
-         self.register_buffer("running_var", torch.ones(num_features) - eps)
-
-     def forward(self, x):
-         if x.requires_grad:
-             # When gradients are needed, F.batch_norm will use extra memory
-             # because its backward op computes gradients for weight/bias as well.
-             scale = self.weight * (self.running_var + self.eps).rsqrt()
-             bias = self.bias - self.running_mean * scale
-             scale = scale.reshape(1, -1, 1, 1)
-             bias = bias.reshape(1, -1, 1, 1)
-             out_dtype = x.dtype  # may be half
-             return x * scale.to(out_dtype) + bias.to(out_dtype)
-         else:
-             # When gradients are not needed, F.batch_norm is a single fused op
-             # and provide more optimization opportunities.
-             return F.batch_norm(
-                 x,
-                 self.running_mean,
-                 self.running_var,
-                 self.weight,
-                 self.bias,
-                 training=False,
-                 eps=self.eps,
-             )
-
-     def _load_from_state_dict(
-         self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
-     ):
-         version = local_metadata.get("version", None)
-
-         if version is None or version < 2:
-             # No running_mean/var in early versions
-             # This will silent the warnings
-             if prefix + "running_mean" not in state_dict:
-                 state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
-             if prefix + "running_var" not in state_dict:
-                 state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
-
-         super()._load_from_state_dict(
-             state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
-         )
-
-     def __repr__(self):
-         return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
-
-     @classmethod
-     def convert_frozen_batchnorm(cls, module):
-         """
-         Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
-
-         Args:
-             module (torch.nn.Module):
-
-         Returns:
-             If module is BatchNorm/SyncBatchNorm, returns a new module.
-             Otherwise, in-place convert module and return it.
-
-         Similar to convert_sync_batchnorm in
-         https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
-         """
-         bn_module = nn.modules.batchnorm
-         bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
-         res = module
-         if isinstance(module, bn_module):
-             res = cls(module.num_features)
-             if module.affine:
-                 res.weight.data = module.weight.data.clone().detach()
-                 res.bias.data = module.bias.data.clone().detach()
-             res.running_mean.data = module.running_mean.data
-             res.running_var.data = module.running_var.data
-             res.eps = module.eps
-         else:
-             for name, child in module.named_children():
-                 new_child = cls.convert_frozen_batchnorm(child)
-                 if new_child is not child:
-                     res.add_module(name, new_child)
-         return res
-
-
- def get_norm(norm, out_channels):
-     """
-     Args:
-         norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
-             or a callable that takes a channel number and returns
-             the normalization layer as a nn.Module.
-
-     Returns:
-         nn.Module or None: the normalization layer
-     """
-     if norm is None:
-         return None
-     if isinstance(norm, str):
-         if len(norm) == 0:
-             return None
-         norm = {
-             "BN": BatchNorm2d,
-             # Fixed in https://github.com/pytorch/pytorch/pull/36382
-             "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
-             "FrozenBN": FrozenBatchNorm2d,
-             "GN": lambda channels: nn.GroupNorm(32, channels),
-             # for debugging:
-             "nnSyncBN": nn.SyncBatchNorm,
-             "naiveSyncBN": NaiveSyncBatchNorm,
-             # expose stats_mode N as an option to caller, required for zero-len inputs
-             "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"),
-         }[norm]
-     return norm(out_channels)
-
-
- class NaiveSyncBatchNorm(BatchNorm2d):
-     """
-     In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
-     when the batch size on each worker is different.
-     (e.g., when scale augmentation is used, or when it is applied to mask head).
-
-     This is a slower but correct alternative to `nn.SyncBatchNorm`.
-
-     Note:
-         There isn't a single definition of Sync BatchNorm.
-
-         When ``stats_mode==""``, this module computes overall statistics by using
-         statistics of each worker with equal weight. The result is true statistics
-         of all samples (as if they are all on one worker) only when all workers
-         have the same (N, H, W). This mode does not support inputs with zero batch size.
-
-         When ``stats_mode=="N"``, this module computes overall statistics by weighting
-         the statistics of each worker by their ``N``. The result is true statistics
-         of all samples (as if they are all on one worker) only when all workers
-         have the same (H, W). It is slower than ``stats_mode==""``.
-
-         Even though the result of this module may not be the true statistics of all samples,
-         it may still be reasonable because it might be preferrable to assign equal weights
-         to all workers, regardless of their (H, W) dimension, instead of putting larger weight
-         on larger images. From preliminary experiments, little difference is found between such
-         a simplified implementation and an accurate computation of overall mean & variance.
-     """
-
-     def __init__(self, *args, stats_mode="", **kwargs):
-         super().__init__(*args, **kwargs)
-         assert stats_mode in ["", "N"]
-         self._stats_mode = stats_mode
-
-     def forward(self, input):
-         if comm.get_world_size() == 1 or not self.training:
-             return super().forward(input)
-
-         B, C = input.shape[0], input.shape[1]
-
-         half_input = input.dtype == torch.float16
-         if half_input:
-             # fp16 does not have good enough numerics for the reduction here
-             input = input.float()
-         mean = torch.mean(input, dim=[0, 2, 3])
-         meansqr = torch.mean(input * input, dim=[0, 2, 3])
-
-         if self._stats_mode == "":
-             assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
-             vec = torch.cat([mean, meansqr], dim=0)
-             vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())
-             mean, meansqr = torch.split(vec, C)
-             momentum = self.momentum
-         else:
-             if B == 0:
-                 vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
-                 vec = vec + input.sum()  # make sure there is gradient w.r.t input
-             else:
-                 vec = torch.cat(
-                     [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
-                 )
-             vec = differentiable_all_reduce(vec * B)
-
-             total_batch = vec[-1].detach()
-             momentum = total_batch.clamp(max=1) * self.momentum  # no update if total_batch is 0
-             mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C)  # avoid div-by-zero
-
-         var = meansqr - mean * mean
-         invstd = torch.rsqrt(var + self.eps)
-         scale = self.weight * invstd
-         bias = self.bias - mean * scale
-         scale = scale.reshape(1, -1, 1, 1)
-         bias = bias.reshape(1, -1, 1, 1)
-
-         self.running_mean += momentum * (mean.detach() - self.running_mean)
-         self.running_var += momentum * (var.detach() - self.running_var)
-         ret = input * scale + bias
-         if half_input:
-             ret = ret.half()
-         return ret
-
-
- class CycleBatchNormList(nn.ModuleList):
-     """
-     Implement domain-specific BatchNorm by cycling.
-
-     When a BatchNorm layer is used for multiple input domains or input
-     features, it might need to maintain a separate test-time statistics
-     for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`.
-
-     This module implements it by using N separate BN layers
-     and it cycles through them every time a forward() is called.
-
-     NOTE: The caller of this module MUST guarantee to always call
-     this module by multiple of N times. Otherwise its test-time statistics
-     will be incorrect.
-     """
-
-     def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):
-         """
-         Args:
-             length: number of BatchNorm layers to cycle.
-             bn_class: the BatchNorm class to use
-             kwargs: arguments of the BatchNorm class, such as num_features.
-         """
-         self._affine = kwargs.pop("affine", True)
-         super().__init__([bn_class(**kwargs, affine=False) for k in range(length)])
-         if self._affine:
-             # shared affine, domain-specific BN
-             channels = self[0].num_features
-             self.weight = nn.Parameter(torch.ones(channels))
-             self.bias = nn.Parameter(torch.zeros(channels))
-         self._pos = 0
-
-     def forward(self, x):
-         ret = self[self._pos](x)
-         self._pos = (self._pos + 1) % len(self)
-
-         if self._affine:
-             w = self.weight.reshape(1, -1, 1, 1)
-             b = self.bias.reshape(1, -1, 1, 1)
-             return ret * w + b
-         else:
-             return ret
-
-     def extra_repr(self):
-         return f"affine={self._affine}"
 
spaces/Banbri/zcvzcv/src/components/ui/dialog.tsx DELETED
@@ -1,122 +0,0 @@
- "use client"
-
- import * as React from "react"
- import * as DialogPrimitive from "@radix-ui/react-dialog"
- import { X } from "lucide-react"
-
- import { cn } from "@/lib/utils"
-
- const Dialog = DialogPrimitive.Root
-
- const DialogTrigger = DialogPrimitive.Trigger
-
- const DialogPortal = ({
-   ...props
- }: DialogPrimitive.DialogPortalProps) => (
-   <DialogPrimitive.Portal {...props} />
- )
- DialogPortal.displayName = DialogPrimitive.Portal.displayName
-
- const DialogOverlay = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Overlay>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Overlay>
- >(({ className, ...props }, ref) => (
-   <DialogPrimitive.Overlay
-     ref={ref}
-     className={cn(
-       "fixed inset-0 z-50 bg-white/80 backdrop-blur-sm data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 dark:bg-stone-950/80",
-       className
-     )}
-     {...props}
-   />
- ))
- DialogOverlay.displayName = DialogPrimitive.Overlay.displayName
-
- const DialogContent = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Content>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Content>
- >(({ className, children, ...props }, ref) => (
-   <DialogPortal>
-     <DialogOverlay />
-     <DialogPrimitive.Content
-       ref={ref}
-       className={cn(
-         "fixed left-[50%] top-[50%] z-50 grid w-full max-w-lg translate-x-[-50%] translate-y-[-50%] gap-4 border border-stone-200 bg-white p-6 shadow-lg duration-200 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[state=closed]:slide-out-to-left-1/2 data-[state=closed]:slide-out-to-top-[48%] data-[state=open]:slide-in-from-left-1/2 data-[state=open]:slide-in-from-top-[48%] sm:rounded-lg md:w-full dark:border-stone-800 dark:bg-stone-950",
-         className
-       )}
-       {...props}
-     >
-       {children}
-       <DialogPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-white transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-stone-400 focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-stone-100 data-[state=open]:text-stone-500 dark:ring-offset-stone-950 dark:focus:ring-stone-800 dark:data-[state=open]:bg-stone-800 dark:data-[state=open]:text-stone-400">
-         <X className="h-4 w-4" />
-         <span className="sr-only">Close</span>
-       </DialogPrimitive.Close>
-     </DialogPrimitive.Content>
-   </DialogPortal>
- ))
- DialogContent.displayName = DialogPrimitive.Content.displayName
-
- const DialogHeader = ({
-   className,
-   ...props
- }: React.HTMLAttributes<HTMLDivElement>) => (
-   <div
-     className={cn(
-       "flex flex-col space-y-1.5 text-center sm:text-left",
-       className
-     )}
-     {...props}
-   />
- )
- DialogHeader.displayName = "DialogHeader"
-
- const DialogFooter = ({
-   className,
-   ...props
- }: React.HTMLAttributes<HTMLDivElement>) => (
-   <div
-     className={cn(
-       "flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2",
-       className
-     )}
-     {...props}
-   />
- )
- DialogFooter.displayName = "DialogFooter"
-
- const DialogTitle = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Title>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Title>
- >(({ className, ...props }, ref) => (
-   <DialogPrimitive.Title
-     ref={ref}
-     className={cn(
-       "text-lg font-semibold leading-none tracking-tight",
-       className
-     )}
-     {...props}
-   />
- ))
- DialogTitle.displayName = DialogPrimitive.Title.displayName
-
- const DialogDescription = React.forwardRef<
-   React.ElementRef<typeof DialogPrimitive.Description>,
-   React.ComponentPropsWithoutRef<typeof DialogPrimitive.Description>
- >(({ className, ...props }, ref) => (
-   <DialogPrimitive.Description
-     ref={ref}
-     className={cn("text-sm text-stone-500 dark:text-stone-400", className)}
-     {...props}
-   />
- ))
- DialogDescription.displayName = DialogPrimitive.Description.displayName
-
- export {
-   Dialog,
-   DialogTrigger,
-   DialogContent,
-   DialogHeader,
-   DialogFooter,
-   DialogTitle,
-   DialogDescription,
- }
 
spaces/Benson/text-generation/Examples/ Recuva.md DELETED
@@ -1,99 +0,0 @@
- <br />
- <h1>Download Recuva: How to Recover Deleted Files from Computer and Other Devices</h1>
- <p>Did you accidentally delete an important file from a computer, flash drive, memory card or other device? Do not despair, there is a way to return it back. To do this, you will need a Recuva program that can recover files even after disk formatting or cleaning up the trash. In this article we will tell you what Recuva is, how to download and install it on Windows, how to use it to recover or safely delete files. </p>
- <h2>What is Recuva and why is it needed? </h2>
- <p>Recuva is a free file recovery software developed by Piriform, also known for other products such as CCleaner, Defraggler and Speccy. Recuva can help you find and recover lost data from any media connected to your computer. </p>
- <h2>скачать recuva</h2><br /><p><b><b>Download</b> &#10003; <a href="https://bltlly.com/2v6KBh">https://bltlly.com/2v6KBh</a></b></p><br /><br />
- <h3>The main features and benefits of Recuva</h3>
- <p>Recuva has a number of features and benefits that make it one of the best data recovery programs. Here are some of them:</p>
- <ul>
- <li>Recuva can recover files of any type: photos, music, videos, documents, emails and others. </li>
- <li>Recuva can work with any device that can be connected to the computer via USB or other interface: hard drives, flash drives, memory cards, digital cameras, MP3 players and others. </li>
- <li>Recuva can restore files even after they have been deleted from the recycle bin, overwritten by other data or damaged by viruses. </li>
- <li>Recuva has a simple and user-friendly interface that allows you to easily customize your search and recovery options. You can choose file type, location, scan depth and other options. </li>
-
- <li>Recuva has a safe file removal feature that allows you to permanently erase data from the disk using various overwriting methods. This can be useful if you want to get rid of confidential information or free up disk space. </li>
- </ul>
- <h3>System requirements and supported file formats</h3>
- <p>Recuva runs on Windows 10, 8.1, 8, 7, Vista and XP operating systems (including 32-bit and 64-bit versions). The program requires about 15 MB of disk space and an Internet connection to download updates. Recuva supports over 1000 file formats including images (JPG, PNG, GIF, BMP and others), audio (MP3, WAV, WMA, OGG and others), video (AVI, MP4, WMV, MOV and others), documents (DOC, PDF, XP, PPT and others), PST and many others. </p>
- <h2>How to download and install Recuva on Windows? </h2>
- <p>There are several ways to download Recuva to your computer. We will consider two of them: from the official website of the program and using the special utility MultiSetup.</p>
- <h3>Download Recuva from official website</h3>
- <p>To download Recuva from the official website of the program, follow these steps:</p>
- <ol>
- <li>Go to <a href="">https://www.ccleaner.com/recuva</a>. </li>
- <li>Click on the button "Download Free Version" or "Download Professional" depending on what version of the program you want to get. The free version has all the basic features of Recuva, and the paid version adds features such as virtual hard disk, automatic upgrade and support. </li>
- <li>On the page opened click on "Download from Piriform.com" or "Download from FileHippo.com". Both of these sites are reliable sources for downloading the program. </li>
- <li>Save the recuva.exe file on your computer. </li>
- </ol>
- <h3>Download Recuva with MultiSetup</h3>
-
- <ol>
- <li>Go to <a href="">https://Multi-setup.ru/</a>. </li>
- <li>Click on the "Download MultiSetup" button and save the MultiSetup.exe file on your computer. </li>
- <li>Start the MultiSetup.exe file and wait for the program download to finish. </li>
- <li>In the list of programs, find Recuva and put a check in front of it. </li>
- <li>Click on the "Set Selected" button and follow the instructions on the screen. </li>
- </ol>
- <h3>Install Recuva on Computer</h3>
- <p>After you have downloaded the recuva.exe file from the official website or with MultiSetup, you can install Recuva on your computer. To do this, follow these steps:</p>
- <ol>
- <li>Start the recuva.exe file and click the "Yes" button in the account control window. </li>
- <li>Select the installation language and click "OK". </li>
- <li>Read and accept the license agreement by clicking on "I agree". </li>
- <li>Select the installation type: standard or custom. The standard type will install Recuva in the C: Program Files Recuva folder with default settings. The custom type allows you to select a folder to install, create shortcuts on your desktop and "Start" menu, and add Recuva to the Windows Explorer context menu.</li>
- <li>Click on the "Install" button and wait for the end of the process. </li>
- <li>Click on "Complete" button and start Recuva.</li>
- </ol>
- <h2>How to use Recuva to recover deleted files? </h2>
- <p>Once you have installed Recuva on your computer, you can start using it to recover deleted files. To do this, follow these steps:</p>
- <h3>Start Recuva and select file type</h3>
- <p>Start Recuva and select the mode of operation: wizard or advanced. The wizard will start automatically on the first run of the program, and the advanced one can be selected by clicking the "Switch to Advanced Mode" button. We recommend that you use the wizard to start, as it is simpler and easier to understand. </p>
- <p></p>
-
- <h3>Select search location and run scan</h3>
- <p>In the next window you need to choose the location of search for deleted files. You can choose one of <p>In the next window you need to choose a location to search for deleted files. You can choose one of the suggested options: on your computer, in your shopping cart, on your memory card, on your iPod or MP3 player, on your CD or DVD or elsewhere. You can also specify a specific path to the folder or drive where you think the files might be located. Click the "Next" button to continue. </p>
- <p>In the last window you need to run a scan of the drive or folder you have selected. You can choose a normal scan or a deep scan. Normal scanning is faster but less efficient than deep scanning, which takes longer but searches for files more thoroughly. Click on the "Start" button to start the scan. </p>
- <h3>View results and recover files</h3>
- <p>After the scan is complete, you will see a list of the found files. You can sort them by name, path, size, date or state. You can also view a thumbnail of the image or the contents of the text file by clicking on the "Preview" button. You can see the probability of file recovery by color indicator: green - excellent, yellow - acceptable, red - bad. </p>
- <p>To recover the files you need, tick the boxes next to them and click the "Recover" button. Select the folder where you want to save the files and click the "OK" button. It is not recommended to save files to the same disk or device from which you are recovering them, as this may result in overwriting the data and losing the chance to recover other files. </p>
- <h2>How to safely delete files using Recuva? </h2>
-
- <h3>Start Recuva and go to safe delete mode</h3>
- <p>Start Recuva and switch to advanced mode by clicking "Switch to advanced mode". Click on the "Options" button in the upper right corner of the program window. Click the "Actions" tab and tick the "Enable Safe Deletion" option. Click the "OK" button to save the changes. </p>
- <h3>Select files and overwrite method</h3>
- <p>Return to the main program window and select a location to search for files for safe deletion. Start the scan and tick the files you want to delete. Right click on <p>Right click on one of the selected files and select the "Safely delete selected files" option. In the window that appears, select the overwriting method you want to use. There are several methods that differ in reliability and speed. The higher the degree of reliability, the more time the file will be overwritten and the less chance it will be to recover, but the longer it will be the process of deleting. You can choose one of the following methods:</p>
- <ul>
- <li>Simple rewriting (1 pass) - the fastest and weakest method that overwrites files with one pass of random data. </li>
- <li>DoD 5220.22-M (3 passes) is the middle method that overwrites files with three passes: one zero pass, one unit pass, and one random data pass. </li>
- <li>NSA (7 passes) is a powerful method that overwrites files with seven passages: four passages with random data and three passages with special templates. </li>
- <li>Gutmann (35 passes) is the strongest and slowest method that overwrites files with 35 passes of different templates based on Gutmann’s algorithm.</li>
- </ul>
- <p>Select the method that suits you and click the "OK" button. Wait until the removal process is complete and close the program. </p>
- <h2>Conclusion</h2>
-
- <h2>Frequently Asked Questions</h2>
- <p>In this section we will answer some frequently asked questions about Recuva.</p>
- <h4>Can I recover files from SSD disk with Recuva? </h4>
- <p>Recovering files from an SSD disk using Recuva can be difficult due to the special features of this type of disk. SSD disks use TRIM technology, which automatically erases data from the disk after it has been deleted to increase speed and prolong disk life. This means that deleted files may not be available for recovery. However, you can still try using Recuva to search for files on an SSD disk, as TRIM does not always work instantly or completely. </p>
- <h4>Can I recover files from an encrypted <h4>Can I recover files from an encrypted disk with Recuva? </h4>
- <p>Recover files from an encrypted disk using Recuva depends on what type of encryption was used. If the disk has been encrypted with BitLocker, Recuva will not be able to recover the files until you unlock the disk with a password or key. If the disk has been encrypted with another program, such as VeraCrypt or TrueCrypt, Recuva can recover files if you connect the encrypted disk as a virtual drive and run Recuva on it. In any case, you need to know the password or key to access encrypted data. </p>
- <h4>Can I recover files from a damaged disk with Recuva? </h4>
- <p>Recovering files from a corrupted disk with Recuva can be possible if the damage does not affect the physical structure of the disk. If the disk has logical errors such as a corrupted file system, Recuva may try to fix them and find the files. If the disk has physical damage such as scratches, Recuva will not be able to recover the files as they may not be readable. In this case, you need to contact data recovery specialists. </p>
-
- <p>Recovering files from a formatted disk using Recuva depends on the type of formatting performed. If the disk has been formatted with quick formatting, Recuva can recover the files as they have not been completely erased from the disk. If the disk was formatted with full formatting, Recuva would not be able to recover the files because they were overwritten with zeros or other data. In this case, the chances of recovery are very small. </p>
- <h4>Can I recover files from a remote partition with Recuva? </h4>
- <p>Recovering files from a remote partition with Recuva can be possible if the partition has not been overwritten by another partition or data. To do this, you need to run Recuva and select the "Elsewhere" option in the search location window. Then you need to select the physical disk on which the remote partition was located and run a deep scan. Recuva will try to find and recover files from the remote partition. </p>
- <h4>Can I recover files after reinstalling Windows with Recuva? </h4>
- <p>Recovering files after reinstalling Windows with Recuva can be difficult because Windows reinstallation can overwrite or delete data on disk C. If you want to save your data before reinstalling Windows, then you need to copy them to another disk or device. If you have already reinstalled Windows and want to recover your data, you need to run Recuva and select C in the search location window. Run a deep scan and see what Recuva can find. You may be able to recover some files that were not overwritten or deleted during Windows reinstallation. However, the chances are not very high, so it is best to always back up your data. </p>
-
- Recuva: how to recover deleted files from computer and other devices</h1>
- <p>Did you accidentally delete an important file from a computer, flash drive, memory card or other device? Do not despair, there is a way to return it back. To do this, you will need a Recuva program that can recover files even after disk formatting or cleaning up the trash. In this article we will tell you what Recuva is, how to download and install it on Windows, how to use it to recover or safely delete files. </p>
- <h2>What is Recuva and why is it needed? </h2>
- <p>Recuva is a free file recovery software developed by Piriform, also known for other products such as CCleaner, Defraggler and Speccy. Recuva can help you find and recover lost data from any media connected to your computer. </p>
- <h3>The main features and benefits of Recuva</h3>
- <p>Recuva has a number of features and benefits that make it one of the best data recovery programs. Here are some of them:</p>
- <ul>
- <li</p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/Benson/text-generation/Examples/Apk Kafa Topu 2.md DELETED
@@ -1,123 +0,0 @@
1
-
2
- <h1>Kafa Topu 2: Un juego de fútbol en línea divertido y competitivo</h1>
3
- <p>Tabla de contenido</p>
4
- <ul>
5
- <li><a href="#intro">Introducción</a></li>
6
- <li><a href="#features">Características de Kafa Topu 2</a></li>
7
- <ul>
8
- <li><a href="#characters">Personajes y accesorios</a></li>
9
- <li><a href="#powers">Super Powers</a></li>
10
- <li><a href="#teams">Equipos y Ligas</a></li>
11
- <li><a href="#social">Integración de redes sociales</a></li>
12
- </ul>
13
- <li><a href="#download">Cómo descargar Kafa Topu 2 APK</a></li>
14
- <ul>
15
- <li><a href="#android">Para dispositivos Android</a></li>
16
- <li><a href="#pc">Para usuarios de PC</a></li>
17
- </ul>
18
- <li><a href="#tips">Consejos y trucos para jugar Kafa Topu 2</a></li>
19
- <li><a href="#conclusion">Conclusión</a></li>
20
- <li><a href="#faqs">FAQs</a></li>
21
- </ul>
22
- <h2 id="intro">Introducción</h2>
23
- <h2 id="features">Características de Kafa Topu 2</h2>
24
- <h3 id="characters">Personajes y accesorios</h3>
25
- <h3 id="powers">Super Powers</h3>
26
- <h3 id="teams">Equipos y Ligas</h3>
27
- <h3 id="social">Integración de redes sociales</h3>
28
- <h2 id="download">Cómo descargar Kafa Topu 2 APK</h2>
29
- <h3 id="android">Para dispositivos Android</h3>
30
- <h3 id="pc">Para usuarios de PC</h3>
31
- <h2 id="tips">Consejos y trucos para jugar Kafa Topu 2</h2>
32
- <h2 id="conclusión">Conclusión</h2>
33
- <h2 id="faqs">FAQs</h2>. Aquí está un ejemplo de cómo escribiría el artículo basado en el esquema: <h1>Kafa Topu 2: Un juego de fútbol en línea divertido y competitivo</h1>
34
- <p>Tabla de contenido</p>
35
- <h2>apk kafa topu 2</h2><br /><p><b><b>Download</b> &#9999; &#9999; &#9999; <a href="https://bltlly.com/2v6IVn">https://bltlly.com/2v6IVn</a></b></p><br /><br />
36
- <ul>
37
- <li><a href="#intro">Introducción</a></li>
38
- <li><a href="#features">Características de Kafa Topu 2</a></li>
39
- <ul>
40
- <li><a href="#characters">Personajes y accesorios</a></li>
41
- <li><a href="#powers">Super Powers</a></li>
42
- <li><a href="#teams">Equipos y Ligas</a></li>
43
- <li><a href="#social">Integración de redes sociales</a></li>
44
- </ul>
45
- <li><a href="#download">Cómo descargar Kafa Topu 2 APK</a></li>
46
- <ul>
47
- <li><a href="#android">Para dispositivos Android</a></li>
48
- <li><a href="#pc">Para usuarios de PC</a></li>
49
- </ul>
50
-
51
- <li><a href="#conclusion">Conclusión</a></li>
52
- <li><a href="#faqs">FAQs</a></li>
53
- </ul>
54
- <h2 id="intro">Introducción</h2>
55
- <p>¿Te encanta el fútbol? ¿Te gusta jugar juegos en línea con tus amigos? Si respondiste sí a ambas preguntas, entonces definitivamente deberías echar un vistazo a Kafa Topu 2, un divertido y competitivo juego de fútbol en línea que te mantendrá entretenido durante horas. </p>
56
- <p>Kafa Topu 2 es la secuela del popular juego en línea Kafa Topu, que tiene millones de fans en todo el mundo. En este juego, puede crear su propio héroe de fútbol, personalizar su apariencia y accesorios, unirse a un equipo o crear el suyo propio, y competir en varias ligas y torneos contra oponentes reales de todo el mundo. </p>
57
- <p>Kafa Topu 2 no es su juego de fútbol típico. Tiene características de juego únicas que lo hacen más emocionante y desafiante que otros juegos. Por ejemplo, puedes usar superpoderes para mejorar tu rendimiento, marcar goles increíbles y derrotar a tus rivales. También puedes interactuar con otros jugadores a través de la integración de redes sociales, chatear con ellos, enviarles regalos y retarlos a partidos amistosos. </p>
58
- <p>Si usted está buscando una nueva manera de disfrutar de fútbol en línea, entonces Kafa Topu 2 es el juego para usted. En este artículo, te contaremos más sobre las características de Kafa Topu 2, cómo descargar el archivo APK para tu dispositivo y algunos consejos y trucos para ayudarte a mejorar tus habilidades y divertirte más. </p>
59
- <h2 id="features">Características de Kafa Topu 2</h2>
60
- <p>Kafa Topu 2 tiene muchas características que lo hacen destacar de otros juegos de fútbol en línea. Estos son algunos de ellos:</p>
61
- <h3 id="characters">Personajes y accesorios</h3>
62
-
63
- <p>También puedes actualizar tu personaje para mejorar sus atributos, como velocidad, salto, poder de disparo y súper poder. Cuanto más alto sea tu nivel, más opciones tendrás para mejorar tu personaje. También puedes usar objetos especiales, como tarjetas y pociones, para mejorar tu rendimiento temporalmente. </p>
64
- <h3 id="powers">Super Powers</h3>
65
- <p>Otra característica que hace Kafa Topu 2 diferente de otros juegos de fútbol es el uso de superpoderes. Los súper poderes son habilidades especiales que puedes activar durante una partida para obtener una ventaja sobre tu oponente. Por ejemplo, puedes usar una bola de fuego para lanzar un tiro poderoso que puede quemar la red de goles del oponente, o un imán para atraer la pelota a tu cabeza. </p>
66
- <p>Hay más de 30 superpoderes diferentes en Kafa Topu 2, cada uno con sus propios efectos y animaciones. Puedes desbloquear nuevos súper poderes a medida que avanzas en el juego, o comprarlos con diamantes o monedas. También puedes actualizar tus súper poderes para hacerlos más efectivos y durar más. </p>
67
- <p></p>
68
- <h3 id="teams">Equipos y Ligas</h3>
69
- <p>Kafa Topu 2 no es solo un juego para jugadores en solitario. También puede unirse a un equipo o crear su propio equipo y jugar con otros jugadores que comparten su pasión por el fútbol. Al unirte a un equipo, puedes participar en eventos de equipo, como torneos y ligas, donde puedes competir contra otros equipos y ganar recompensas. </p>
70
- <p>También puedes chatear con tus compañeros de equipo, enviarles regalos, invitarlos a partidos y apoyarlos durante sus partidos. También puedes contribuir al progreso de tu equipo donando monedas o diamantes. También puedes crear tu propio equipo e invitar a tus amigos a unirse a ti. Puedes personalizar el nombre, el logotipo y el lema de tu equipo, y gestionar los miembros y eventos de tu equipo. </p>
71
- <h3 id="social">Integración de redes sociales</h3>
72
-
73
- <p>También puedes chatear con otros jugadores en el juego, enviarles mensajes, emojis y regalos, y retarlos a partidos amistosos. También puede unirse a la comunidad oficial de Kafa Topu 2 en las plataformas de redes sociales, donde puede interactuar con otros fans, obtener las últimas noticias y actualizaciones, participar en concursos y regalos, y obtener apoyo de los desarrolladores. </p>
74
- <h2 id="download">Cómo descargar Kafa Topu 2 APK</h2>
75
- <p>Si usted está interesado en jugar Kafa Topu 2, es posible que se pregunte cómo descargar el archivo APK para su dispositivo. APK significa Android Package Kit, y es un formato de archivo que le permite instalar aplicaciones que no están disponibles en Google Play Store. Aquí están los pasos para descargar Kafa Topu 2 APK para dispositivos Android y usuarios de PC:</p>
76
- <h3 id="android">Para dispositivos Android</h3>
77
- <ol>
78
- <li>Vaya al sitio web oficial de Kafa Topu 2 en <a href="">https://www.kafatopu2.com/</a> y haga clic en el botón "Descargar". </li>
79
- <li>Usted será redirigido a una página donde se puede elegir la versión del archivo APK que se adapte a su dispositivo. Por ejemplo, si tienes un dispositivo Android 10, puedes elegir la opción "Android 10". </li>
80
- <li>Después de elegir la versión, haga clic en el botón "Descargar APK" y espere a que el archivo se descargue en su dispositivo. </li>
81
- <li>Antes de instalar el archivo APK, es necesario habilitar la "Fuentes desconocidas" opción en el dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
82
- <li> Ahora puede instalar el archivo APK tocando en él y siguiendo las instrucciones en la pantalla. </li>
83
- <li>Una vez completada la instalación, puede iniciar el juego y disfrutar jugando Kafa Topu 2.</li>
84
- </ol>
85
- <h3 id="pc">Para usuarios de PC</h3>
86
- <ol>
87
- <li>Si desea jugar Kafa Topu 2 en su PC, es necesario utilizar un emulador de Android. Un emulador de Android es un software que le permite ejecutar aplicaciones Android en su PC. Hay muchos emuladores de Android disponibles en línea, como BlueStacks, NoxPlayer, MEmu, etc.</li>
88
-
89
- <li>Después de instalar el emulador, iniciarlo e iniciar sesión con su cuenta de Google. </li>
90
- <li>Vaya al sitio web oficial de Kafa Topu 2 en <a href="">https://www.kafatopu2.com/</a> y haga clic en el botón "Descargar". </li>
91
- <li>Usted será redirigido a una página donde se puede elegir la versión del archivo APK que se adapte a su emulador. Por ejemplo, si está usando BlueStacks, puede elegir la opción "BlueStacks". </li>
92
- <li>Después de elegir la versión, haga clic en el "Descargar APK" botón y esperar a que el archivo para ser descargado a su PC.</li>
93
- <li>Una vez descargado el archivo, arrástrelo y suéltelo en la ventana del emulador o use el administrador de archivos del emulador para localizarlo e instalarlo. </li>
94
- <li>Una vez completada la instalación, puede iniciar el juego y disfrutar jugando Kafa Topu 2 en su PC.</li>
95
- </ol>
96
- <h2 id="tips">Consejos y trucos para jugar Kafa Topu 2</h2>
97
- <p>Kafa Topu 2 es un juego que requiere habilidad, estrategia y práctica para dominar. Aquí hay algunos consejos y trucos que pueden ayudarte a mejorar tu juego y divertirte más:</p>
98
- <ul>
99
- <li>Practica en modo offline antes de jugar online. El modo offline te permite jugar contra oponentes de IA con diferentes niveles de dificultad. Esto puede ayudarte a familiarizarte con los controles, la mecánica de juego, los personajes, los superpoderes y los mapas. </li>
100
- <li>Elige un personaje que se adapte a tu estilo de juego. Cada personaje tiene sus propias fortalezas y debilidades, como velocidad, salto, poder de disparo y súper poder. Experimenta con diferentes caracteres y encuentra uno que coincida con tus preferencias. </li>
101
- <li>Usa súper poderes sabiamente. Los súper poderes pueden darte una ventaja sobre tu oponente, pero también tienen un tiempo de reutilización y una duración limitada. Úsalos en el momento adecuado, como cuando estés en una buena posición para anotar, cuando necesites defender tu objetivo o cuando quieras sorprender a tu oponente. </li>
102
-
103
- <li>Únete a un equipo o crea el tuyo. Jugar con un equipo puede hacer el juego más divertido y gratificante. Puedes unirte a un equipo existente o crear tu propio equipo e invitar a tus amigos a unirse a ti. Al jugar con un equipo, puedes participar en eventos de equipo, chatear con tus compañeros de equipo, enviarles regalos y apoyarlos durante sus partidos. </li>
104
- <li>Sigue el juego en las redes sociales. Al seguir el juego en las plataformas de redes sociales, como Facebook, Twitter e Instagram, puedes obtener las últimas noticias y actualizaciones, participar en concursos y regalos, obtener apoyo de los desarrolladores e interactuar con otros fans. </li>
105
- </ul>
106
- <h2 id="conclusión">Conclusión</h2>
107
- <p>Kafa Topu 2 es un divertido y competitivo juego de fútbol en línea que te mantendrá entretenido durante horas. Puede crear su propio héroe de fútbol, personalizar su apariencia y accesorios, unirse a un equipo o crear el suyo propio, y competir en varias ligas y torneos contra oponentes reales de todo el mundo. También puedes usar superpoderes para mejorar tu rendimiento, marcar goles increíbles y derrotar a tus rivales. También puedes interactuar con otros jugadores a través de la integración de redes sociales, chatear con ellos, enviarles regalos y retarlos a partidos amistosos. </p>
108
- <p>Si usted está buscando una nueva manera de disfrutar de fútbol en línea, entonces Kafa Topu 2 es el juego para usted. Puede descargar el archivo APK para su dispositivo desde el sitio web oficial de Kafa Topu 2 en <a href="">https://www.kafatopu2.com/</a> y comenzar a jugar de inmediato. También puedes seguir el juego en las plataformas de redes sociales para obtener más información y apoyo. </p>
109
- <p>Esperamos que este artículo te haya ayudado a aprender más sobre Kafa Topu 2 y cómo jugarlo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Nos encantaría saber de usted. </p>
110
- <h2 id="faqs">FAQs</h2>
111
- <p>Aquí hay algunas preguntas frecuentes sobre Kafa Topu 2:</p>
112
- <ol>
113
- <li><b>What is Kafa Topu 2?</b></li>
114
- <p>Kafa Topu 2 is a fun, competitive online head-football game in which you create your own football hero, join or create a team, and compete in leagues and tournaments against real opponents from around the world.</p>
115
- <li><b>How do I download the Kafa Topu 2 APK?</b></li>
116
- <p>You can download the Kafa Topu 2 APK from the official Kafa Topu 2 website at <a href="">https://www.kafatopu2.com/</a>. Choose the APK version that suits your device or emulator, and enable the "Unknown sources" option on your device or emulator before installing the file. </p>
117
- <li><b>How do I use super powers in Kafa Topu 2?</b></li>
118
- <p>You can use super powers in Kafa Topu 2 by tapping the super power icon in the bottom-right corner of the screen during a match. You can choose from more than 30 different super powers, each with its own effects and animations. You can unlock new super powers as you progress through the game, or buy them with diamonds or coins. You can also upgrade your super powers to make them more effective and longer lasting. </p>
119
- <li><b>How do I join or create a team in Kafa Topu 2?</b></li>
120
- <p>You can join or create a team in Kafa Topu 2 by tapping the team icon in the bottom-left corner of the screen. You can search for an existing team or create your own by entering a name, logo, and motto. You can invite your friends to join your team by sending them a code or a link. By joining a team, you can take part in team events, chat with your teammates, send them gifts, and support them during their matches. </p>
121
- <li><b>How do I follow Kafa Topu 2 on social media?</b></li>
- <p>You can follow Kafa Topu 2 on social media platforms such as Facebook, Twitter, and Instagram to get the latest news and updates, take part in contests and giveaways, and interact with other fans and the developers. </p>
- </ol>
122
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Fondo De Pantalla Para Macbook Aire.md DELETED
@@ -1,149 +0,0 @@
1
- <h1>How to download desktop wallpapers for your MacBook Air</h1>
2
- <p>Wallpaper is the image or color that appears in the background of your desktop. It can make your MacBook Air look more personalized, attractive, and inspiring. You can choose from a variety of wallpapers provided by Apple, or use your own photos or images from the web. </p>
3
- <p>In this article, you will learn how to download desktop wallpapers for your MacBook Air from different sources, and how to change and customize your wallpaper settings. Whether you want a stunning landscape, a cute animal, or a motivational quote, you will discover how to make your desktop look amazing in a few simple steps. </p>
6
- <h2>What you need to download wallpapers for your MacBook Air</h2>
7
- <p>Before you start downloading wallpapers for your MacBook Air, you will need a few things:</p>
8
- <ul>
9
- <li>A MacBook Air running macOS Monterey, Ventura, or later. </li>
10
- <li>An internet connection. </li>
11
- <li>A web browser (such as Safari, Chrome, or Firefox). </li>
12
- <li>A folder or photo album where you want to save your downloaded wallpaper images. </li>
13
- </ul>
14
- <p>You will also need some basic knowledge of how to use your MacBook Air, such as how to open apps, browse the web, and save files. </p>
15
- <h2>How to find the best wallpaper sources for your MacBook Air</h2>
17
- <p>Many websites offer free, high-quality wallpaper images for your MacBook Air. However, not all of them are safe, legal, or suited to your screen resolution. Here are some tips, and some websites, to help you find the best wallpaper sources for your MacBook Air:</p>
18
- <ul>
19
- <li>Look for websites that have a large collection of wallpaper images in different categories, such as nature, animals, art, abstract, etc.</li>
20
- <li>Check the license and terms of use of the wallpaper images before downloading them. Some websites may require attribution or permission from the original author. </li>
21
- <li>Choose images that match or exceed your MacBook Air's screen resolution so they do not look blurry or stretched. </li>
22
- <li>Avoid downloading wallpaper images from suspicious or unknown websites that may contain malware or viruses. </li>
23
- </ul>
24
- <p>Here are some of the best websites that offer free, high-quality wallpaper images for your MacBook Air:</p>
25
- <table>
26
- <tr>
27
- <th>Website</th>
28
- <th>Description</th>
29
- </tr>
30
- <tr>
31
- <td><a href="https://www.pexels.com/">Pexels</a></td>
32
- <td>Pexels is a popular website that offers thousands of free stock photos and videos you can use for personal and commercial purposes. You can browse by category, color, orientation, size, or popularity. </td>
33
- </tr>
34
- <tr>
35
- <td><a href="https://unsplash.com/">Unsplash</a></td>
36
- <td>Unsplash is another website, offering over 2 million free high-resolution images that can be used for anything. You can browse by collections and topics, or search by keywords. </td>
37
- </tr>
38
- <tr>
39
- <td><a href="https://www.wallpaperflare.com/">Wallpaper Flare</a></td>
40
- <td>Wallpaper Flare is a website that specializes in high-definition wallpapers for different devices and screen resolutions. You can browse categories such as anime, games, movies, nature, etc.</td>
41
- </tr>
42
- <tr>
43
- <td><a href="https://9to5mac.com/">9to5Mac</a></td>
44
- <td>9to5Mac is a website that covers news, reviews, tips, and downloads for Apple products. You can find wallpapers inspired by Apple's official wallpapers, such as macOS Ventura, iOS 15, etc.</td>
45
- </tr>
46
- </table>
47
- <h2>How to download wallpaper images from the web</h2>
48
- <p>Once you have found a wallpaper image you like on one of the websites above, you can download it to your MacBook Air by following these steps:</p>
49
- <ol>
50
- <li>Click on the wallpaper image to open it at full size. </li>
51
- <li>Right-click on the image and select Save Image As...</li>
52
- <li>Choose a folder or photo album where you want to save the image. You can create a new folder by clicking the New Folder button at the bottom left of the window. </li>
53
- <li>Give the image a name and click Save.</li>
54
- </ol>
55
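- <p>If you prefer to script the download, the short Python sketch below fetches an image directly and saves it into a folder. It is only an illustration: the URL and file names are placeholders that you would replace with the direct link to the wallpaper you chose.</p>
- <pre><code>
- import urllib.request
- from pathlib import Path
- 
- # Placeholder URL: replace with the direct link to your chosen wallpaper.
- url = "https://example.com/wallpaper.jpg"
- dest = Path.home() / "Pictures" / "Wallpapers" / "wallpaper.jpg"
- 
- dest.parent.mkdir(parents=True, exist_ok=True)  # create the folder if needed
- urllib.request.urlretrieve(url, str(dest))      # download and save the image
- print(f"Saved to {dest}")
- </code></pre>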
-
56
- <h2>How to change the wallpaper on your MacBook Air</h2>
57
- <p>After you have downloaded some wallpaper images, you can change your desktop background using one of these methods:</p>
58
- <h3>Using System Preferences</h3>
59
- <ol>
60
- <li>Click the Apple logo in the top-left corner of the screen and select System Preferences.</li>
61
- <li>Click Desktop & Screen Saver.</li>
62
- <li>Click the Desktop tab. </li>
63
- <li>Select the folder or photo album in the left sidebar where you saved your wallpaper images. </li>
64
- <li>Click the wallpaper image you want to use in the right pane. </li>
65
- </ol>
66
- <p>You can also choose from the default wallpapers provided by Apple by selecting one of the categories in the left sidebar, such as Apple, Colors, Photos, etc.</p>
67
- <h3>Using right-click</h3>
68
- <ol>
69
- <li>Find the wallpaper image you want to use in Finder or on your desktop. </li>
70
- <li>Right-click on the image and select Set Desktop Picture.</li>
71
- </ol>
72
- <p>This will instantly change your desktop background to the selected image. </p>
73
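- <p>The same change can also be scripted. The minimal Python sketch below uses macOS's built-in osascript tool to ask Finder to set the desktop picture; the image path is a placeholder for a wallpaper you have already downloaded.</p>
- <pre><code>
- import subprocess
- from pathlib import Path
- 
- # Placeholder path: point this at the wallpaper image you downloaded.
- image = Path.home() / "Pictures" / "Wallpapers" / "wallpaper.jpg"
- 
- # AppleScript one-liner that tells Finder to change the desktop picture.
- script = f'tell application "Finder" to set desktop picture to POSIX file "{image}"'
- subprocess.run(["osascript", "-e", script], check=True)
- </code></pre>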
- <h2>How to customize the wallpaper settings on your MacBook Air</h2>
74
- <p>If you want to customize your wallpaper settings further, such as how the image fits your screen, how often it changes, or how it adapts to light and dark mode, you can use these options:</p>
75
- <h3>Adjusting the display options</h3>
76
- <p>You can adjust how your wallpaper image fills your screen using the display options at the bottom of the Desktop tab in System Preferences. You can choose from these options:</p>
77
- <ul>
78
- <li>Fill Screen: stretches or crops your image to fill the entire screen. </li>
79
- <li>Fit to Screen: resizes your image to fit the screen without changing its aspect ratio. </li>
80
- <li>Center: centers your image on the screen without changing its size. </li>
81
- <li>Tile: repeats your image across the screen like tiles. </li>
82
- <li>Stretch to Fill Screen: stretches your image to fill the entire screen without cropping it. </li>
83
- </ul>
84
- <p>You can also adjust the color of the space around your image by clicking the color picker next to the display options. </p>
85
- <h3>Cycling through multiple images</h3>
86
- <p>If you want to use more than one wallpaper image and have them change automatically, you can use the Change Picture option at the bottom of the Desktop tab in System Preferences:</p>
87
- <ul>
88
- <li>Select the folder or photo album in the left sidebar where you saved your wallpaper images. </li>
89
- <li>Check the box next to Change Picture and select how often you want your wallpaper to change. You can choose intervals such as every 5 seconds, every hour, every day, etc.</li>
90
- <li>You can also check the box next to Random Order if you want your wallpaper images to change in a random order instead of sequentially. </li>
91
- </ul>
92
- <p>This creates a slideshow of your wallpaper images that cycles according to your settings. </p>
93
- <h3>Using dynamic or light and dark wallpapers</h3>
94
- <p>If you want wallpapers that change with the time of day or with your MacBook Air's appearance mode, you can use dynamic or light and dark wallpapers. These are some of the default wallpapers provided by Apple that have different versions for day and night, or for light and dark mode. You can find them in the Desktop tab in System Preferences under Dynamic Desktop or Light and Dark Desktop. You can choose from these options:</p>
95
- <ul>
96
- <li>Select a dynamic or light and dark wallpaper from the right pane. You can preview how it changes by moving your cursor over it. </li>
97
- <li>If you choose a dynamic wallpaper, you can adjust how it changes based on your location or time zone by clicking Dynamic Desktop Options at the bottom right of the window. You can choose from Solar, Lunar, or Time Shift options. </li>
98
- 
99
- </ul>
100
- <p>This makes your wallpaper match the ambient light or the theme of your MacBook Air.</p>
101
- <h2>Conclusion</h2>
102
- <p>Downloading wallpapers for your MacBook Air is a fun and easy way to personalize your desktop and make it more attractive. You can find and download wallpaper images from various websites, change and adjust your wallpaper settings, and use dynamic or light and dark wallpapers to suit your mood or surroundings. You can also experiment with different wallpaper images and see which ones you like best. </p>
103
- <p>We hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below. We would love to hear from you! </p>
104
- <h2>Frequently asked questions</h2>
105
- <h3>How do I download wallpapers for my MacBook Air from my iPhone or iPad?</h3>
106
- <p>If you have an iPhone or iPad, you can download wallpaper images on your device and transfer them to your MacBook Air using AirDrop. Here's how:</p>
107
- <ol>
108
- <li>On your iPhone or iPad, find the wallpaper image you want to use in Photos or Safari.</li>
109
- <li>Tap the Share icon at the bottom left of the screen and select AirDrop.</li>
110
- <li>On your MacBook Air, make sure AirDrop is enabled in Finder or Control Center.</li>
111
- <li>Select your MacBook Air from the list of devices that appears on your iPhone or iPad. </li>
112
- <li>On your MacBook Air, accept the incoming file and choose where to save it. </li>
113
- </ol>
114
- <h3>How do I download wallpapers for my MacBook Air from a USB drive or external hard drive?</h3>
115
- <p>If you have a USB drive or external hard drive that contains wallpaper images, you can copy them to your MacBook Air by following these steps:</p>
116
- <ol>
117
- <li>Connect your USB drive or external hard drive to your MacBook Air using a USB cable or adapter. </li>
118
- <li>Open Finder and locate your USB drive or external hard drive under Locations in the left sidebar. </li>
119
- <li>Open the folder that contains your wallpaper images and select the ones you want to use. </li>
120
- <li>Drag and drop them onto your desktop or into a folder on your MacBook Air.</li>
121
- <li>Eject your USB drive or external hard drive by clicking the Eject icon next to its name in Finder.</li>
122
- </ol>
123
- <h3>How do I download wallpapers for my MacBook Air from a CD or DVD?</h3>
124
- <p>If you have a CD or DVD that contains wallpaper images, you can copy them to your MacBook Air by following these steps:</p>
125
- <ol>
126
- <li>Insert your CD or DVD into your MacBook Air's optical drive. If you don't have an optical drive, you can use an external one that connects via USB.</li>
127
- <li>Open Finder and locate your CD or DVD under Devices in the left sidebar. </li>
128
- <li>Open the folder that contains your wallpaper images and select the ones you want to use. </li>
129
- <li>Drag and drop them onto your desktop or into a folder on your MacBook Air.</li>
130
- <li>Eject your CD or DVD by clicking the Eject icon next to its name in Finder.</li>
131
- </ol>
132
- <h3>How do I download wallpapers for my MacBook Air from an email attachment?</h3>
133
- <p>If you have received an email with a wallpaper image as an attachment, you can download it to your MacBook Air by following these steps:</p>
134
- <ol>
135
- <li>Open the email containing the attachment in Mail or another email app. </li>
136
- <li>Click the attachment icon at the bottom of the email to preview it. </li>
137
- <li>Right-click on the image and select Save Attachment...</li>
138
- <li>Choose a folder or photo album where you want to save the image and click Save.</li>
139
- </ol>
140
- <h3>How do I download wallpapers for my MacBook Air from a cloud service?</h3>
141
- <p>If you have stored some wallpaper images in a cloud service such as iCloud, Dropbox, or Google Drive, you can download them to your MacBook Air by following these steps:</p>
142
- <ol>
143
- <li>Open Safari or another web browser and go to your cloud service's website. </li>
144
- <li>Sign in with your username and password if required. </li>
145
- <li>Locate the wallpaper images you want to use and select them. </li>
146
- <li>Click the Download icon at the top right of the window and choose where to save them on your MacBook Air.</li>
147
- </ol>
148
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/diagnose.py DELETED
@@ -1,37 +0,0 @@
1
- import os
2
- import platform
3
-
4
- from pip._vendor.rich import inspect
5
- from pip._vendor.rich.console import Console, get_windows_console_features
6
- from pip._vendor.rich.panel import Panel
7
- from pip._vendor.rich.pretty import Pretty
8
-
9
-
10
- def report() -> None: # pragma: no cover
11
- """Print a report to the terminal with debugging information"""
12
- console = Console()
13
- inspect(console)
14
- features = get_windows_console_features()
15
- inspect(features)
16
-
17
- env_names = (
18
- "TERM",
19
- "COLORTERM",
20
- "CLICOLOR",
21
- "NO_COLOR",
22
- "TERM_PROGRAM",
23
- "COLUMNS",
24
- "LINES",
25
- "JUPYTER_COLUMNS",
26
- "JUPYTER_LINES",
27
- "JPY_PARENT_PID",
28
- "VSCODE_VERBOSE_LOGGING",
29
- )
30
- env = {name: os.getenv(name) for name in env_names}
31
- console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables"))
32
-
33
- console.print(f'platform="{platform.system()}"')
34
-
35
-
36
- if __name__ == "__main__": # pragma: no cover
37
- report()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/requirements.py DELETED
@@ -1,146 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
- import re
6
- import string
7
- import urllib.parse
8
- from typing import List, Optional as TOptional, Set
9
-
10
- from setuptools.extern.pyparsing import ( # noqa
11
- Combine,
12
- Literal as L,
13
- Optional,
14
- ParseException,
15
- Regex,
16
- Word,
17
- ZeroOrMore,
18
- originalTextFor,
19
- stringEnd,
20
- stringStart,
21
- )
22
-
23
- from .markers import MARKER_EXPR, Marker
24
- from .specifiers import LegacySpecifier, Specifier, SpecifierSet
25
-
26
-
27
- class InvalidRequirement(ValueError):
28
- """
29
- An invalid requirement was found, users should refer to PEP 508.
30
- """
31
-
32
-
33
- ALPHANUM = Word(string.ascii_letters + string.digits)
34
-
35
- LBRACKET = L("[").suppress()
36
- RBRACKET = L("]").suppress()
37
- LPAREN = L("(").suppress()
38
- RPAREN = L(")").suppress()
39
- COMMA = L(",").suppress()
40
- SEMICOLON = L(";").suppress()
41
- AT = L("@").suppress()
42
-
43
- PUNCTUATION = Word("-_.")
44
- IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
45
- IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
46
-
47
- NAME = IDENTIFIER("name")
48
- EXTRA = IDENTIFIER
49
-
50
- URI = Regex(r"[^ ]+")("url")
51
- URL = AT + URI
52
-
53
- EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
54
- EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
55
-
56
- VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
57
- VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
58
-
59
- VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
60
- VERSION_MANY = Combine(
61
- VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
62
- )("_raw_spec")
63
- _VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
64
- _VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
65
-
66
- VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
67
- VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
68
-
69
- MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
70
- MARKER_EXPR.setParseAction(
71
- lambda s, l, t: Marker(s[t._original_start : t._original_end])
72
- )
73
- MARKER_SEPARATOR = SEMICOLON
74
- MARKER = MARKER_SEPARATOR + MARKER_EXPR
75
-
76
- VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
77
- URL_AND_MARKER = URL + Optional(MARKER)
78
-
79
- NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
80
-
81
- REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
82
- # setuptools.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
83
- # issue #104
84
- REQUIREMENT.parseString("x[]")
85
-
86
-
87
- class Requirement:
88
- """Parse a requirement.
89
-
90
- Parse a given requirement string into its parts, such as name, specifier,
91
- URL, and extras. Raises InvalidRequirement on a badly-formed requirement
92
- string.
93
- """
94
-
95
- # TODO: Can we test whether something is contained within a requirement?
96
- # If so how do we do that? Do we need to test against the _name_ of
97
- # the thing as well as the version? What about the markers?
98
- # TODO: Can we normalize the name and extra name?
99
-
100
- def __init__(self, requirement_string: str) -> None:
101
- try:
102
- req = REQUIREMENT.parseString(requirement_string)
103
- except ParseException as e:
104
- raise InvalidRequirement(
105
- f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
106
- )
107
-
108
- self.name: str = req.name
109
- if req.url:
110
- parsed_url = urllib.parse.urlparse(req.url)
111
- if parsed_url.scheme == "file":
112
- if urllib.parse.urlunparse(parsed_url) != req.url:
113
- raise InvalidRequirement("Invalid URL given")
114
- elif not (parsed_url.scheme and parsed_url.netloc) or (
115
- not parsed_url.scheme and not parsed_url.netloc
116
- ):
117
- raise InvalidRequirement(f"Invalid URL: {req.url}")
118
- self.url: TOptional[str] = req.url
119
- else:
120
- self.url = None
121
- self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
122
- self.specifier: SpecifierSet = SpecifierSet(req.specifier)
123
- self.marker: TOptional[Marker] = req.marker if req.marker else None
124
-
125
- def __str__(self) -> str:
126
- parts: List[str] = [self.name]
127
-
128
- if self.extras:
129
- formatted_extras = ",".join(sorted(self.extras))
130
- parts.append(f"[{formatted_extras}]")
131
-
132
- if self.specifier:
133
- parts.append(str(self.specifier))
134
-
135
- if self.url:
136
- parts.append(f"@ {self.url}")
137
- if self.marker:
138
- parts.append(" ")
139
-
140
- if self.marker:
141
- parts.append(f"; {self.marker}")
142
-
143
- return "".join(parts)
144
-
145
- def __repr__(self) -> str:
146
- return f"<Requirement('{self}')>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Blessin/yes-and-improv-game/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Yes And Improv Game
3
- emoji: 📊
4
- colorFrom: red
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.50.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CHDCruze/entertainmentbybhdcruze/index.html DELETED
@@ -1,21 +0,0 @@
1
- <!DOCTYPE html>
2
- <html>
3
- <head>
4
- <meta charset="utf-8" />
5
- <meta name="viewport" content="width=device-width" />
6
- <title>My static Space</title>
7
- <link rel="stylesheet" href="style.css" />
8
- </head>
9
- <body>
10
- <div class="card">
11
- <h1>Welcome to your static Space!</h1>
12
- <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
13
- <p>A blog for TV shows.</p>
14
- <p>
15
- Also don't forget to check the
17
- <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
18
- </p>
19
- </div>
20
- </body>
21
- </html>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/BrAD/app.py DELETED
@@ -1,112 +0,0 @@
1
- import pickle
2
- import os
3
- from sklearn.neighbors import NearestNeighbors
4
- import numpy as np
5
- import gradio as gr
6
- from PIL import Image
7
-
8
- data_root = 'https://ai-vision-public-datasets.s3.eu.cloud-object-storage.appdomain.cloud/DomainNet'
9
- feat_dir = 'brad_feats'
10
- domains = ['sketch', 'painting', 'clipart', 'real']
11
- shots = '-1'
12
- num_nn = 20
13
-
14
- search_domain = 'all'
15
- num_results_per_domain = 5
16
- src_data_dict = {}
17
- class_list = []
18
- if search_domain == 'all':
19
- for d in domains:
20
- with open(os.path.join(feat_dir, f'dst_{d}_{shots}.pkl'), 'rb') as fp:
21
- src_data = pickle.load(fp)
22
- if class_list == []:
23
- for p in src_data[0]:
24
- cl = p.split('/')[-2]
25
- if cl not in class_list:
26
- class_list.append(cl)
27
- src_nn_fit = NearestNeighbors(n_neighbors=num_results_per_domain, algorithm='auto', n_jobs=-1).fit(src_data[1])
28
- src_data_dict[d] = (src_data,src_nn_fit)
29
- else:
30
-
31
- with open(os.path.join(feat_dir, f'dst_{search_domain}_{shots}.pkl'), 'rb') as fp:
32
- src_data = pickle.load(fp)
33
- src_nn_fit = NearestNeighbors(n_neighbors=num_results_per_domain, algorithm='auto', n_jobs=-1).fit(src_data[1])
34
- src_data_dict[search_domain] = (src_data,src_nn_fit)
35
-
36
- dst_data_dict = {}
37
- min_len = 1e10
38
- for d in domains:
39
- with open(os.path.join(feat_dir, f'src_{d}_{shots}.pkl'), 'rb') as fp:
40
- dest_data = pickle.load(fp)
41
- dst_data_dict[d] = ({cl: ([],[]) for cl in class_list},dest_data[1])
42
- for c, p in enumerate(dest_data[0]):
43
- cl = p.split('/')[-2]
44
- dst_data_dict[d][0][cl][0].append(p)
45
- dst_data_dict[d][0][cl][1].append(c)
46
-
47
- for cl in class_list:
48
- min_len = min(min_len, len(dst_data_dict[d][0][cl]))
49
-
50
- def query(query_index, query_domain, cl):
51
- dst_data = dst_data_dict[query_domain]
52
- dst_img_path = os.path.join(data_root, dst_data[0][cl][0][query_index])
53
- query_index = dst_data[0][cl][1][query_index]
54
- img_paths = [dst_img_path]
55
- q_cl = dst_img_path.split('/')[-2]
56
- captions = [f'Query: {q_cl}'.title()]
57
- for s_domain, s_data in src_data_dict.items():
58
- _, top_n_matches_ids = s_data[1].kneighbors(dst_data[1][query_index:query_index+1])
59
- top_n_labels = s_data[0][2][top_n_matches_ids][0]
60
- src_img_pths = [os.path.join(data_root, s_data[0][0][ix]) for ix in top_n_matches_ids[0]]
61
- img_paths += src_img_pths
62
-
63
- for p in src_img_pths:
64
- src_cl = p.split('/')[-2]
65
- src_file = p.split('/')[-1]
66
- captions.append(src_cl.title())
67
- # print(img_paths)
68
- return tuple([p for p in img_paths])+ tuple(captions)
69
-
70
- demo = gr.Blocks()
71
- with demo:
72
- gr.Markdown('# Unsupervised Domain Generalization by Learning a Bridge Across Domains')
73
- gr.Markdown('This demo showcases the cross-domain retrieval capabilities of our self-supervised cross domain training as presented @CVPR 2022. For details please refer to [the paper](https://openaccess.thecvf.com/content/CVPR2022/papers/Harary_Unsupervised_Domain_Generalization_by_Learning_a_Bridge_Across_Domains_CVPR_2022_paper.pdf)')
74
- gr.Markdown('The model is trained in an unsupervised manner on all domains without class labels. The labels are displayed to indicate retrieval success/failure.')
75
- gr.Markdown('## Instructions:')
76
- gr.Markdown('Select a query domain and a class from the drop-down menus and select any random image index from the domain using the slider below, then press the "Run" button. The query image and the retrieved results from each of the four domains, along with the class label will be presented.')
77
- gr.Markdown('## Select Query Domain: ')
78
- gr.Markdown('# Query Image: \t\t\t\t')
79
- # domain_drop = gr.Dropdown(domains)
80
- # cl_drop = gr.Dropdown(class_list)
81
- # domain_select_button = gr.Button("Select Domain")
82
- # slider = gr.Slider(0, min_len)
83
- # slider = gr.Slider(0, 10000)
84
-
85
- with gr.Row():
86
- with gr.Column():
87
- domain_drop = gr.Dropdown(domains, label='Domain')
88
- cl_drop = gr.Dropdown(class_list, label='Query Class')
89
- slider = gr.Slider(0, 100, label='Query image selector slider')
90
-
91
- # gr.Markdown('\t')
92
- # gr.Markdown('\t')
93
- # gr.Markdown('\t')
94
- with gr.Column():
95
- src_cap = gr.Label()
96
- src_img = gr.Image()
97
- image_button = gr.Button("Run")
98
-
99
-
100
- out_images = []
101
- out_captions = []
102
- for d in domains:
103
- gr.Markdown(f'# Retrieved Images from {d.title()} Domain:')
104
- with gr.Row():
105
- for _ in range(num_results_per_domain):
106
- with gr.Column():
107
- out_captions.append(gr.Label())
108
- out_images.append(gr.Image())
109
-
110
- image_button.click(query, inputs=[slider, domain_drop, cl_drop], outputs=[src_img]+out_images +[src_cap]+ out_captions)
111
-
112
- demo.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pybind11/tests/test_virtual_functions.py DELETED
@@ -1,380 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import pytest
3
-
4
- import env # noqa: F401
5
-
6
- from pybind11_tests import virtual_functions as m
7
- from pybind11_tests import ConstructorStats
8
-
9
-
10
- def test_override(capture, msg):
11
- class ExtendedExampleVirt(m.ExampleVirt):
12
- def __init__(self, state):
13
- super(ExtendedExampleVirt, self).__init__(state + 1)
14
- self.data = "Hello world"
15
-
16
- def run(self, value):
17
- print('ExtendedExampleVirt::run(%i), calling parent..' % value)
18
- return super(ExtendedExampleVirt, self).run(value + 1)
19
-
20
- def run_bool(self):
21
- print('ExtendedExampleVirt::run_bool()')
22
- return False
23
-
24
- def get_string1(self):
25
- return "override1"
26
-
27
- def pure_virtual(self):
28
- print('ExtendedExampleVirt::pure_virtual(): %s' % self.data)
29
-
30
- class ExtendedExampleVirt2(ExtendedExampleVirt):
31
- def __init__(self, state):
32
- super(ExtendedExampleVirt2, self).__init__(state + 1)
33
-
34
- def get_string2(self):
35
- return "override2"
36
-
37
- ex12 = m.ExampleVirt(10)
38
- with capture:
39
- assert m.runExampleVirt(ex12, 20) == 30
40
- assert capture == """
41
- Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)
42
- """ # noqa: E501 line too long
43
-
44
- with pytest.raises(RuntimeError) as excinfo:
45
- m.runExampleVirtVirtual(ex12)
46
- assert msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"'
47
-
48
- ex12p = ExtendedExampleVirt(10)
49
- with capture:
50
- assert m.runExampleVirt(ex12p, 20) == 32
51
- assert capture == """
52
- ExtendedExampleVirt::run(20), calling parent..
53
- Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)
54
- """ # noqa: E501 line too long
55
- with capture:
56
- assert m.runExampleVirtBool(ex12p) is False
57
- assert capture == "ExtendedExampleVirt::run_bool()"
58
- with capture:
59
- m.runExampleVirtVirtual(ex12p)
60
- assert capture == "ExtendedExampleVirt::pure_virtual(): Hello world"
61
-
62
- ex12p2 = ExtendedExampleVirt2(15)
63
- with capture:
64
- assert m.runExampleVirt(ex12p2, 50) == 68
65
- assert capture == """
66
- ExtendedExampleVirt::run(50), calling parent..
67
- Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)
68
- """ # noqa: E501 line too long
69
-
70
- cstats = ConstructorStats.get(m.ExampleVirt)
71
- assert cstats.alive() == 3
72
- del ex12, ex12p, ex12p2
73
- assert cstats.alive() == 0
74
- assert cstats.values() == ['10', '11', '17']
75
- assert cstats.copy_constructions == 0
76
- assert cstats.move_constructions >= 0
77
-
78
-
79
- def test_alias_delay_initialization1(capture):
80
- """`A` only initializes its trampoline class when we inherit from it
81
-
82
- If we just create and use an A instance directly, the trampoline initialization is
83
- bypassed and we only initialize an A() instead (for performance reasons).
84
- """
85
- class B(m.A):
86
- def __init__(self):
87
- super(B, self).__init__()
88
-
89
- def f(self):
90
- print("In python f()")
91
-
92
- # C++ version
93
- with capture:
94
- a = m.A()
95
- m.call_f(a)
96
- del a
97
- pytest.gc_collect()
98
- assert capture == "A.f()"
99
-
100
- # Python version
101
- with capture:
102
- b = B()
103
- m.call_f(b)
104
- del b
105
- pytest.gc_collect()
106
- assert capture == """
107
- PyA.PyA()
108
- PyA.f()
109
- In python f()
110
- PyA.~PyA()
111
- """
112
-
113
-
114
- def test_alias_delay_initialization2(capture):
115
- """`A2`, unlike the above, is configured to always initialize the alias
116
-
117
- While the extra initialization and extra class layer has small virtual dispatch
118
- performance penalty, it also allows us to do more things with the trampoline
119
- class such as defining local variables and performing construction/destruction.
120
- """
121
- class B2(m.A2):
122
- def __init__(self):
123
- super(B2, self).__init__()
124
-
125
- def f(self):
126
- print("In python B2.f()")
127
-
128
- # No python subclass version
129
- with capture:
130
- a2 = m.A2()
131
- m.call_f(a2)
132
- del a2
133
- pytest.gc_collect()
134
- a3 = m.A2(1)
135
- m.call_f(a3)
136
- del a3
137
- pytest.gc_collect()
138
- assert capture == """
139
- PyA2.PyA2()
140
- PyA2.f()
141
- A2.f()
142
- PyA2.~PyA2()
143
- PyA2.PyA2()
144
- PyA2.f()
145
- A2.f()
146
- PyA2.~PyA2()
147
- """
148
-
149
- # Python subclass version
150
- with capture:
151
- b2 = B2()
152
- m.call_f(b2)
153
- del b2
154
- pytest.gc_collect()
155
- assert capture == """
156
- PyA2.PyA2()
157
- PyA2.f()
158
- In python B2.f()
159
- PyA2.~PyA2()
160
- """
161
-
162
-
163
- # PyPy: Reference count > 1 causes call with noncopyable instance
164
- # to fail in ncv1.print_nc()
165
- @pytest.mark.xfail("env.PYPY")
166
- @pytest.mark.skipif(not hasattr(m, "NCVirt"), reason="NCVirt test broken on ICPC")
167
- def test_move_support():
168
- class NCVirtExt(m.NCVirt):
169
- def get_noncopyable(self, a, b):
170
- # Constructs and returns a new instance:
171
- nc = m.NonCopyable(a * a, b * b)
172
- return nc
173
-
174
- def get_movable(self, a, b):
175
- # Return a referenced copy
176
- self.movable = m.Movable(a, b)
177
- return self.movable
178
-
179
- class NCVirtExt2(m.NCVirt):
180
- def get_noncopyable(self, a, b):
181
- # Keep a reference: this is going to throw an exception
182
- self.nc = m.NonCopyable(a, b)
183
- return self.nc
184
-
185
- def get_movable(self, a, b):
186
- # Return a new instance without storing it
187
- return m.Movable(a, b)
188
-
189
- ncv1 = NCVirtExt()
190
- assert ncv1.print_nc(2, 3) == "36"
191
- assert ncv1.print_movable(4, 5) == "9"
192
- ncv2 = NCVirtExt2()
193
- assert ncv2.print_movable(7, 7) == "14"
194
- # Don't check the exception message here because it differs under debug/non-debug mode
195
- with pytest.raises(RuntimeError):
196
- ncv2.print_nc(9, 9)
197
-
198
- nc_stats = ConstructorStats.get(m.NonCopyable)
199
- mv_stats = ConstructorStats.get(m.Movable)
200
- assert nc_stats.alive() == 1
201
- assert mv_stats.alive() == 1
202
- del ncv1, ncv2
203
- assert nc_stats.alive() == 0
204
- assert mv_stats.alive() == 0
205
- assert nc_stats.values() == ['4', '9', '9', '9']
206
- assert mv_stats.values() == ['4', '5', '7', '7']
207
- assert nc_stats.copy_constructions == 0
208
- assert mv_stats.copy_constructions == 1
209
- assert nc_stats.move_constructions >= 0
210
- assert mv_stats.move_constructions >= 0
211
-
212
-
213
- def test_dispatch_issue(msg):
214
- """#159: virtual function dispatch has problems with similar-named functions"""
215
- class PyClass1(m.DispatchIssue):
216
- def dispatch(self):
217
- return "Yay.."
218
-
219
- class PyClass2(m.DispatchIssue):
220
- def dispatch(self):
221
- with pytest.raises(RuntimeError) as excinfo:
222
- super(PyClass2, self).dispatch()
223
- assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
224
-
225
- p = PyClass1()
226
- return m.dispatch_issue_go(p)
227
-
228
- b = PyClass2()
229
- assert m.dispatch_issue_go(b) == "Yay.."
230
-
231
-
232
- def test_override_ref():
233
- """#392/397: overriding reference-returning functions"""
234
- o = m.OverrideTest("asdf")
235
-
236
- # Not allowed (see associated .cpp comment)
237
- # i = o.str_ref()
238
- # assert o.str_ref() == "asdf"
239
- assert o.str_value() == "asdf"
240
-
241
- assert o.A_value().value == "hi"
242
- a = o.A_ref()
243
- assert a.value == "hi"
244
- a.value = "bye"
245
- assert a.value == "bye"
246
-
247
-
248
- def test_inherited_virtuals():
249
- class AR(m.A_Repeat):
250
- def unlucky_number(self):
251
- return 99
252
-
253
- class AT(m.A_Tpl):
254
- def unlucky_number(self):
255
- return 999
256
-
257
- obj = AR()
258
- assert obj.say_something(3) == "hihihi"
259
- assert obj.unlucky_number() == 99
260
- assert obj.say_everything() == "hi 99"
261
-
262
- obj = AT()
263
- assert obj.say_something(3) == "hihihi"
264
- assert obj.unlucky_number() == 999
265
- assert obj.say_everything() == "hi 999"
266
-
267
- for obj in [m.B_Repeat(), m.B_Tpl()]:
268
- assert obj.say_something(3) == "B says hi 3 times"
269
- assert obj.unlucky_number() == 13
270
- assert obj.lucky_number() == 7.0
271
- assert obj.say_everything() == "B says hi 1 times 13"
272
-
273
- for obj in [m.C_Repeat(), m.C_Tpl()]:
274
- assert obj.say_something(3) == "B says hi 3 times"
275
- assert obj.unlucky_number() == 4444
276
- assert obj.lucky_number() == 888.0
277
- assert obj.say_everything() == "B says hi 1 times 4444"
278
-
279
- class CR(m.C_Repeat):
280
- def lucky_number(self):
281
- return m.C_Repeat.lucky_number(self) + 1.25
282
-
283
- obj = CR()
284
- assert obj.say_something(3) == "B says hi 3 times"
285
- assert obj.unlucky_number() == 4444
286
- assert obj.lucky_number() == 889.25
287
- assert obj.say_everything() == "B says hi 1 times 4444"
288
-
289
- class CT(m.C_Tpl):
290
- pass
291
-
292
- obj = CT()
293
- assert obj.say_something(3) == "B says hi 3 times"
294
- assert obj.unlucky_number() == 4444
295
- assert obj.lucky_number() == 888.0
296
- assert obj.say_everything() == "B says hi 1 times 4444"
297
-
298
- class CCR(CR):
299
- def lucky_number(self):
300
- return CR.lucky_number(self) * 10
301
-
302
- obj = CCR()
303
- assert obj.say_something(3) == "B says hi 3 times"
304
- assert obj.unlucky_number() == 4444
305
- assert obj.lucky_number() == 8892.5
306
- assert obj.say_everything() == "B says hi 1 times 4444"
307
-
308
- class CCT(CT):
309
- def lucky_number(self):
310
- return CT.lucky_number(self) * 1000
311
-
312
- obj = CCT()
313
- assert obj.say_something(3) == "B says hi 3 times"
314
- assert obj.unlucky_number() == 4444
315
- assert obj.lucky_number() == 888000.0
316
- assert obj.say_everything() == "B says hi 1 times 4444"
317
-
318
- class DR(m.D_Repeat):
319
- def unlucky_number(self):
320
- return 123
321
-
322
- def lucky_number(self):
323
- return 42.0
324
-
325
- for obj in [m.D_Repeat(), m.D_Tpl()]:
326
- assert obj.say_something(3) == "B says hi 3 times"
327
- assert obj.unlucky_number() == 4444
328
- assert obj.lucky_number() == 888.0
329
- assert obj.say_everything() == "B says hi 1 times 4444"
330
-
331
- obj = DR()
332
- assert obj.say_something(3) == "B says hi 3 times"
333
- assert obj.unlucky_number() == 123
334
- assert obj.lucky_number() == 42.0
335
- assert obj.say_everything() == "B says hi 1 times 123"
336
-
337
- class DT(m.D_Tpl):
338
- def say_something(self, times):
339
- return "DT says:" + (' quack' * times)
340
-
341
- def unlucky_number(self):
342
- return 1234
343
-
344
- def lucky_number(self):
345
- return -4.25
346
-
347
- obj = DT()
348
- assert obj.say_something(3) == "DT says: quack quack quack"
349
- assert obj.unlucky_number() == 1234
350
- assert obj.lucky_number() == -4.25
351
- assert obj.say_everything() == "DT says: quack 1234"
352
-
353
- class DT2(DT):
354
- def say_something(self, times):
355
- return "DT2: " + ('QUACK' * times)
356
-
357
- def unlucky_number(self):
358
- return -3
359
-
360
- class BT(m.B_Tpl):
361
- def say_something(self, times):
362
- return "BT" * times
363
-
364
- def unlucky_number(self):
365
- return -7
366
-
367
- def lucky_number(self):
368
- return -1.375
369
-
370
- obj = BT()
371
- assert obj.say_something(3) == "BTBTBT"
372
- assert obj.unlucky_number() == -7
373
- assert obj.lucky_number() == -1.375
374
- assert obj.say_everything() == "BT -7"
375
-
376
-
377
- def test_issue_1454():
378
- # Fix issue #1454 (crash when acquiring/releasing GIL on another thread in Python 2.7)
379
- m.test_gil()
380
- m.test_gil_from_thread()