parquet-converter commited on
Commit
88ad4d3
·
1 Parent(s): 8416c76

Update parquet files (step 58 of 476)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/create.py +0 -64
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/The Pirate Bay Fallout 4 Codex !FREE! Crack.md +0 -94
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/CorelDRAW X8 How to Unlock the Full Potential of this Graphics Design Software.md +0 -43
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kitserver Winning Eleven 8 Master.md +0 -28
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Electronic Devices and Circuits by Bogart PDF Free Download The Best Book for Electronics Enthusiasts.md +0 -106
  6. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FSX ORBX VECTOR 1.51 The Ultimate Vector Data for Flight Simulator.md +0 -149
  7. spaces/1gistliPinn/ChatGPT4/Examples/Counter-Strike 1.6 V40.1 NonSteam - DiGiTALZONE.rar.rar _HOT_.md +0 -6
  8. spaces/1gistliPinn/ChatGPT4/Examples/Flamingo 2 Rhino 5 Crack.md +0 -134
  9. spaces/A00001/bingothoo/src/components/settings.tsx +0 -157
  10. spaces/A00001/bingothoo/src/pages/api/image.ts +0 -40
  11. spaces/AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary/README.md +0 -11
  12. spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/tts/PortaSpeech.py +0 -85
  13. spaces/AISuperheroes/03GR-Chatbot-Memory/app.py +0 -137
  14. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py +0 -56
  15. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py +0 -46
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Radio.js +0 -82
  17. spaces/Akseluhr/whisper-sv-SE-auhr/app.py +0 -47
  18. spaces/Alican/pixera/data/single_dataset.py +0 -40
  19. spaces/AmirTrader/LinearRegression/app.py +0 -221
  20. spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/python/dqn/dqn.py +0 -245
  21. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/opt.py +0 -100
  22. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/sabl_retina_head.py +0 -621
  23. spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py +0 -2
  24. spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py +0 -7
  25. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/logger.py +0 -495
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/geometric.py +0 -728
  27. spaces/Apex-X/Tm/roop/metadata.py +0 -2
  28. spaces/Arnx/MusicGenXvAKN/tests/modules/test_conv.py +0 -203
  29. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/utils/zoom_out_utils.py +0 -47
  30. spaces/Ashrafb/codellama-34b/app.py +0 -260
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/langrussianmodel.py +0 -0
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/color_triplet.py +0 -38
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/after.py +0 -51
  34. spaces/Audio-AGI/AudioSep/models/CLAP/training/main.py +0 -596
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/test_keypoints.py +0 -19
  36. spaces/AyushP/PolicyChatBot/README.md +0 -12
  37. spaces/Banbri/zcvzcv/src/app/engine/censorship.ts +0 -184
  38. spaces/Bart92/RVC_HF/demucs/__init__.py +0 -7
  39. spaces/Benson/text-generation/Examples/9anime Mod Apk Download.md +0 -74
  40. spaces/Benson/text-generation/Examples/Apk M.facebook.com.md +0 -94
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/__init__.py +0 -467
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/macromanprober.py +0 -162
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/uts46data.py +0 -0
  44. spaces/BilalSardar/AutoML-Model-Training/app.py +0 -45
  45. spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_system_tag.h +0 -34
  46. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/customization.h +0 -128
  47. spaces/CVPR/VizWiz-CLIP-VQA/model/vqa_model.py +0 -123
  48. spaces/CVPR/drawings-to-human/frontend/src/app.css +0 -10
  49. spaces/CVPR/lama-example/fetch_data/places_standard_test_val_sample.sh +0 -22
  50. spaces/Charliee/BingAi/README.md +0 -12
spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/create.py DELETED
@@ -1,64 +0,0 @@
1
- from json import dumps
2
- # from mail import MailClient
3
- from re import findall
4
-
5
- from requests import post, get
6
-
7
- html = get('https://developermail.com/mail/')
8
- print(html.cookies.get('mailboxId'))
9
- email = findall(r'mailto:(.*)">', html.text)[0]
10
-
11
- headers = {
12
- 'apikey': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InVzanNtdWZ1emRjcnJjZXVobnlqIiwicm9sZSI6ImFub24iLCJpYXQiOjE2NzgyODYyMzYsImV4cCI6MTk5Mzg2MjIzNn0.2MQ9Lkh-gPqQwV08inIgqozfbYm5jdYWtf-rn-wfQ7U',
13
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
14
- 'x-client-info': '@supabase/[email protected]',
15
- }
16
-
17
- json_data = {
18
- 'email': email,
19
- 'password': 'T4xyt4Yn6WWQ4NC',
20
- 'data': {},
21
- 'gotrue_meta_security': {},
22
- }
23
-
24
- response = post('https://usjsmufuzdcrrceuhnyj.supabase.co/auth/v1/signup', headers=headers, json=json_data)
25
- print(response.json())
26
-
27
- # email_link = None
28
- # while not email_link:
29
- # sleep(1)
30
-
31
- # mails = mailbox.getmails()
32
- # print(mails)
33
-
34
-
35
- quit()
36
-
37
- url = input("Enter the url: ")
38
- response = get(url, allow_redirects=False)
39
-
40
- # https://openprompt.co/#access_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8&expires_in=604800&refresh_token=_Zp8uXIA2InTDKYgo8TCqA&token_type=bearer&type=signup
41
-
42
- redirect = response.headers.get('location')
43
- access_token = redirect.split('&')[0].split('=')[1]
44
- refresh_token = redirect.split('&')[2].split('=')[1]
45
-
46
- supabase_auth_token = dumps([access_token, refresh_token, None, None, None], separators=(',', ':'))
47
- print(supabase_auth_token)
48
-
49
- cookies = {
50
- 'supabase-auth-token': supabase_auth_token
51
- }
52
-
53
- json_data = {
54
- 'messages': [
55
- {
56
- 'role': 'user',
57
- 'content': 'how do I reverse a string in python?'
58
- }
59
- ]
60
- }
61
-
62
- response = post('https://openprompt.co/api/chat2', cookies=cookies, json=json_data, stream=True)
63
- for chunk in response.iter_content(chunk_size=1024):
64
- print(chunk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/The Pirate Bay Fallout 4 Codex !FREE! Crack.md DELETED
@@ -1,94 +0,0 @@
1
- ## The Pirate Bay Fallout 4 Codex Crack
2
-
3
-
4
-
5
-
6
-
7
- ![The Pirate Bay Fallout 4 Codex !FREE! Crack](https://eprosengenharia.com/wp-content/uploads/2016/05/incendio.jpg?w\u003d640)
8
-
9
-
10
-
11
-
12
-
13
- **Download File ✑ ✑ ✑ [https://www.google.com/url?q=https%3A%2F%2Fcinurl.com%2F2txKLp&sa=D&sntz=1&usg=AOvVaw0FnvQrPp9dpud-nHZVDtkS](https://www.google.com/url?q=https%3A%2F%2Fcinurl.com%2F2txKLp&sa=D&sntz=1&usg=AOvVaw0FnvQrPp9dpud-nHZVDtkS)**
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
-
26
-
27
- # The Pirate Bay Fallout 4 Codex Crack: How to Download and Install
28
-
29
-
30
-
31
- Fallout 4 is one of the most popular RPG games of all time, set in a post-apocalyptic world where you have to survive and rebuild civilization. However, the game is not cheap and requires a Steam account and a valid activation code to play. If you want to play Fallout 4 for free, you might be interested in downloading the Codex crack from The Pirate Bay, one of the most resilient bittorrent sites on the internet.
32
-
33
-
34
-
35
- The Codex crack is a modified version of the game that bypasses the Steam protection and allows you to play without a license key. It also includes all the updates and DLCs that have been released for Fallout 4, as well as some optional mods that enhance the gameplay. However, downloading and installing the Codex crack is not as simple as clicking a button. You need to follow some steps and precautions to make sure everything works properly.
36
-
37
-
38
-
39
- In this article, we will show you how to download and install the Codex crack for Fallout 4 from The Pirate Bay, as well as some tips and tricks to avoid any problems or errors. Please note that this article is for educational purposes only and we do not condone piracy or illegal downloading of any kind. You should always support the developers and publishers of the games you enjoy by buying them legally.
40
-
41
-
42
-
43
- ## Step 1: Download a Torrent Client
44
-
45
-
46
-
47
- The first thing you need to do is to download a torrent client, which is a software that allows you to download files from bittorrent networks. There are many torrent clients available online, but some of the most popular ones are uTorrent, BitTorrent, qBittorrent, and Vuze. You can choose any of them, but make sure you download them from their official websites and not from third-party sources that might contain malware or viruses.
48
-
49
-
50
-
51
- Once you have downloaded and installed your torrent client of choice, you need to configure it properly to ensure optimal performance and security. Some of the settings you should check are:
52
-
53
-
54
-
55
- - Limit your upload and download speed according to your internet connection.
56
-
57
- - Enable encryption to protect your traffic from being monitored or throttled by your ISP.
58
-
59
- - Use a VPN or a proxy to hide your IP address and location from other peers and trackers.
60
-
61
- - Disable DHT, PEX, and LPD to avoid connecting to unwanted or malicious peers.
62
-
63
- - Choose a port that is not commonly used by other applications or blocked by firewalls.
64
-
65
-
66
-
67
- ## Step 2: Download the Codex Crack from The Pirate Bay
68
-
69
-
70
-
71
- The next step is to download the Codex crack for Fallout 4 from The Pirate Bay. To do this, you need to visit the official website of The Pirate Bay, which might be blocked or censored in some countries. If that is the case, you can use a proxy site or a mirror site that has a different domain name but accesses the same content as The Pirate Bay.
72
-
73
-
74
-
75
- Once you are on The Pirate Bay website, you need to search for "Fallout 4 Codex" in the search bar. You will see a list of results that match your query, but not all of them are reliable or safe. You need to look for some indicators that can help you identify the best torrent to download. Some of these indicators are:
76
-
77
-
78
-
79
- - The number of seeders and leechers: Seeders are users who have the complete file and are sharing it with others. Leechers are users who are downloading the file but have not completed it yet. The more seeders and leechers a torrent has, the faster and more stable the download will be.
80
-
81
- - The comments and ratings: Comments and ratings are feedback from other users who have downloaded the torrent before. They can tell you if the torrent is working properly, if it has any errors or viruses, if it has good quality or not, etc. You should always read the comments and ratings before downloading any torrent.
82
-
83
- - The uploader's name and reputation: The uploader's name and reputation are indicators of how trustworthy and reliable they are. You should look for uploaders who have a green or purple skull icon next to their name, which means they are VIP or trusted users who have uploaded many torrents without any issues.
84
-
85
-
86
-
87
- Based on these indicators, we recommend downloading the torrent with index "1" from The
88
-
89
- 1b8d091108
90
-
91
-
92
-
93
-
94
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/CorelDRAW X8 How to Unlock the Full Potential of this Graphics Design Software.md DELETED
@@ -1,43 +0,0 @@
1
-
2
- <h1>How to Download CorelDRAW X8 Full Version with Serial Number</h1>
3
- <p>If you are looking for a powerful and versatile graphics design software, you might want to try CorelDRAW X8. This software can help you create vector illustrations, layouts, photo editing, typography, and more. However, to use this software, you need to have a valid serial number that can activate the full version. In this article, we will show you how to download CorelDRAW X8 full version with serial number for free.</p>
4
- <h2>What is CorelDRAW X8?</h2>
5
- <p>CorelDRAW X8 is the 18th version of the CorelDRAW Graphics Suite, which was released in 2016. It is a software package that includes several applications for different design tasks, such as:</p>
6
- <h2>free download coreldraw x8 full version with serial number</h2><br /><p><b><b>Download File</b> &rarr; <a href="https://byltly.com/2uKyzj">https://byltly.com/2uKyzj</a></b></p><br /><br />
7
- <ul>
8
- <li>CorelDRAW: a vector-based drawing and illustration program</li>
9
- <li>Corel PHOTO-PAINT: a raster-based image editing program</li>
10
- <li>Corel Font Manager: a font management tool</li>
11
- <li>Corel PowerTRACE: a bitmap-to-vector tracing tool</li>
12
- <li>Corel CONNECT: a content browser and search tool</li>
13
- <li>Corel CAPTURE: a screen capture tool</li>
14
- <li>Corel Website Creator: a web design tool</li>
15
- </ul>
16
- <p>Some of the features of CorelDRAW X8 are:</p>
17
- <ul>
18
- <li>Enhanced knife tool that can split vector objects, text, and bitmaps</li>
19
- <li>New healing clone tool that can fix imperfections in photo subjects</li>
20
- <li>New Gaussian blur lens and improved drop shadows</li>
21
- <li>New tools for creating adjacent shapes with similar contours</li>
22
- <li>New color management system and support for Windows 10</li>
23
- <li>Faster performance and system handling</li>
24
- </ul>
25
- <h2>How to Download CorelDRAW X8 Full Version with Serial Number?</h2>
26
- <p>To download CorelDRAW X8 full version with serial number, you need to follow these steps:</p>
27
- <ol>
28
- <li>Go to the official website of CorelDRAW and click on the "Free Download" button.</li>
29
- <li>Select your operating system (Windows or Mac) and your language.</li>
30
- <li>Enter your email address and click on "Download Now". You will receive an email with a download link and instructions.</li>
31
- <li>Click on the download link and save the file on your computer.</li>
32
- <li>Run the installer and follow the on-screen instructions. You will need to enter the serial number that was sent to your email.</li>
33
- <li>After the installation is complete, you can launch CorelDRAW X8 and enjoy its features.</li>
34
- </ol>
35
-
36
- <p>Note: The free download is a trial version that will expire after 15 days. To continue using CorelDRAW X8, you will need to purchase a license or subscription from the official website. Alternatively, you can use a keygen to generate a serial number for free, but this is not recommended as it may be illegal or unsafe.</p>
37
-
38
- <h2>Conclusion</h2>
39
-
40
- <p>CorelDRAW X8 is a great graphics design software that can help you create stunning projects for personal or professional use. However, to use it fully, you need to have a valid serial number that can activate it. You can either download a free trial version from the official website or use a keygen to generate a serial number for free. However, we advise you to purchase a license or subscription from the official website to support the developers and get access to updates and support.</p>
41
- <p></p> ddb901b051<br />
42
- <br />
43
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kitserver Winning Eleven 8 Master.md DELETED
@@ -1,28 +0,0 @@
1
-
2
- <h1>How to Download and Install Kitserver for Winning Eleven 8 Master</h1>
3
- <p>Kitserver is a popular add-on program for Winning Eleven 8 Master, also known as Pro Evolution Soccer 4, that allows you to customize various aspects of the game, such as kits, balls, stadiums, faces, and more. In this article, we will show you how to download and install Kitserver for Winning Eleven 8 Master on your PC.</p>
4
- <h2>Step 1: Download Kitserver</h2>
5
- <p>You can download Kitserver from the official GitHub repository: <a href="https://github.com/kitserver/kitserver4">https://github.com/kitserver/kitserver4</a>. Click on the green "Code" button and choose "Download ZIP". Save the file to your preferred location on your computer.</p>
6
- <h2>Download Kitserver Winning Eleven 8 Master</h2><br /><p><b><b>Download</b> &#10004;&#10004;&#10004; <a href="https://byltly.com/2uKyDd">https://byltly.com/2uKyDd</a></b></p><br /><br />
7
- <h2>Step 2: Extract Kitserver</h2>
8
- <p>After downloading Kitserver, you need to extract the ZIP file using a program like WinRAR or 7-Zip. You should see a folder called "kitserver4-master". Open it and you will find another folder called "kitserver". This is the folder that you need to copy to your Winning Eleven 8 Master installation directory.</p>
9
- <h2>Step 3: Install Kitserver</h2>
10
- <p>Go to your Winning Eleven 8 Master installation directory, which is usually located at C:\Program Files\KONAMI\Winning Eleven 8I. Paste the "kitserver" folder that you copied in the previous step. Your directory structure should look like this:</p>
11
- <ul>
12
- <li>dat/</li>
13
- <li>kitserver/</li>
14
- <li>PES4.exe</li>
15
- <li>readme.htm</li>
16
- <li>settings.dat</li>
17
- <li>settings.exe</li>
18
- </ul>
19
- <p>Now, go to the "kitserver" folder and run setup.exe. You should see your "PES4.exe" in the dropdown list. If Kitserver hasn't been already installed for this executable, the "Install" button should become enabled. Press "Install" button. The installation should happen pretty quickly - in a matter of seconds. Once it is complete, the popup window will display "SUCCESS!" message, or report an error if one occurred. If an error occurs, check if your PES4.exe is not currently in use (i.e. exit the game, if it is currently running). Also, check that PES4.exe is not marked as read-only file.</p>
20
- <h2>Step 4: Use Kitserver</h2>
21
- <p>Congratulations! You have successfully installed Kitserver for Winning Eleven 8 Master. Now you can use it to enhance your game experience. To use Kitserver, you need to place additional folders in the "kitserver" folder. Each folder corresponds to a certain team, ball, stadium, face, or other mod. You can find many mods online from various sources, such as <a href="https://www.gamefront.com/games/winning-eleven-8">https://www.gamefront.com/games/winning-eleven-8</a>. Make sure to follow the instructions provided by each mod creator on how to install and use their mods.</p>
22
- <p>To activate Kitserver in-game, you need to use configurable hot-keys that are defined in the "config.txt" file in the "kitserver" folder. For example, by default, you can press F1 and F2 keys to cycle through different kits for Home and Away teams (for players and goalkeepers), and F3 key to cycle through different balls. You can also press F12 key to see which mods are currently loaded by Kitserver.</p>
23
- <p></p>
24
- <p>For more information on how to use Kitserver, you can read the manual here: <a href="http://kitserver.mapote.com/ks7/manual.html">http://kitserver.mapote.com/ks7/manual.html</a>.</p>
25
- <h2>Step 5: Uninstall Kitserver</h2>
26
- <p>If you want to uninstall Kitserver from your Winning Eleven 8 Master game, you can do so by launching the setup.exe again, selecting PES4.exe, and pressing "Remove" button. After that, you can safely delete the whole "kitserver" folder from your game directory.</p> 7b8c122e87<br />
27
- <br />
28
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Electronic Devices and Circuits by Bogart PDF Free Download The Best Book for Electronics Enthusiasts.md DELETED
@@ -1,106 +0,0 @@
1
- <br />
2
- <h1>Electronic Devices and Circuits by Bogart PDF Free Download</h1>
3
- <p>If you are looking for a comprehensive and up-to-date textbook on electronic devices and circuits, you might want to check out <strong>Electronic Devices and Circuits</strong> by Theodore F. Bogart. This book covers a wide range of topics in modern industrial applications and emerging technologies, using a structured, systems approach. In this article, we will give you an overview of the book, its features, and how you can download it for free.</p>
4
- <h2>Introduction</h2>
5
- <h3>What are electronic devices and circuits?</h3>
6
- <p>Electronic devices are components that can manipulate electric signals or currents, such as resistors, capacitors, diodes, transistors, LEDs, etc. Electronic circuits are combinations of electronic devices that perform specific functions, such as amplifiers, oscillators, filters, converters, etc. Electronic devices and circuits are essential for many fields of engineering and science, such as communications, computing, robotics, biomedical, aerospace, etc.</p>
7
- <h2>electronic devices and circuits by bogart pdf free download</h2><br /><p><b><b>Download Zip</b> &mdash; <a href="https://byltly.com/2uKyuz">https://byltly.com/2uKyuz</a></b></p><br /><br />
8
- <h3>Why study electronic devices and circuits?</h3>
9
- <p>Studying electronic devices and circuits can help you understand the principles and applications of electronics, which is a rapidly evolving and expanding field. You can learn how to design, analyze, and troubleshoot electronic systems using various tools and techniques. You can also explore the latest developments and innovations in electronic devices and circuits, such as nanoelectronics, optoelectronics, microelectromechanical systems (MEMS), etc.</p>
10
- <h3>Who is Theodore F. Bogart?</h3>
11
- <p>Theodore F. Bogart is a professor emeritus of electrical engineering at Pennsylvania State University. He has over 40 years of teaching experience in electronics and has authored or co-authored several textbooks on the subject. He has also received several awards for his excellence in teaching and research. He is the main author of <strong>Electronic Devices and Circuits</strong>, which was first published in 1993 and has been revised several times since then.</p>
12
- <h2>Features of the book</h2>
13
- <h3>Structured, systems approach</h3>
14
- <p>The book uses a structured, systems approach to present electronic devices and circuits in a logical and coherent manner. It starts with the basic concepts of electronics, such as voltage, current, power, resistance, etc., and then introduces the various types of electronic devices and their characteristics. It then shows how these devices can be combined into circuits to perform different functions. It also explains how these circuits can be integrated into larger systems to achieve specific goals.</p>
15
- <h3>Modern, thorough treatment of topics</h3>
16
- <p>The book covers a wide range of topics in electronic devices and circuits that are relevant for modern industrial applications and emerging technologies. It includes topics such as semiconductor physics, diode models and applications, bipolar junction transistor (BJT) models and applications, field-effect transistor (FET) models and applications, digital logic circuits, analog-to-digital converters (ADCs), digital-to-analog converters (DACs), etc. It also updates the content with the latest information and examples from real-world situations.</p>
17
- <h3>Integrated circuit theory and design</h3>
18
- <p>The book provides extensive coverage of integrated circuit theory and design, which is an important aspect of electronic devices and circuits. It explains how electronic devices can be fabricated on a single chip using various processes and techniques. It also discusses how integrated circuits can be classified into different types based on their functions and complexity levels. It also covers analog and digital integrated circuit design principles and methods.</p>
19
- <h3>Operational amplifier theory and applications</h3>
20
- <p>The book devotes several chapters to operational amplifier theory and applications, which is another important aspect of electronic devices and circuits. It describes what an operational amplifier is, how it works, and what its characteristics are. It also shows how operational amplifiers can be used to implement various types of linear and nonlinear circuits, such as amplifiers, filters, comparators, oscillators, etc. It also illustrates how operational amplifiers can be integrated into larger systems to perform complex tasks.</p>
21
- <p>bogart electronic devices and circuits pdf download<br />
22
- free pdf of electronic devices and circuits by bogart<br />
23
- electronic devices and circuits by bogart ebook download<br />
24
- download electronic devices and circuits by bogart pdf free<br />
25
- electronic devices and circuits by bogart 6th edition pdf free download<br />
26
- bogart electronic devices and circuits book pdf free download<br />
27
- electronic devices and circuits by bogart solution manual pdf free download<br />
28
- electronic devices and circuits by bogart 5th edition pdf free download<br />
29
- how to download electronic devices and circuits by bogart pdf for free<br />
30
- electronic devices and circuits by bogart online pdf free download<br />
31
- electronic devices and circuits by bogart 4th edition pdf free download<br />
32
- electronic devices and circuits by bogart pdf free download google drive<br />
33
- electronic devices and circuits by bogart 7th edition pdf free download<br />
34
- electronic devices and circuits by bogart pdf free download quora<br />
35
- electronic devices and circuits by bogart 3rd edition pdf free download<br />
36
- best site to download electronic devices and circuits by bogart pdf free<br />
37
- electronic devices and circuits by bogart lecture notes pdf free download<br />
38
- electronic devices and circuits by bogart 2nd edition pdf free download<br />
39
- electronic devices and circuits by bogart 8th edition pdf free download<br />
40
- electronic devices and circuits by bogart pdf free download reddit<br />
41
- where can I find electronic devices and circuits by bogart pdf free download<br />
42
- electronic devices and circuits by bogart lab manual pdf free download<br />
43
- electronic devices and circuits by bogart 9th edition pdf free download<br />
44
- electronic devices and circuits by bogart mcq pdf free download<br />
45
- is it legal to download electronic devices and circuits by bogart pdf for free<br />
46
- electronic devices and circuits by bogart ppt slides pdf free download<br />
47
- electronic devices and circuits by bogart objective questions pdf free download<br />
48
- electronic devices and circuits by bogart previous year question papers pdf free download<br />
49
- advantages of downloading electronic devices and circuits by bogart pdf for free<br />
50
- reviews of electronic devices and circuits by bogart pdf free download<br />
51
- alternatives to electronic devices and circuits by bogart pdf free download<br />
52
- tips for downloading electronic devices and circuits by bogart pdf for free<br />
53
- electronic devices and circuits by bogart syllabus pdf free download<br />
54
- comparison of electronic devices and circuits by bogart with other books pdf free download<br />
55
- summary of electronic devices and circuits by bogart pdf free download<br />
56
- features of electronic devices and circuits by bogart pdf free download<br />
57
- benefits of reading electronic devices and circuits by bogart pdf for free<br />
58
- challenges of downloading electronic devices and circuits by bogart pdf for free<br />
59
- examples of projects using electronic devices and circuits by bogart pdf for free<br />
60
- testimonials of students who downloaded electronic devices and circuits by bogart pdf for free<br />
61
- how to cite electronic devices and circuits by bogart pdf in your paper or report for free<br />
62
- how to share electronic devices and circuits by bogart pdf with your friends or classmates for free<br />
63
- how to print or convert electronic devices and circuits by bogart pdf to other formats for free<br />
64
- how to access or view electronic devices and circuits by bogart pdf on different devices for free<br />
65
- how to edit or annotate electronic devices and circuits by bogart pdf for your study or research for free<br />
66
- how to learn or teach from electronic devices and circuits by bogart pdf for free<br />
67
- how to use or apply the concepts from electronic devices and circuits by bogart in your projects or assignments for free</p>
68
- <h3>Specialized electronic devices and circuits</h3>
69
- <p>The book also covers some specialized electronic devices and circuits that are useful for specific purposes or emerging fields. It includes topics such as switching regulators, optoelectronics, MEMS, nanoelectronics, etc. It explains what these devices and circuits are, how they work, and what their advantages and disadvantages are. It also gives examples of their applications and challenges.</p>
70
- <h2>How to download the book for free?</h2>
71
- <h3>Internet Archive</h3>
72
- <p>One way to download the book for free is to use the Internet Archive, which is a non-profit digital library that offers free access to millions of books, movies, music, software, etc. You can find the PDF version of <strong>Electronic Devices and Circuits</strong> by Bogart on this website. You can either read it online or download it to your device. You can also borrow it for 14 days if you create an account on the website.</p>
73
- <h3>Google Books</h3>
74
- <p>Another way to download the book for free is to use Google Books, which is a service that allows you to search and preview millions of books from various publishers and libraries. You can find a preview version of <strong>Electronic Devices and Circuits</strong> by Bogart on this website. You can read some pages of the book online or download them as PDF files. However, you may not be able to access the full content of the book unless you buy it or find it in a library.</p>
75
- <h3>Other sources</h3>
76
- <p>Besides these two websites, you may also find other sources that offer free downloads of <strong>Electronic Devices and Circuits</strong> by Bogart. However, you should be careful when using these sources as they may not be legal or safe. Some of them may contain viruses or malware that can harm your device or steal your personal information. Some of them may also violate the copyright laws or terms of service of the original publishers or authors. Therefore, you should always check the credibility and reliability of these sources before downloading anything from them.</p>
77
- <h2>Conclusion</h2>
78
- <p>In conclusion, <strong>Electronic Devices and Circuits</strong> by Bogart is a comprehensive and up-to-date textbook on electronic devices and circuits that covers a wide range of topics in modern industrial applications and emerging technologies. It uses a structured, systems approach to present electronic devices and circuits in a logical and coherent manner. It also provides extensive coverage of integrated circuit theory and design, operational amplifier theory and applications, and specialized electronic devices and circuits. If you want to download this book for free, you can use the Internet Archive or Google Books websites, or look for other sources that may offer free downloads. However, you should always be careful when using these sources as they may not be legal or safe.</p>
79
- <h2>FAQs</h2>
80
- <ol>
81
- <li><strong>What is the difference between electronic devices and electrical devices?</strong></li>
82
- <p>An electronic device is a component that can manipulate electric signals or currents based on some logic or function. An electrical device is a component that can convert electric energy into other forms of energy or vice versa based on some physical principle.</p>
83
- <li><strong>What are some examples of electronic devices?</strong></li>
84
- <p>Some examples of electronic devices are resistors, capacitors, diodes, transistors, LEDs, LCDs, sensors, microcontrollers, etc.</p>
85
- <li><strong>What are some examples of electronic circuits?</strong></li>
86
- <p>Some examples of electronic circuits are amplifiers, oscillators, filters, converters, counters, adders, multiplexers, etc.</p>
87
- <li><strong>What are some benefits of studying electronic devices and circuits?</strong></li>
88
- <p>Some benefits of studying electronic devices and circuits are:</p>
89
- <ul>
90
- ```html <li>You can understand the principles and applications of electronics, which is a rapidly evolving and expanding field.</li>
91
- <li>You can learn how to design, analyze, and troubleshoot electronic systems using various tools and techniques.</li>
92
- <li>You can explore the latest developments and innovations in electronic devices and circuits, such as nanoelectronics, optoelectronics, microelectromechanical systems (MEMS), etc.</li>
93
- <li>You can enhance your skills and knowledge in engineering and science, which can help you in your academic or professional career.</li>
94
- </ul>
95
- <li><strong>What are some challenges of studying electronic devices and circuits?</strong></li>
96
- <p>Some challenges of studying electronic devices and circuits are:</p>
97
- <ul>
98
- <li>You need to have a good background in mathematics, physics, and chemistry, which are the foundations of electronics.</li>
99
- <li>You need to be familiar with various symbols, notations, conventions, and units that are used in electronics.</li>
100
- <li>You need to be able to apply abstract concepts and theories to practical problems and situations.</li>
101
- <li>You need to keep up with the fast-paced changes and advancements in electronic devices and circuits.</li>
102
- </ul>
103
- </ol>
104
- </p> 0a6ba089eb<br />
105
- <br />
106
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FSX ORBX VECTOR 1.51 The Ultimate Vector Data for Flight Simulator.md DELETED
@@ -1,149 +0,0 @@
1
-
2
- <h1>FSX ORBX VECTOR 1.51 Download for Computer</h1>
3
- <p>If you are a fan of flight simulation games, you might have heard of FSX ORBX VECTOR 1.51. This is a product that enhances the realism and accuracy of your virtual world by adding vector data to your scenery. In this article, we will explain what FSX ORBX VECTOR 1.51 is, how to download and install it on your computer, why you should use it, and how to get the most out of it.</p>
4
- <h2>FSX ORBX VECTOR 1.51 download for computer</h2><br /><p><b><b>DOWNLOAD</b> &#128279; <a href="https://byltly.com/2uKy2Y">https://byltly.com/2uKy2Y</a></b></p><br /><br />
5
- <h2>What is FSX ORBX VECTOR 1.51?</h2>
6
- <p>FSX ORBX VECTOR 1.51 is a product developed by ORBX, a company that specializes in creating high-quality scenery addons for flight simulation games. FSX ORBX VECTOR 1.51 is designed for Microsoft Flight Simulator X (FSX) and Lockheed Martin Prepar3D (P3D), two of the most popular flight simulation platforms.</p>
7
- <p>FSX ORBX VECTOR 1.51 adds vector data to your scenery, which means it improves the accuracy and detail of features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more. It also corrects some errors and anomalies in the default scenery, such as misplaced or missing features, unrealistic shapes or sizes, or incorrect elevations.</p>
8
- <p>FSX ORBX VECTOR 1.51 covers the entire world with over 78 million square kilometers of vector data. It also includes a vector configuration tool that allows you to customize the settings and options of the product according to your preferences and system performance.</p>
9
- <h2>How to download and install FSX ORBX VECTOR 1.51 on your computer</h2>
10
- <p>If you are interested in using FSX ORBX VECTOR 1.51 on your computer, you will need to follow these steps:</p>
11
- <h3>Requirements and compatibility</h3>
12
- <p>Before you download and install FSX ORBX VECTOR 1.51, you need to make sure that your computer meets the minimum requirements and that you have a compatible flight simulation platform.</p>
13
- <p>The minimum requirements for FSX ORBX VECTOR 1.51 are:</p>
14
- <ul>
15
- <li>Windows 7 or higher (64-bit recommended)</li>
16
- <li>Intel Core i5 or equivalent processor</li>
17
- <li>8 GB of RAM</li>
18
- <li>NVIDIA GeForce GTX 660 or equivalent graphics card</li>
19
- <li>30 GB of free hard disk space</li>
20
- <li>Internet connection for downloading and activating the product</li>
21
- </ul>
22
- <p>The compatible flight simulation platforms are:</p>
23
- <p>How to install FSX ORBX VECTOR 1.51 on P3D<br />
24
- FSX ORBX VECTOR 1.51 review and comparison<br />
25
- FSX ORBX VECTOR 1.51 download link and instructions<br />
26
- FSX ORBX VECTOR 1.51 vs FTX Global Base<br />
27
- FSX ORBX VECTOR 1.51 new features and improvements<br />
28
- FSX ORBX VECTOR 1.51 system requirements and compatibility<br />
29
- FSX ORBX VECTOR 1.51 best settings and tips<br />
30
- FSX ORBX VECTOR 1.51 update and patch notes<br />
31
- FSX ORBX VECTOR 1.51 free trial and demo<br />
32
- FSX ORBX VECTOR 1.51 discount and coupon code<br />
33
- FSX ORBX VECTOR 1.51 support and troubleshooting<br />
34
- FSX ORBX VECTOR 1.51 scenery and airport list<br />
35
- FSX ORBX VECTOR 1.51 screenshots and videos<br />
36
- FSX ORBX VECTOR 1.51 performance and FPS<br />
37
- FSX ORBX VECTOR 1.51 bugs and issues<br />
38
- FSX ORBX VECTOR 1.51 alternatives and competitors<br />
39
- FSX ORBX VECTOR 1.51 addons and mods<br />
40
- FSX ORBX VECTOR 1.51 forum and community<br />
41
- FSX ORBX VECTOR 1.51 license and activation<br />
42
- FSX ORBX VECTOR 1.51 refund and cancellation policy<br />
43
- How to uninstall FSX ORBX VECTOR 1.51 from your computer<br />
44
- How to backup and restore FSX ORBX VECTOR 1.51 data<br />
45
- How to upgrade from previous versions of FSX ORBX VECTOR<br />
46
- How to customize and tweak FSX ORBX VECTOR 1.51 settings<br />
47
- How to fix common errors and problems with FSX ORBX VECTOR 1.51<br />
48
- How to optimize your PC for running FSX ORBX VECTOR 1.51 smoothly<br />
49
- How to use FSX ORBX VECTOR 1.51 with other flight simulators<br />
50
- How to get the most out of FSX ORBX VECTOR 1.51 features<br />
51
- How to fly with realistic weather and traffic using FSX ORBX VECTOR 1.51<br />
52
- How to create your own scenery and airports with FSX ORBX VECTOR 1.51 tools<br />
53
- What are the benefits of using FSX ORBX VECTOR 1.51 for your flight simulation<br />
54
- What are the drawbacks and limitations of using FSX ORBX VECTOR 1.51 for your flight simulation<br />
55
- What are the differences between FSX ORBX VECTOR 1.51 and other vector products for flight simulators<br />
56
- What are the best sources and resources for learning more about FSX ORBX VECTOR 1.51<br />
57
- What are the best practices and recommendations for using FSX ORBX VECTOR 1.51 effectively<br />
58
- What are the latest news and updates on FSX ORBX VECTOR 1.51 development and release<br />
59
- What are the best places and websites to buy or download FSX ORBX VECTOR 1.51 legally and safely<br />
60
- What are the best ways and methods to test and evaluate FSX ORBX VECTOR 1.51 quality and performance<br />
61
- What are the best examples and showcases of using FSX ORBX VECTOR 1.51 for your flight simulation projects<br />
62
- What are the best reviews and ratings of FSX ORBX VECTOR 1.51 by experts and users</p>
63
- <ul>
64
- <li>Microsoft Flight Simulator X (SP2 or Acceleration)</li>
65
- <li>Microsoft Flight Simulator X: Steam Edition</li>
66
- <li>Lockheed Martin Prepar3D v1-v5</li>
67
- </ul>
68
- <h3>Steps to download and install</h3>
69
- <p>Once you have verified that your computer meets the requirements and that you have a compatible flight simulation platform, you can proceed to download and install FSX ORBX VECTOR 1.51.</p>
70
- <ol>
71
- <li>Go to the official website of ORBX at https://orbxdirect.com/ and create an account if you don't have one already.</li>
72
- <li>Browse the products section and find FSX ORBX VECTOR 1.51 under the Global Range category.</li>
73
- <li>Add the product to your cart and proceed to checkout.</li>
74
- <li>Complete the payment process using your preferred method.</li>
75
- <li>Download the product using the ORBX Central application, which will be automatically installed on your computer after purchase.</li>
76
- <li>Launch the ORBX Central application and select your flight simulation platform from the menu.</li>
77
- <li>Select FSX ORBX VECTOR 1.51 from the list of products and click on Install.</li>
78
- <li>Wait for the installation process to finish.</li>
79
- <li>Launch your flight simulation platform and enjoy FSX ORBX VECTOR 1.51.</li>
80
- </ol>
81
- <h3>Troubleshooting tips</h3>
82
- <p>If you encounter any issues or problems while downloading or installing FSX ORBX VECTOR 1.51, here are some tips that might help you:</p>
83
- <ul>
84
- <li>Make sure that your internet connection is stable and fast enough to download the product without interruptions.</li>
85
- <li>Make sure that you have enough free disk space on your computer to store the product files.</li>
86
- <li>Make sure that you have administrator rights on your computer to run the installation process.</li>
87
- <li>Make sure that your antivirus or firewall software does not block or interfere with the installation process.</li>
88
- <li>Make sure that you have updated your flight simulation platform to the latest version available.</li>
89
- <li>If you have any other scenery addons installed on your computer, make sure that they are compatible with FSX ORBX VECTOR 1.51 and that they are placed in the correct order in the scenery library.</li>
90
- <li>If you have any questions or need further assistance, contact the ORBX support team at https://orbxdirect.com/support.</li>
91
- </ul>
92
- <h2>Why should you use FSX ORBX VECTOR 1.51?</h2>
93
- <p>Now that you know what FSX ORBX VECTOR 1.51 is and how to download and install it on your computer, you might be wondering why you should use it. What are the benefits of using FSX ORBX VECTOR 1.51 for your flight simulation experience?</p>
94
- <p>Well, there are many reasons why FSX ORBX VECTOR 1.51 is a great product for flight simulation enthusiasts. Here are some of them:</p>
95
- <h3>The benefits of using FSX ORBX VECTOR 1.51 for your flight simulation experience</h3>
96
- <h4>Enhanced realism and accuracy</h4>
97
- <p>One of the main benefits of using FSX ORBX VECTOR 1.51 is that it enhances the realism and accuracy of your virtual world. By adding vector data to your scenery, it makes your environment look more natural and authentic. You will be able to see features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more in their correct locations, shapes, sizes, and elevations.</p>
98
- <p>This will make your flight simulation experience more immersive and enjoyable. You will be able to explore different regions and airports with more detail and variety. You will also be able to follow real-world navigation charts and procedures with more confidence and accuracy.</p>
99
- <h4>Improved performance and stability</h4>
100
- <p>Another benefit of using FSX ORBX VECTOR 1.51 is that it improves the performance and stability of your flight simulation platform. By using a smart compression technology, it reduces the size of the vector data files without compromising the quality. This means that it will not take up too much space on your hard disk or memory.</p>
101
- <p>FSX ORBX VECTOR 1.51 also optimizes the loading and rendering of the vector data according to your system performance and settings. This means that it will not cause any significant impact on your frame rates or loading times. You will be able to enjoy a smooth and stable flight simulation experience without any lag or stutter.</p>
102
- <h4>Customizable settings and options</h4>
103
- <p>A third benefit of using FSX ORBX VECTOR 1.51 is that it offers customizable settings and options for your convenience and preference. By using the vector configuration tool that comes with the product, you will be able to adjust various aspects of the vector data according to your needs and desires.</p>
104
- <p>For example, you will be able to enable or disable certain features such as roads, railways, bridges, power lines, golf courses, parks, and more. You will also be able to change the colors, widths, densities, and styles of these features to suit your taste. You will also be able to fine-tune the elevation correction settings to avoid any conflicts or errors with other scenery addons or mesh products.</p>
105
- <h2>How to get the most out of FSX ORBX VECTOR 1.51?</h2>
106
- <p>Finally, you might be wondering how to get the most out of FSX ORBX VECTOR 1.51. How can you optimize your FSX ORBX VECTOR 1.51 usage to enhance your flight simulation experience even further?</p>
107
- <p>Well, there are some tips and tricks that you can follow to make the best use of FSX ORBX VECTOR 1.51. Here are some of them:</p>
108
- <h3>Some tips and tricks to optimize your FSX ORBX VECTOR 1.51 usage</h3>
109
- <h4>Adjusting the vector configuration tool</h4>
110
- <p>As mentioned earlier, the vector configuration tool allows you to customize the settings and options of FSX ORBX VECTOR 1.51 according to your preferences and system performance. However, you should also be aware that some features might have more impact on your frame rates or loading times than others.</p>
111
- <p>For example, roads and railways might have more impact than coastlines or rivers because they have more segments and curves. Therefore, you might want to reduce the width or density of these features if you have a lower-end system or if you want to improve your performance.</p>
112
- <p>You should also experiment with different combinations of features and colors to find the best balance between realism and performance for your system and taste.</p>
113
- <h4>Using compatible scenery addons and mesh products</h4>
114
- <p>Another tip is to use compatible scenery addons and mesh products with FSX ORBX VECTOR 1.51 to enhance your virtual world even more. Scenery addons are products that add more detail and variety to specific regions or airports in your scenery. Mesh products are products that improve the elevation data of your terrain.</p>
115
- <p>By using compatible scenery addons and mesh products with FSX ORBX VECTOR 1.51, you will be able to enjoy a more realistic and diverse environment with more features and landmarks. However, you should also make sure that these products are placed in the correct order in your scenery library to avoid any conflicts or errors.</p>
116
- <p>The recommended order for placing these products in your scenery library is:</p>
117
- <ol>
118
- <li>Your custom airports or regions</li>
119
- <li>Your mesh products</li>
120
- <li>Your landclass products</li>
121
- <li>Your global base products</li>
122
- <li>Your global vector products (FSX ORBX VECTOR 1.51)</li>
123
- <li>Your default scenery</li>
124
- </ol>
125
- <h4>Exploring different regions and airports with FSX ORBX VECTOR 1.51</h4>
126
- <p>A final tip is to explore different regions and airports with FSX ORBX VECTOR 1.51 to enjoy its full potential. By using FSX ORBX VECTOR 1.51, you will be able to see more detail and variety in your virtual world than ever before.</p>
127
- <p>You will be able to discover new places and landmarks that you might have missed before. You will also be able to fly over different terrains and landscapes with more realism and accuracy.</p>
128
- <p>You can use online resources such as Google Maps or Wikipedia to find interesting regions or airports to visit with FSX ORBX VECTOR 1.51. You can also use online flight planners such as SimBrief or SkyVector to plan realistic routes and procedures with FSX ORBX VECTOR 1.51.</p>
129
- <h2>Conclusion</h2>
130
- <h2>Conclusion</h2>
131
- <p>In conclusion, FSX ORBX VECTOR 1.51 is a product that enhances the realism and accuracy of your virtual world by adding vector data to your scenery. It covers the entire world with over 78 million square kilometers of vector data that improves the detail and quality of features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more. It also corrects some errors and anomalies in the default scenery.</p>
132
- <p>FSX ORBX VECTOR 1.51 also offers many benefits for your flight simulation experience. It improves the performance and stability of your flight simulation platform by using a smart compression technology and optimizing the loading and rendering of the vector data. It also offers customizable settings and options for your convenience and preference by using a vector configuration tool.</p>
133
- <p>FSX ORBX VECTOR 1.51 is compatible with Microsoft Flight Simulator X (FSX) and Lockheed Martin Prepar3D (P3D), two of the most popular flight simulation platforms. It is easy to download and install on your computer by using the ORBX Central application. It also works well with other scenery addons and mesh products that are compatible with FSX ORBX VECTOR 1.51.</p>
134
- <p>FSX ORBX VECTOR 1.51 is a great product for flight simulation enthusiasts who want to enhance their virtual world and enjoy a more immersive and enjoyable flight simulation experience. You can get FSX ORBX VECTOR 1.51 from the official website of ORBX at https://orbxdirect.com/ for $69.95 USD.</p>
135
- <h2>FAQs</h2>
136
- <p>Here are some frequently asked questions about FSX ORBX VECTOR 1.51:</p>
137
- <h3>What is the difference between FSX ORBX VECTOR 1.51 and FSX ORBX Global Base?</h3>
138
- <p>FSX ORBX Global Base is another product by ORBX that improves the texture and color of your terrain by replacing the default landclass data with high-resolution photorealistic data. FSX ORBX VECTOR 1.51 complements FSX ORBX Global Base by adding vector data to your scenery that improves the accuracy and detail of features such as coastlines, rivers, lakes, roads, railways, bridges, power lines, golf courses, parks, and more.</p>
139
- <h3>Do I need FSX ORBX Global Base to use FSX ORBX VECTOR 1.51?</h3>
140
- <p>No, you do not need FSX ORBX Global Base to use FSX ORBX VECTOR 1.51. However, it is highly recommended that you use both products together to get the best results for your scenery.</p>
141
- <h3>Can I use FSX ORBX VECTOR 1.51 with other scenery addons or mesh products?</h3>
142
- <p>Yes, you can use FSX ORBX VECTOR 1.51 with other scenery addons or mesh products that are compatible with FSX ORBX VECTOR 1.51. However, you should make sure that these products are placed in the correct order in your scenery library to avoid any conflicts or errors.</p>
143
- <h3>How can I update FSX ORBX VECTOR 1.51 to the latest version?</h3>
144
- <p>You can update FSX ORBX VECTOR 1.51 to the latest version by using the ORBX Central application. Simply launch the application and select your flight simulation platform from the menu. Then select FSX ORBX VECTOR 1.51 from the list of products and click on Update.</p>
145
- <h3>How can I contact the ORBX support team if I have any questions or issues with FSX ORBX VECTOR 1.51?</h3>
146
- <p>You can contact the ORBX support team by visiting their website at https://orbxdirect.com/support. You can also join their online community at https://orbxsystems.com/forum/ where you can find helpful resources and interact with other users and developers.</p>
147
- </p> 0a6ba089eb<br />
148
- <br />
149
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Counter-Strike 1.6 V40.1 NonSteam - DiGiTALZONE.rar.rar _HOT_.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Counter-Strike 1.6 V40.1 NonSteam - DiGiTALZONE.rar.rar</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://imgfil.com/2uxWXr">https://imgfil.com/2uxWXr</a></b></p><br /><br />
2
-
3
- SnowBall War Mod CS 1.6 Counter Strike 1 6 Non Steam ... 2010 counter the and strike ZONE cs 1.6 v40 digitalzone free The exe Download v32 V40 Strike ... 2010).rar 6 download locations Counter Strike 1 6 Non Steam v48 Rev Emu Steam ... 4d29de3e1b<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Flamingo 2 Rhino 5 Crack.md DELETED
@@ -1,134 +0,0 @@
1
- <h2>flamingo 2 rhino 5 crack</h2><br /><p><b><b>Download Zip</b> &#10004; <a href="https://imgfil.com/2uy07K">https://imgfil.com/2uy07K</a></b></p><br /><br />
2
- <br />
3
- mantle 7 manta 1
4
-
5
- mandarin 7 manta 1
6
-
7
- marigold 1 marigold 5
8
-
9
- meadow 1 meadow 5
10
-
11
- melon 6 melon 1
12
-
13
- meringue 1 meringue 2
14
-
15
- meteor 2 meteor 2
16
-
17
- minaret 2 minaret 2
18
-
19
- mongoose 4 mongoose 4
20
-
21
- monkey 1 monkey 4
22
-
23
- monkey 2 monkey 4
24
-
25
- moss 6 moss 1
26
-
27
- mousetrap 4 mousetrap 2
28
-
29
- mushroom 5 mushroom 2
30
-
31
- musk 1 musk 2
32
-
33
- mustache 3 mustache 3
34
-
35
- nautilus 7 nautilus 1
36
-
37
- neptune 7 neptune 1
38
-
39
- oar 3 oar 3
40
-
41
- octopus 7 octopus 1
42
-
43
- orchid 4 orchid 1
44
-
45
- orca 7 orca 1
46
-
47
- otter 3 otter 4
48
-
49
- owl 3 owl 4
50
-
51
- ox 1 ox 5
52
-
53
- palm 2 palm 6
54
-
55
- palm 6 palm 2
56
-
57
- parrot 2 parrot 6
58
-
59
- parrot 5 parrot 2
60
-
61
- peanut 7 peanut 1
62
-
63
- pear 1 pear 7
64
-
65
- pear 7 pear 1
66
-
67
- pear 3 pear 4
68
-
69
- pencil 2 pencil 2
70
-
71
- penguin 3 penguin 5
72
-
73
- penguin 4 penguin 5
74
-
75
- penguin 5 penguin 4
76
-
77
- pepper 3 pepper 3
78
-
79
- pig 7 pig 1
80
-
81
- pig 2 pig 3
82
-
83
- pinecone 3 pinecone 1
84
-
85
- plank 7 plank 1
86
-
87
- plank 6 plank 4
88
-
89
- plum 1 plum 7
90
-
91
- plum 2 plum 7
92
-
93
- pond 2 pond 2
94
-
95
- pond 6 pond 2
96
-
97
- pond 7 pond 1
98
-
99
- pond 5 pond 6
100
-
101
- pond 6 pond 5
102
-
103
- porcupine 3 porcupine 4
104
-
105
- potted-plant 5 potted-plant 2
106
-
107
- potted-plant 4 potted-plant 6
108
-
109
- potted-plant 7 potted-plant 3
110
-
111
- potted-plant 6 potted-plant 4
112
-
113
- potted-plant 7 potted-plant 6
114
-
115
- printer 2 printer 2
116
-
117
- rabbit 4 rabbit 5
118
-
119
- rabbit 2 rabbit 4
120
-
121
- rag 2 rag 3
122
-
123
- rag 4 rag 3
124
-
125
- rail 1 rail 6
126
-
127
- rail 2 rail 6
128
-
129
- railway 2 railway 6
130
-
131
- rainbow 2 4fefd39f24<br />
132
- <br />
133
- <br />
134
- <p></p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/components/settings.tsx DELETED
@@ -1,157 +0,0 @@
1
- import { useEffect, useState } from 'react'
2
- import { useAtom } from 'jotai'
3
- import { Switch } from '@headlessui/react'
4
- import { toast } from 'react-hot-toast'
5
- import { hashAtom, voiceAtom } from '@/state'
6
- import {
7
- Dialog,
8
- DialogContent,
9
- DialogDescription,
10
- DialogFooter,
11
- DialogHeader,
12
- DialogTitle
13
- } from '@/components/ui/dialog'
14
- import { Button } from './ui/button'
15
- import { Input } from './ui/input'
16
- import { ChunkKeys, parseCookies, extraCurlFromCookie, encodeHeadersToCookie, getCookie, setCookie } from '@/lib/utils'
17
- import { ExternalLink } from './external-link'
18
- import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
19
-
20
-
21
- export function Settings() {
22
- const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
23
- const [loc, setLoc] = useAtom(hashAtom)
24
- const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys)))
25
- const [imageOnly, setImageOnly] = useState(getCookie('IMAGE_ONLY') !== '0')
26
- const [enableTTS, setEnableTTS] = useAtom(voiceAtom)
27
-
28
- useEffect(() => {
29
- if (isCopied) {
30
- toast.success('复制成功')
31
- }
32
- }, [isCopied])
33
-
34
- if (loc === 'settings') {
35
- return (
36
- <Dialog open onOpenChange={() => setLoc('')} modal>
37
- <DialogContent>
38
- <DialogHeader>
39
- <DialogTitle>设置你的用户信息</DialogTitle>
40
- <DialogDescription>
41
- 请使用 Edge 浏览器
42
- <ExternalLink
43
- href="https://www.bing.com/turing/captcha/challenge"
44
- >
45
- 打开并登录 Bing
46
- </ExternalLink>
47
- ,然后再打开
48
- <ExternalLink href="https://www.bing.com/turing/captcha/challenge">Challenge 接口</ExternalLink>
49
- 右键 》检查。打开开发者工具,在网络里面找到 Create 接口 》右键复制》复制为 cURL(bash),粘贴到此处,然后保存。
50
- <div className="h-2" />
51
- 图文示例:
52
- <ExternalLink href="https://github.com/weaigc/bingo#如何获取%20BING_HEADER">如何获取 BING_HEADER</ExternalLink>
53
- </DialogDescription>
54
- </DialogHeader>
55
- <div className="flex gap-4">
56
-
57
- </div>
58
- <Input
59
- value={curlValue}
60
- placeholder="在此填写用户信息,格式: curl 'https://www.bing.com/turing/captcha/challenge' ..."
61
- onChange={e => setCurlValue(e.target.value)}
62
- />
63
- <div className="flex gap-2">
64
- 身份信息仅用于画图(推荐)
65
- <Switch
66
- checked={imageOnly}
67
- className={`${imageOnly ? 'bg-blue-600' : 'bg-gray-200'} relative inline-flex h-6 w-11 items-center rounded-full`}
68
- onChange={(checked: boolean) => setImageOnly(checked)}
69
- >
70
- <span
71
- className={`${imageOnly ? 'translate-x-6' : 'translate-x-1'} inline-block h-4 w-4 transform rounded-full bg-white transition`}
72
- />
73
- </Switch>
74
- </div>
75
-
76
- <Button variant="ghost" className="bg-[#F5F5F5] hover:bg-[#F2F2F2]" onClick={() => copyToClipboard(btoa(curlValue))}>
77
- 转成 BING_HEADER 并复制
78
- </Button>
79
-
80
- <DialogFooter className="items-center">
81
- <Button
82
- variant="secondary"
83
- className="bg-[#c7f3ff] hover:bg-[#fdc7ff]"
84
- onClick={() => {
85
- let headerValue = curlValue
86
- if (headerValue) {
87
- try {
88
- headerValue = atob(headerValue)
89
- } catch (e) { }
90
- if (!/^\s*curl ['"]https:\/\/www\.bing\.com\/turing\/captcha\/challenge['"]/.test(headerValue)) {
91
- toast.error('格式不正确')
92
- return
93
- }
94
- const maxAge = 86400 * 30
95
- encodeHeadersToCookie(headerValue).forEach(cookie => document.cookie = `${cookie}; Max-Age=${maxAge}; Path=/; SameSite=None; Secure`)
96
- } else {
97
- [...ChunkKeys, 'BING_COOKIE', 'BING_UA', 'BING_IP'].forEach(key => setCookie(key, ''))
98
- }
99
- setCookie('IMAGE_ONLY', imageOnly ? '1' : '0')
100
-
101
- toast.success('保存成功')
102
- setLoc('')
103
- setTimeout(() => {
104
- location.href = './'
105
- }, 2000)
106
- }}
107
- >
108
- 保存
109
- </Button>
110
- </DialogFooter>
111
- </DialogContent>
112
- </Dialog>
113
- )
114
- } else if (loc === 'voice') {
115
- return (
116
- <Dialog open onOpenChange={() => setLoc('')} modal>
117
- <DialogContent>
118
- <DialogHeader>
119
- <DialogTitle>语音设置</DialogTitle>
120
- <DialogDescription>
121
- 目前仅支持 PC 端 Edge 及 Chrome 浏览器
122
- </DialogDescription>
123
- </DialogHeader>
124
-
125
- <div className="flex gap-2">
126
- 启用语音回答
127
- <Switch
128
- checked={enableTTS}
129
- className={`${enableTTS ? 'bg-blue-600' : 'bg-gray-200'} relative inline-flex h-6 w-11 items-center rounded-full`}
130
- onChange={(checked: boolean) => setEnableTTS(checked)}
131
- >
132
- <span
133
- className={`${enableTTS ? 'translate-x-6' : 'translate-x-1'} inline-block h-4 w-4 transform rounded-full bg-white transition`}
134
- />
135
- </Switch>
136
- </div>
137
-
138
- <DialogFooter className="items-center">
139
- <Button
140
- variant="secondary"
141
- onClick={() => {
142
- toast.success('保存成功')
143
- setLoc('')
144
- setTimeout(() => {
145
- location.href = './'
146
- }, 2000)
147
- }}
148
- >
149
- 保存
150
- </Button>
151
- </DialogFooter>
152
- </DialogContent>
153
- </Dialog>
154
- )
155
- }
156
- return null
157
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A00001/bingothoo/src/pages/api/image.ts DELETED
@@ -1,40 +0,0 @@
1
- 'use server'
2
-
3
- import { NextApiRequest, NextApiResponse } from 'next'
4
- import { debug } from '@/lib/isomorphic'
5
- import { createHeaders } from '@/lib/utils'
6
- import { createImage } from '@/lib/bots/bing/utils'
7
-
8
- export default async function handler(req: NextApiRequest, res: NextApiResponse) {
9
- const { prompt, id } = req.query
10
- if (!prompt) {
11
- return res.json({
12
- result: {
13
- value: 'Image',
14
- message: 'No Prompt'
15
- }
16
- })
17
- }
18
- try {
19
- const headers = createHeaders(req.cookies, {
20
- IMAGE_BING_COOKIE: process.env.IMAGE_BING_COOKIE
21
- }, 'image')
22
-
23
- debug('headers', headers)
24
- const response = await createImage(String(prompt), String(id), {
25
- ...headers,
26
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
27
- })
28
- res.writeHead(200, {
29
- 'Content-Type': 'text/plain; charset=UTF-8',
30
- })
31
- return res.end(response)
32
- } catch (e) {
33
- return res.json({
34
- result: {
35
- value: 'Error',
36
- message: `${e}`
37
- }
38
- })
39
- }
40
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: AI.Dashboard.HEDIS.Terminology.Vocabulary.Codes
3
- emoji: 😻
4
- colorFrom: red
5
- colorTo: pink
6
- sdk: static
7
- pinned: false
8
- license: mit
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/tts/PortaSpeech.py DELETED
@@ -1,85 +0,0 @@
1
- import torch
2
- from inference.tts.base_tts_infer import BaseTTSInfer
3
- from utils.ckpt_utils import load_ckpt
4
- from modules.portaspeech.portaspeech import PortaSpeech
5
-
6
- class TTSInference(BaseTTSInfer):
7
- def __init__(self, hparams, device=None):
8
- super().__init__(hparams, device)
9
- print("Initializing TTS model to %s" % device)
10
- self.spk_map = self.preprocessor.load_spk_map(self.data_dir)
11
- print("TTS loaded!")
12
-
13
- def build_model(self):
14
- model = PortaSpeech(self.ph_encoder, self.word_encoder)
15
- load_ckpt(model, self.hparams['work_dir'], 'model')
16
- with torch.no_grad():
17
- model.store_inverse_all()
18
- return model
19
-
20
- def forward_model(self, inp):
21
- sample = self.input_to_batch(inp)
22
- with torch.no_grad():
23
- output = self.model(
24
- sample['txt_tokens'],
25
- sample['word_tokens'],
26
- ph2word=sample['ph2word'],
27
- word_len=sample['word_lengths'].max(),
28
- infer=True,
29
- forward_post_glow=True,
30
- spk_id=sample.get('spk_ids')
31
- )
32
- mel_out = output['mel_out']
33
- wav_out = self.run_vocoder(mel_out)
34
- wav_out = wav_out.cpu().numpy()
35
- return wav_out[0]
36
-
37
- def preprocess_input(self, inp):
38
- """
39
-
40
- :param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)}
41
- :return:
42
- """
43
- preprocessor, preprocess_args = self.preprocessor, self.preprocess_args
44
- text_raw = inp['text']
45
- item_name = inp.get('item_name', '<ITEM_NAME>')
46
- spk_name = inp.get('spk_name', '<SINGLE_SPK>')
47
- ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph(
48
- preprocessor.txt_processor, text_raw, preprocess_args)
49
- word_token = self.word_encoder.encode(word)
50
- ph_token = self.ph_encoder.encode(ph)
51
- spk_id = self.spk_map[spk_name]
52
- item = {'item_name': item_name, 'text': txt, 'ph': ph, 'spk_id': spk_id,
53
- 'ph_token': ph_token, 'word_token': word_token, 'ph2word': ph2word,
54
- 'ph_words':ph_gb_word, 'words': word}
55
- item['ph_len'] = len(item['ph_token'])
56
- return item
57
-
58
- def input_to_batch(self, item):
59
- item_names = [item['item_name']]
60
- text = [item['text']]
61
- ph = [item['ph']]
62
- txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device)
63
- txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
64
- word_tokens = torch.LongTensor(item['word_token'])[None, :].to(self.device)
65
- word_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
66
- ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device)
67
- spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device)
68
- batch = {
69
- 'item_name': item_names,
70
- 'text': text,
71
- 'ph': ph,
72
- 'txt_tokens': txt_tokens,
73
- 'txt_lengths': txt_lengths,
74
- 'word_tokens': word_tokens,
75
- 'word_lengths': word_lengths,
76
- 'ph2word': ph2word,
77
- 'spk_ids': spk_ids,
78
- }
79
- return batch
80
-
81
- def postprocess_output(self, output):
82
- return output
83
-
84
-
85
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AISuperheroes/03GR-Chatbot-Memory/app.py DELETED
@@ -1,137 +0,0 @@
1
- from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
2
- import torch
3
- import gradio as gr
4
-
5
-
6
- # PersistDataset -----
7
- import os
8
- import csv
9
- import gradio as gr
10
- from gradio import inputs, outputs
11
- import huggingface_hub
12
- from huggingface_hub import Repository, hf_hub_download, upload_file
13
- from datetime import datetime
14
- DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/Carddata.csv"
15
- DATASET_REPO_ID = "awacke1/Carddata.csv"
16
- DATA_FILENAME = "Carddata.csv"
17
- DATA_FILE = os.path.join("data", DATA_FILENAME)
18
- HF_TOKEN = os.environ.get("HF_TOKEN")
19
-
20
- SCRIPT = """
21
- <script>
22
- if (!window.hasBeenRun) {
23
- window.hasBeenRun = true;
24
- console.log("should only happen once");
25
- document.querySelector("button.submit").click();
26
- }
27
- </script>
28
- """
29
-
30
- try:
31
- hf_hub_download(
32
- repo_id=DATASET_REPO_ID,
33
- filename=DATA_FILENAME,
34
- cache_dir=DATA_DIRNAME,
35
- force_filename=DATA_FILENAME
36
- )
37
- except:
38
- print("file not found")
39
- repo = Repository(
40
- local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
41
- )
42
-
43
- def generate_html() -> str:
44
- with open(DATA_FILE) as csvfile:
45
- reader = csv.DictReader(csvfile)
46
- rows = []
47
- for row in reader:
48
- rows.append(row)
49
- rows.reverse()
50
- if len(rows) == 0:
51
- return "no messages yet"
52
- else:
53
- html = "<div class='chatbot'>"
54
- for row in rows:
55
- html += "<div>"
56
- html += f"<span>{row['inputs']}</span>"
57
- html += f"<span class='outputs'>{row['outputs']}</span>"
58
- html += "</div>"
59
- html += "</div>"
60
- return html
61
-
62
- def store_message(name: str, message: str):
63
- if name and message:
64
- with open(DATA_FILE, "a") as csvfile:
65
- writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
66
- writer.writerow(
67
- {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
68
- )
69
- commit_url = repo.push_to_hub()
70
- return ""
71
-
72
- iface = gr.Interface(
73
- store_message,
74
- [
75
- inputs.Textbox(placeholder="Your name"),
76
- inputs.Textbox(placeholder="Your message", lines=2),
77
- ],
78
- "html",
79
- css="""
80
- .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; }
81
- """,
82
- title="Reading/writing to a HuggingFace dataset repo from Spaces",
83
- description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.",
84
- article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})",
85
- )
86
-
87
-
88
- mname = "facebook/blenderbot-400M-distill"
89
- model = BlenderbotForConditionalGeneration.from_pretrained(mname)
90
- tokenizer = BlenderbotTokenizer.from_pretrained(mname)
91
-
92
- def take_last_tokens(inputs, note_history, history):
93
- """Filter the last 128 tokens"""
94
- if inputs['input_ids'].shape[1] > 128:
95
- inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
96
- inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
97
- note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
98
- history = history[1:]
99
- return inputs, note_history, history
100
-
101
- def add_note_to_history(note, note_history):
102
- """Add a note to the historical information"""
103
- note_history.append(note)
104
- note_history = '</s> <s>'.join(note_history)
105
- return [note_history]
106
-
107
- title = "Chatbot State of the Art now with Memory Saved to Dataset"
108
- description = """Chatbot With Memory"""
109
-
110
- def chat(message, history):
111
- history = history or []
112
- if history:
113
- history_useful = ['</s> <s>'.join([str(a[0])+'</s> <s>'+str(a[1]) for a in history])]
114
- else:
115
- history_useful = []
116
- history_useful = add_note_to_history(message, history_useful)
117
- inputs = tokenizer(history_useful, return_tensors="pt")
118
- inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
119
- reply_ids = model.generate(**inputs)
120
- response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
121
- history_useful = add_note_to_history(response, history_useful)
122
- list_history = history_useful[0].split('</s> <s>')
123
- history.append((list_history[-2], list_history[-1]))
124
- store_message(message, response) # Save to dataset
125
- return history, history
126
-
127
- gr.Interface(
128
- fn=chat,
129
- theme="huggingface",
130
- css=".footer {display:none !important}",
131
- inputs=["text", "state"],
132
- outputs=["chatbot", "state"],
133
- title=title,
134
- allow_flagging="never",
135
- description=f"Gradio chatbot backed by memory in a dataset repository.",
136
- article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})"
137
- ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py DELETED
@@ -1,56 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/resnet50.py',
3
- '../_base_/datasets/imagenet_bs256_rsb_a12.py',
4
- '../_base_/schedules/imagenet_bs2048_rsb.py',
5
- '../_base_/default_runtime.py'
6
- ]
7
-
8
- # model settings
9
- model = dict(
10
- backbone=dict(
11
- norm_cfg=dict(type='SyncBN', requires_grad=True),
12
- drop_path_rate=0.05,
13
- ),
14
- head=dict(
15
- loss=dict(
16
- type='LabelSmoothLoss',
17
- label_smooth_val=0.1,
18
- mode='original',
19
- use_sigmoid=True,
20
- )),
21
- train_cfg=dict(augments=[
22
- dict(type='Mixup', alpha=0.2),
23
- dict(type='CutMix', alpha=1.0)
24
- ]),
25
- )
26
-
27
- # dataset settings
28
- train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))
29
-
30
- # schedule settings
31
- optim_wrapper = dict(
32
- optimizer=dict(weight_decay=0.01),
33
- paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
34
- )
35
-
36
- param_scheduler = [
37
- # warm up learning rate scheduler
38
- dict(
39
- type='LinearLR',
40
- start_factor=0.0001,
41
- by_epoch=True,
42
- begin=0,
43
- end=5,
44
- # update by iter
45
- convert_to_iter_based=True),
46
- # main learning rate scheduler
47
- dict(
48
- type='CosineAnnealingLR',
49
- T_max=595,
50
- eta_min=1.0e-6,
51
- by_epoch=True,
52
- begin=5,
53
- end=600)
54
- ]
55
-
56
- train_cfg = dict(by_epoch=True, max_epochs=600)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py DELETED
@@ -1,46 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/resnet50.py',
3
- '../_base_/datasets/imagenet_bs256_rsb_a12.py',
4
- '../_base_/schedules/imagenet_bs2048_rsb.py',
5
- '../_base_/default_runtime.py'
6
- ]
7
-
8
- # model settings
9
- model = dict(
10
- backbone=dict(
11
- norm_cfg=dict(type='SyncBN', requires_grad=True),
12
- drop_path_rate=0.05,
13
- ),
14
- head=dict(loss=dict(use_sigmoid=True)),
15
- train_cfg=dict(augments=[
16
- dict(type='Mixup', alpha=0.1),
17
- dict(type='CutMix', alpha=1.0)
18
- ]))
19
-
20
- # dataset settings
21
- train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))
22
-
23
- # schedule settings
24
- optim_wrapper = dict(
25
- paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.))
26
-
27
- param_scheduler = [
28
- # warm up learning rate scheduler
29
- dict(
30
- type='LinearLR',
31
- start_factor=0.0001,
32
- by_epoch=True,
33
- begin=0,
34
- end=5,
35
- # update by iter
36
- convert_to_iter_based=True),
37
- # main learning rate scheduler
38
- dict(
39
- type='CosineAnnealingLR',
40
- T_max=295,
41
- eta_min=1.0e-6,
42
- by_epoch=True,
43
- begin=5,
44
- end=300)
45
- ]
46
- train_cfg = dict(by_epoch=True, max_epochs=300)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/radio/Radio.js DELETED
@@ -1,82 +0,0 @@
1
- import Base from '../base/Base.js';
2
- import { Circle, Lines } from '../utils/Geoms.js';
3
- import Yoyo from '../utils/Yoyo.js';
4
-
5
- const Linear = Phaser.Math.Linear;
6
- const ExpoIn = Phaser.Math.Easing.Expo.In;
7
-
8
- class Radio extends Base {
9
- constructor(scene, config) {
10
- super(scene, config);
11
- this.type = 'rexSpinnerRadio';
12
- }
13
-
14
- buildShapes() {
15
- this.addShape((new Circle()).setName('center'));
16
- this.addShape((new Lines()).setName('arc0'));
17
- this.addShape((new Lines()).setName('arc1'));
18
- }
19
-
20
- updateShapes() {
21
- var centerX = this.centerX;
22
- var centerY = this.centerY;
23
- var radius = this.radius;
24
- var isSizeChanged = this.isSizeChanged;
25
-
26
- var centerRadius = (radius * 2) / 6;
27
- var x = centerX - radius + centerRadius;
28
- var y = centerY + radius - centerRadius;
29
-
30
- var shapes = this.getShapes();
31
- for (var i = 0, cnt = shapes.length; i < cnt; i++) {
32
- var shape = shapes[i];
33
-
34
- var t = (this.value + ((cnt - i) * 0.1)) % 1;
35
- t = ExpoIn(Yoyo(t));
36
-
37
- switch (shape.name) {
38
- case 'center':
39
- shape.fillStyle(this.color, Linear(0.25, 1, t))
40
-
41
- if (isSizeChanged) {
42
- shape
43
- .setRadius(centerRadius)
44
- .setCenterPosition(x, y);
45
- }
46
- break;
47
- case 'arc0':
48
- shape.fillStyle(this.color, Linear(0.25, 1, t));
49
-
50
- if (isSizeChanged) {
51
- var radius0 = centerRadius * 2,
52
- radius1 = centerRadius * 3;
53
- shape
54
- .startAt(x, y - radius0)
55
- .lineTo(x, y - radius1)
56
- .setIterations(8).arc(x, y, radius1, 270, 360)
57
- .lineTo(x + radius0, y)
58
- .setIterations(6).arc(x, y, radius0, 360, 270, true)
59
- .close();
60
- }
61
- break;
62
- case 'arc1':
63
- shape.fillStyle(this.color, Linear(0.25, 1, t));
64
-
65
- if (isSizeChanged) {
66
- var radius0 = centerRadius * 4,
67
- radius1 = centerRadius * 5;
68
- shape
69
- .startAt(x, y - radius0)
70
- .lineTo(x, y - radius1)
71
- .setIterations(8).arc(x, y, radius1, 270, 360)
72
- .lineTo(x + radius0, y)
73
- .setIterations(6).arc(x, y, radius0, 360, 270, true)
74
- .close();
75
- }
76
- break;
77
- }
78
- }
79
- }
80
- }
81
-
82
- export default Radio;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Akseluhr/whisper-sv-SE-auhr/app.py DELETED
@@ -1,47 +0,0 @@
1
- from transformers import pipeline
2
- import gradio as gr
3
- from pytube import YouTube
4
- import os
5
-
6
- # Get model from my model repo
7
- pipe = pipeline(model="Akseluhr/whisper-small-sv-SE-auhr-v2")
8
-
9
- def get_audio(url):
10
- yt = YouTube(url) # Downloads yt video
11
- video = yt.streams.filter(only_audio=True).first() # Gets the audio of the video
12
- print(video)
13
- out_file=video.download(output_path=".") # Write the stream to disk
14
- base, ext = os.path.splitext(out_file) # Split the path
15
- new_file = base+'.mp3'
16
- os.rename(out_file, new_file) # Convert to .mp3
17
- audio_file = new_file
18
- return audio_file
19
-
20
- def transcribe(rec=None, file=None, url=""):
21
- if rec != None:
22
- audio = rec
23
- elif file != None:
24
- audio = file
25
- elif url != "":
26
- audio = get_audio(url)
27
- else:
28
- return "Provide a recording or a file."
29
-
30
- text = pipe(audio)["text"]
31
- return text
32
-
33
-
34
- iface = gr.Interface(
35
- fn=transcribe,
36
- inputs=[
37
- gr.Audio(source="microphone", type="filepath", optional=True),
38
- gr.Audio(source="upload", type="filepath", optional=True),
39
- gr.Textbox(placeholder='Enter the Youtube video URL', label='URL', optional=True),
40
- ],
41
- outputs="text",
42
- title="Whisper Small Swedish",
43
- description="Realtime demo for Swedish speech recognition using a fine-tuned Whisper model.",
44
- )
45
-
46
-
47
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alican/pixera/data/single_dataset.py DELETED
@@ -1,40 +0,0 @@
1
- from data.base_dataset import BaseDataset, get_transform
2
- from data.image_folder import make_dataset
3
- from PIL import Image
4
-
5
-
6
- class SingleDataset(BaseDataset):
7
- """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
8
-
9
- It can be used for generating CycleGAN results only for one side with the model option '-model test'.
10
- """
11
-
12
- def __init__(self, opt):
13
- """Initialize this dataset class.
14
-
15
- Parameters:
16
- opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
17
- """
18
- BaseDataset.__init__(self, opt)
19
- self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
20
- input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
21
- self.transform = get_transform(opt, grayscale=(input_nc == 1))
22
-
23
- def __getitem__(self, index):
24
- """Return a data point and its metadata information.
25
-
26
- Parameters:
27
- index - - a random integer for data indexing
28
-
29
- Returns a dictionary that contains A and A_paths
30
- A(tensor) - - an image in one domain
31
- A_paths(str) - - the path of the image
32
- """
33
- A_path = self.A_paths[index]
34
- A_img = Image.open(A_path).convert('RGB')
35
- A = self.transform(A_img)
36
- return {'A': A, 'A_paths': A_path}
37
-
38
- def __len__(self):
39
- """Return the total number of images in the dataset."""
40
- return len(self.A_paths)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AmirTrader/LinearRegression/app.py DELETED
@@ -1,221 +0,0 @@
1
-
2
- import pandas as pd
3
- import panel as pn
4
- from datetime import datetime
5
- from datetime import date
6
- pn.extension('bokeh', template='bootstrap')
7
- import hvplot.pandas
8
-
9
- import pandas as pd
10
- import yfinance as yf
11
- import panel as pn
12
-
13
- @pn.cache
14
- def get_df(ticker, startdate , enddate , interval="1d",window=50,window2=150):
15
- # interval="1d"
16
- # get_df(ticker ="PG", startdate="2000-01-01" , enddate="2023-09-01" , interval="1d")
17
- DF = yf.Ticker(ticker).history(start=startdate,end=enddate,interval=interval)
18
- DF['SMA'] = DF.Close.rolling(window=window).mean()
19
- DF['SMA2'] = DF.Close.rolling(window=window2).mean()
20
- DF = DF.reset_index()
21
- return DF
22
-
23
- def get_hvplot(ticker , startdate , enddate , interval,window,window2):
24
- DF = get_df(ticker , startdate=startdate , enddate=enddate , interval=interval,window=window,window2=window2)
25
-
26
- import hvplot.pandas # Ensure hvplot is installed (pip install hvplot)
27
- from sklearn.linear_model import LinearRegression
28
- import holoviews as hv
29
- hv.extension('bokeh')
30
- # Assuming your dataframe is named 'df' with columns 'Date' and 'Close'
31
- # If not, replace 'Date' and 'Close' with your actual column names.
32
-
33
- # Step 1: Create a scatter plot using hvplot
34
- scatter_plot = DF.hvplot(x='Date', y='Close', kind='scatter',title=f'{ticker} Close vs. Date')
35
-
36
- # Step 2: Fit a linear regression model
37
- DF['Date2'] = pd.to_numeric(DF['Date'])
38
- X = DF[['Date2']]
39
- y = DF[['Close']] #.values
40
- model = LinearRegression().fit(X, y)
41
-
42
- # # Step 3: Predict using the linear regression model
43
- DF['Predicted_Close'] = model.predict(X)
44
-
45
- # # Step 4: Create a line plot for linear regression
46
- line_plot = DF.hvplot(x='Date', y='Predicted_Close', kind='line',line_dash='dashed', color='red')
47
- line_plot_SMA = DF.hvplot(x='Date', y='SMA', kind='line',line_dash='dashed', color='orange')
48
- line_plot_SMA2 = DF.hvplot(x='Date', y='SMA2', kind='line',line_dash='dashed', color='orange')
49
-
50
- # # Step 5: Overlay scatter plot and linear regression line
51
- # return (scatter_plot * line_plot).opts(width=800, height=600, show_grid=True, gridstyle={ 'grid_line_color': 'gray'})
52
- # grid_style = {'grid_line_color': 'black'}#, 'grid_line_width': 1.5, 'ygrid_bounds': (0.3, 0.7),'minor_xgrid_line_color': 'lightgray', 'xgrid_line_dash': [4, 4]}
53
- return (scatter_plot * line_plot *line_plot_SMA *line_plot_SMA2).opts(width=800, height=600, show_grid=True)
54
- def get_income_statement_df(ticker):
55
- yfobj = yf.Ticker(ticker)
56
- df= yfobj.financials.T
57
- df.index = pd.to_datetime(df.index, format='%Y-%m-%d')
58
- return df
59
-
60
- def get_income_hvplot(ticker):
61
- DF = get_income_statement_df(ticker)
62
- plt1 = DF.hvplot.line(y='Total Revenue') * DF.hvplot.scatter(y='Total Revenue').opts(color="red")
63
- plt1.opts(width=600, height=450, show_grid=True)
64
- plt2 = DF.hvplot.line(y='Gross Profit') * DF.hvplot.scatter(y='Gross Profit').opts(color="red")
65
- plt2.opts(width=600, height=450, show_grid=True)
66
- plt3 = DF.hvplot.line(y='Net Income') * DF.hvplot.scatter(y='Net Income').opts(color="red")
67
- plt3.opts(width=600, height=450, show_grid=True)
68
- return pn.Column(plt1 , plt2 , plt3 )
69
- # return ( DF.hvplot.line(y='Net Income') * DF.hvplot.scatter(y='Net Income').opts(color="red") )+ (DF.hvplot.line(y='Gross Profit') * DF.hvplot.scatter(y='Gross Profit').opts(color="red") )+
70
- # (DF.hvplot.line(y='Total Revenue') * DF.hvplot.scatter(y='Total Revenue').opts(color="red") )
71
-
72
- def lookup_discountedrate(betavalue):
73
- betavsdiscountedrate = {1: 5, 1: 6, 1.1: 6.5, 1.2: 7, 1.3: 7.5, 1.4: 8, 1.5: 8.5, 1.6: 9}
74
- if betavalue < 1:
75
- return betavsdiscountedrate[1] # Return the value for key 1 if key is below 1
76
- elif betavalue > 1.6:
77
- return betavsdiscountedrate[1.6] # Return the value for key 1.6 if key is above 1.6
78
- else:
79
- # Find the closest key to the given key
80
- closest_key = min(betavsdiscountedrate.keys(), key=lambda x: abs(x - betavalue))
81
-
82
- # Get the corresponding value
83
- value = betavsdiscountedrate[closest_key]
84
-
85
- return value
86
-
87
-
88
- def calc_fairprice_CDF(ticker):
89
- import yfinance as yf
90
- yfobj = yf.Ticker(ticker)
91
-
92
- #calculate eps growing next 5 years
93
- EPSnext5Y = yfobj.get_info()['trailingPE'] / yfobj.get_info()['trailingPegRatio']
94
-
95
- years = 10
96
- #
97
- cashflowinitial = yfobj.get_info()['operatingCashflow']
98
-
99
- cashflowlst=[]
100
- cashflow = cashflowinitial
101
- for i in range(1,years+1):
102
- cashflow = cashflow*(1+EPSnext5Y/100)
103
- cashflowlst.append(cashflow)
104
-
105
- try:
106
- discountedrate = lookup_discountedrate(yfobj.get_info()['beta'])
107
- except:
108
- discountedrate = 5
109
-
110
- discountedfactorlst =[]
111
- discountedvaluelst=[]
112
- discountedfactor =1
113
-
114
- for i in range(1,years+1):
115
- discountedfactor =( 1 / (1+ discountedrate/100)**i)
116
- discountedfactorlst.append(discountedfactor)
117
- discountedvalue = discountedfactor * cashflowlst[i-1]
118
- discountedvaluelst.append(discountedvalue)
119
-
120
- PV10yearsCashFlow =0
121
- for i in range(0,years):
122
- PV10yearsCashFlow += discountedvaluelst[i]
123
-
124
- #intrinsic value before cash/debt
125
- intrinsicvaluebeforecashdebt = PV10yearsCashFlow / yfobj.get_info()['sharesOutstanding']
126
-
127
- debtpershare = yfobj.get_info()['totalDebt'] / yfobj.get_info()['sharesOutstanding']
128
- cashpershare = yfobj.get_info()['totalCash'] / yfobj.get_info()['sharesOutstanding']
129
- intrinsicvalue = intrinsicvaluebeforecashdebt + cashpershare - debtpershare
130
-
131
- previousClose = yfobj.get_info()['previousClose']
132
- deviation = 100*(intrinsicvalue - previousClose) / previousClose
133
- # return intrinsicvalue , previousClose , deviation
134
- return pn.Row(pn.widgets.StaticText(name='fairprice_CDF', value=str(round(intrinsicvalue,1))) ,pn.widgets.StaticText(name='deviation', value=str(round(deviation,2))) )
135
-
136
-
137
- def calc_fairprice_DnetP(ticker):
138
- import yfinance as yf
139
- yfobj = yf.Ticker(ticker)
140
-
141
- #calculate eps growing next 5 years
142
- EPSnext5Y = yfobj.get_info()['trailingPE'] / yfobj.get_info()['trailingPegRatio']
143
-
144
- years = 5
145
- #
146
- cashflowinitial = yfobj.get_info()['netIncomeToCommon']
147
-
148
- cashflowlst=[]
149
- cashflow = cashflowinitial
150
- for i in range(1,years+1):
151
- cashflow = cashflow*(1+EPSnext5Y/100)
152
- cashflowlst.append(cashflow)
153
-
154
- try:
155
- discountedrate = lookup_discountedrate(yfobj.get_info()['beta'])
156
- except:
157
- discountedrate = 5
158
-
159
- discountedfactorlst =[]
160
- discountedvaluelst=[]
161
- discountedfactor =1
162
-
163
- for i in range(1,years+1):
164
- discountedfactor =( 1 / (1+ discountedrate/100)**i)
165
- discountedfactorlst.append(discountedfactor)
166
- discountedvalue = discountedfactor * cashflowlst[i-1]
167
- discountedvaluelst.append(discountedvalue)
168
-
169
- PV10yearsCashFlow =0
170
- for i in range(0,years):
171
- PV10yearsCashFlow += discountedvaluelst[i]
172
-
173
- #intrinsic value before cash/debt
174
- intrinsicvaluebeforecashdebt = PV10yearsCashFlow / yfobj.get_info()['sharesOutstanding']
175
-
176
- debtpershare = yfobj.get_info()['totalDebt'] / yfobj.get_info()['sharesOutstanding']
177
- cashpershare = yfobj.get_info()['totalCash'] / yfobj.get_info()['sharesOutstanding']
178
- intrinsicvalue = intrinsicvaluebeforecashdebt + cashpershare - debtpershare
179
-
180
- previousClose = yfobj.get_info()['previousClose']
181
- intrinsicvalue= intrinsicvalue + previousClose
182
-
183
- deviation = 100*(intrinsicvalue - previousClose) / previousClose
184
- # return intrinsicvalue , previousClose , deviation
185
- return pn.Row(pn.widgets.StaticText(name='fairprice_DnetP', value=str(round(intrinsicvalue,1))) , pn.widgets.StaticText(name='deviation', value=str(round(deviation,2))) )
186
-
187
- # tickers = ['AAPL', 'META', 'GOOG', 'IBM', 'MSFT','NKE','DLTR','DG']
188
- # ticker = pn.widgets.Select(name='Ticker', options=tickers)
189
-
190
- tickers = pd.read_csv('tickers.csv').Ticker.to_list()
191
- ticker = pn.widgets.AutocompleteInput(name='Ticker', options=tickers , placeholder='Write Ticker here همین جا')
192
- ticker.value = "AAPL"
193
- window = pn.widgets.IntSlider(name='Window Size', value=50, start=5, end=1000, step=5)
194
- window2 = pn.widgets.IntSlider(name='Window Size2', value=150, start=5, end=1000, step=5)
195
-
196
- # Create a DatePicker widget with a minimum date of 2000-01-01
197
- date_start = pn.widgets.DatePicker(
198
- name ="Start Date",
199
- description='Select a Date',
200
- start= date(2000, 1, 1)
201
- )
202
-
203
- date_end = pn.widgets.DatePicker(
204
- name ="End Date",# value=datetime(2000, 1, 1),
205
- description='Select a Date',
206
- end= date.today() #date(2023, 9, 1)
207
- )
208
-
209
- date_start.value = date(2010,1,1)
210
- date_end.value = date.today()
211
-
212
- pn.Row(
213
- pn.Column( ticker, window , window2, date_start , date_end),
214
- # pn.bind(calc_fairprice_CDF,ticker),
215
- # pn.bind(calc_fairprice_DnetP,ticker)),
216
- # pn.panel(pn.bind(get_hvplot, ticker, "2010-01-01","2023-09-01","1d")) #, sizing_mode='stretch_width')
217
- pn.panel(pn.bind(get_hvplot, ticker, date_start , date_end,"1d",window,window2)), #, sizing_mode='stretch_width')
218
- pn.panel(pn.bind(get_income_hvplot, ticker)) #, sizing_mode='stretch_width')
219
- ).servable(title="Under Valued Screener- Linear Regression")
220
-
221
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/python/dqn/dqn.py DELETED
@@ -1,245 +0,0 @@
1
- from typing import Any, Dict, List, Optional, Tuple, Type, Union
2
-
3
- import gym
4
- import numpy as np
5
- import torch as th
6
- from torch.nn import functional as F
7
-
8
- from stable_baselines3.common import logger
9
- from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
10
- from stable_baselines3.common.preprocessing import maybe_transpose
11
- from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
12
- from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
13
- from stable_baselines3.dqn.policies import DQNPolicy
14
-
15
-
16
- class DQN(OffPolicyAlgorithm):
17
- """
18
- Deep Q-Network (DQN)
19
-
20
- Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
21
- Default hyperparameters are taken from the nature paper,
22
- except for the optimizer and learning rate that were taken from Stable Baselines defaults.
23
-
24
- :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
25
- :param env: The environment to learn from (if registered in Gym, can be str)
26
- :param learning_rate: The learning rate, it can be a function
27
- of the current progress remaining (from 1 to 0)
28
- :param buffer_size: size of the replay buffer
29
- :param learning_starts: how many steps of the model to collect transitions for before learning starts
30
- :param batch_size: Minibatch size for each gradient update
31
- :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
32
- :param gamma: the discount factor
33
- :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
34
- like ``(5, "step")`` or ``(2, "episode")``.
35
- :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
36
- Set to ``-1`` means to do as many gradient steps as steps done in the environment
37
- during the rollout.
38
- :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
39
- at a cost of more complexity.
40
- See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
41
- :param target_update_interval: update the target network every ``target_update_interval``
42
- environment steps.
43
- :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
44
- :param exploration_initial_eps: initial value of random action probability
45
- :param exploration_final_eps: final value of random action probability
46
- :param max_grad_norm: The maximum value for the gradient clipping
47
- :param tensorboard_log: the log location for tensorboard (if None, no logging)
48
- :param create_eval_env: Whether to create a second environment that will be
49
- used for evaluating the agent periodically. (Only available when passing string for the environment)
50
- :param policy_kwargs: additional arguments to be passed to the policy on creation
51
- :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
52
- :param seed: Seed for the pseudo random generators
53
- :param device: Device (cpu, cuda, ...) on which the code should be run.
54
- Setting it to auto, the code will be run on the GPU if possible.
55
- :param _init_setup_model: Whether or not to build the network at the creation of the instance
56
- """
57
-
58
- def __init__(
59
- self,
60
- policy: Union[str, Type[DQNPolicy]],
61
- env: Union[GymEnv, str],
62
- learning_rate: Union[float, Schedule] = 1e-4,
63
- buffer_size: int = 1000000,
64
- learning_starts: int = 50000,
65
- batch_size: Optional[int] = 32,
66
- tau: float = 1.0,
67
- gamma: float = 0.99,
68
- train_freq: Union[int, Tuple[int, str]] = 4,
69
- gradient_steps: int = 1,
70
- optimize_memory_usage: bool = False,
71
- target_update_interval: int = 10000,
72
- exploration_fraction: float = 0.1,
73
- exploration_initial_eps: float = 1.0,
74
- exploration_final_eps: float = 0.05,
75
- max_grad_norm: float = 10,
76
- tensorboard_log: Optional[str] = None,
77
- create_eval_env: bool = False,
78
- policy_kwargs: Optional[Dict[str, Any]] = None,
79
- verbose: int = 0,
80
- seed: Optional[int] = None,
81
- device: Union[th.device, str] = "auto",
82
- _init_setup_model: bool = True,
83
- ):
84
-
85
- super(DQN, self).__init__(
86
- policy,
87
- env,
88
- DQNPolicy,
89
- learning_rate,
90
- buffer_size,
91
- learning_starts,
92
- batch_size,
93
- tau,
94
- gamma,
95
- train_freq,
96
- gradient_steps,
97
- action_noise=None, # No action noise
98
- policy_kwargs=policy_kwargs,
99
- tensorboard_log=tensorboard_log,
100
- verbose=verbose,
101
- device=device,
102
- create_eval_env=create_eval_env,
103
- seed=seed,
104
- sde_support=False,
105
- optimize_memory_usage=optimize_memory_usage,
106
- supported_action_spaces=(gym.spaces.Discrete,),
107
- )
108
-
109
- self.exploration_initial_eps = exploration_initial_eps
110
- self.exploration_final_eps = exploration_final_eps
111
- self.exploration_fraction = exploration_fraction
112
- self.target_update_interval = target_update_interval
113
- self.max_grad_norm = max_grad_norm
114
- # "epsilon" for the epsilon-greedy exploration
115
- self.exploration_rate = 0.0
116
- # Linear schedule will be defined in `_setup_model()`
117
- self.exploration_schedule = None
118
- self.q_net, self.q_net_target = None, None
119
-
120
- if _init_setup_model:
121
- self._setup_model()
122
-
123
- def _setup_model(self) -> None:
124
- super(DQN, self)._setup_model()
125
- self._create_aliases()
126
- self.exploration_schedule = get_linear_fn(
127
- self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
128
- )
129
-
130
- def _create_aliases(self) -> None:
131
- self.q_net = self.policy.q_net
132
- self.q_net_target = self.policy.q_net_target
133
-
134
- def _on_step(self) -> None:
135
- """
136
- Update the exploration rate and target network if needed.
137
- This method is called in ``collect_rollouts()`` after each step in the environment.
138
- """
139
- if self.num_timesteps % self.target_update_interval == 0:
140
- polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
141
-
142
- self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
143
- logger.record("rollout/exploration rate", self.exploration_rate)
144
-
145
- def train(self, gradient_steps: int, batch_size: int = 100) -> None:
146
- # Update learning rate according to schedule
147
- self._update_learning_rate(self.policy.optimizer)
148
-
149
- losses = []
150
- for _ in range(gradient_steps):
151
- # Sample replay buffer
152
- replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
153
-
154
- with th.no_grad():
155
- # Compute the next Q-values using the target network
156
- next_q_values = self.q_net_target(replay_data.next_observations)
157
- # Follow greedy policy: use the one with the highest value
158
- next_q_values, _ = next_q_values.max(dim=1)
159
- # Avoid potential broadcast issue
160
- next_q_values = next_q_values.reshape(-1, 1)
161
- # 1-step TD target
162
- target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
163
-
164
- # Get current Q-values estimates
165
- current_q_values = self.q_net(replay_data.observations)
166
-
167
- # Retrieve the q-values for the actions from the replay buffer
168
- current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
169
-
170
- # Compute Huber loss (less sensitive to outliers)
171
- loss = F.smooth_l1_loss(current_q_values, target_q_values)
172
- losses.append(loss.item())
173
-
174
- # Optimize the policy
175
- self.policy.optimizer.zero_grad()
176
- loss.backward()
177
- # Clip gradient norm
178
- th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
179
- self.policy.optimizer.step()
180
-
181
- # Increase update counter
182
- self._n_updates += gradient_steps
183
-
184
- logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
185
- logger.record("train/loss", np.mean(losses))
186
-
187
- def predict(
188
- self,
189
- observation: np.ndarray,
190
- state: Optional[np.ndarray] = None,
191
- mask: Optional[np.ndarray] = None,
192
- deterministic: bool = False,
193
- ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
194
- """
195
- Overrides the base_class predict function to include epsilon-greedy exploration.
196
-
197
- :param observation: the input observation
198
- :param state: The last states (can be None, used in recurrent policies)
199
- :param mask: The last masks (can be None, used in recurrent policies)
200
- :param deterministic: Whether or not to return deterministic actions.
201
- :return: the model's action and the next state
202
- (used in recurrent policies)
203
- """
204
- if not deterministic and np.random.rand() < self.exploration_rate:
205
- if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
206
- n_batch = observation.shape[0]
207
- action = np.array([self.action_space.sample() for _ in range(n_batch)])
208
- else:
209
- action = np.array(self.action_space.sample())
210
- else:
211
- action, state = self.policy.predict(observation, state, mask, deterministic)
212
- return action, state
213
-
214
- def learn(
215
- self,
216
- total_timesteps: int,
217
- callback: MaybeCallback = None,
218
- log_interval: int = 4,
219
- eval_env: Optional[GymEnv] = None,
220
- eval_freq: int = -1,
221
- n_eval_episodes: int = 5,
222
- tb_log_name: str = "DQN",
223
- eval_log_path: Optional[str] = None,
224
- reset_num_timesteps: bool = True,
225
- ) -> OffPolicyAlgorithm:
226
-
227
- return super(DQN, self).learn(
228
- total_timesteps=total_timesteps,
229
- callback=callback,
230
- log_interval=log_interval,
231
- eval_env=eval_env,
232
- eval_freq=eval_freq,
233
- n_eval_episodes=n_eval_episodes,
234
- tb_log_name=tb_log_name,
235
- eval_log_path=eval_log_path,
236
- reset_num_timesteps=reset_num_timesteps,
237
- )
238
-
239
- def _excluded_save_params(self) -> List[str]:
240
- return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
241
-
242
- def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
243
- state_dicts = ["policy", "policy.optimizer"]
244
-
245
- return state_dicts, []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/utils/opt.py DELETED
@@ -1,100 +0,0 @@
1
- """
2
- Modified from https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.4/tools/program.py
3
- """
4
- from typing import Optional
5
- from argparse import ArgumentParser, RawDescriptionHelpFormatter
6
- import yaml
7
- import json
8
- from src.utils.loading import load_yaml
9
-
10
-
11
- class Config(dict):
12
- """Single level attribute dict, NOT recursive"""
13
-
14
- def __init__(self, yaml_path):
15
- super(Config, self).__init__()
16
-
17
- config = load_yaml(yaml_path)
18
- super(Config, self).update(config)
19
-
20
- def __getattr__(self, key):
21
- if key in self:
22
- return self[key]
23
- raise AttributeError("object has no attribute '{}'".format(key))
24
-
25
- def save_yaml(self, path):
26
- print(f"Saving config to {path}...")
27
- with open(path, "w") as f:
28
- yaml.dump(dict(self), f, default_flow_style=False, sort_keys=False)
29
-
30
- @classmethod
31
- def load_yaml(cls, path):
32
- print(f"Loading config from {path}...")
33
- return cls(path)
34
-
35
- def __repr__(self) -> str:
36
- return str(json.dumps(dict(self), sort_keys=False, indent=4))
37
-
38
-
39
- class Opts(ArgumentParser):
40
- def __init__(self, cfg: Optional[str] = None):
41
- super(Opts, self).__init__(formatter_class=RawDescriptionHelpFormatter)
42
- self.add_argument(
43
- "-c", "--config", default=cfg, help="configuration file to use"
44
- )
45
- self.add_argument(
46
- "-o", "--opt", nargs="+", help="override configuration options"
47
- )
48
-
49
- def parse_args(self, argv=None):
50
- args = super(Opts, self).parse_args(argv)
51
- assert args.config is not None, "Please specify --config=configure_file_path."
52
- args.opt = self._parse_opt(args.opt)
53
-
54
- config = Config(args.config)
55
- config = self.override(config, args.opt)
56
- return config
57
-
58
- def _parse_opt(self, opts):
59
- config = {}
60
- if not opts:
61
- return config
62
- for s in opts:
63
- s = s.strip()
64
- k, v = s.split("=")
65
- config[k] = yaml.load(v, Loader=yaml.Loader)
66
- return config
67
-
68
- def override(self, global_config, overriden):
69
- """
70
- Merge config into global config.
71
- Args:
72
- config (dict): Config to be merged.
73
- Returns: global config
74
- """
75
- print("Overriding configurating")
76
- for key, value in overriden.items():
77
- if "." not in key:
78
- if isinstance(value, dict) and key in global_config:
79
- global_config[key].update(value)
80
- else:
81
- if key in global_config.keys():
82
- global_config[key] = value
83
- print(f"'{key}' not found in config")
84
- else:
85
- sub_keys = key.split(".")
86
- assert (
87
- sub_keys[0] in global_config
88
- ), "the sub_keys can only be one of global_config: {}, but get: {}, please check your running command".format(
89
- global_config.keys(), sub_keys[0]
90
- )
91
- cur = global_config[sub_keys[0]]
92
- for idx, sub_key in enumerate(sub_keys[1:]):
93
- if idx == len(sub_keys) - 2:
94
- if sub_key in cur.keys():
95
- cur[sub_key] = value
96
- else:
97
- print(f"'{key}' not found in config")
98
- else:
99
- cur = cur[sub_key]
100
- return global_config
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/sabl_retina_head.py DELETED
@@ -1,621 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn as nn
4
- from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
5
- from mmcv.runner import force_fp32
6
-
7
- from mmdet.core import (build_anchor_generator, build_assigner,
8
- build_bbox_coder, build_sampler, images_to_levels,
9
- multi_apply, multiclass_nms, unmap)
10
- from ..builder import HEADS, build_loss
11
- from .base_dense_head import BaseDenseHead
12
- from .guided_anchor_head import GuidedAnchorHead
13
-
14
-
15
- @HEADS.register_module()
16
- class SABLRetinaHead(BaseDenseHead):
17
- """Side-Aware Boundary Localization (SABL) for RetinaNet.
18
-
19
- The anchor generation, assigning and sampling in SABLRetinaHead
20
- are the same as GuidedAnchorHead for guided anchoring.
21
-
22
- Please refer to https://arxiv.org/abs/1912.04260 for more details.
23
-
24
- Args:
25
- num_classes (int): Number of classes.
26
- in_channels (int): Number of channels in the input feature map.
27
- stacked_convs (int): Number of Convs for classification \
28
- and regression branches. Defaults to 4.
29
- feat_channels (int): Number of hidden channels. \
30
- Defaults to 256.
31
- approx_anchor_generator (dict): Config dict for approx generator.
32
- square_anchor_generator (dict): Config dict for square generator.
33
- conv_cfg (dict): Config dict for ConvModule. Defaults to None.
34
- norm_cfg (dict): Config dict for Norm Layer. Defaults to None.
35
- bbox_coder (dict): Config dict for bbox coder.
36
- reg_decoded_bbox (bool): If true, the regression loss would be
37
- applied directly on decoded bounding boxes, converting both
38
- the predicted boxes and regression targets to absolute
39
- coordinates format. Default False. It should be `True` when
40
- using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
41
- train_cfg (dict): Training config of SABLRetinaHead.
42
- test_cfg (dict): Testing config of SABLRetinaHead.
43
- loss_cls (dict): Config of classification loss.
44
- loss_bbox_cls (dict): Config of classification loss for bbox branch.
45
- loss_bbox_reg (dict): Config of regression loss for bbox branch.
46
- """
47
-
48
- def __init__(self,
49
- num_classes,
50
- in_channels,
51
- stacked_convs=4,
52
- feat_channels=256,
53
- approx_anchor_generator=dict(
54
- type='AnchorGenerator',
55
- octave_base_scale=4,
56
- scales_per_octave=3,
57
- ratios=[0.5, 1.0, 2.0],
58
- strides=[8, 16, 32, 64, 128]),
59
- square_anchor_generator=dict(
60
- type='AnchorGenerator',
61
- ratios=[1.0],
62
- scales=[4],
63
- strides=[8, 16, 32, 64, 128]),
64
- conv_cfg=None,
65
- norm_cfg=None,
66
- bbox_coder=dict(
67
- type='BucketingBBoxCoder',
68
- num_buckets=14,
69
- scale_factor=3.0),
70
- reg_decoded_bbox=False,
71
- train_cfg=None,
72
- test_cfg=None,
73
- loss_cls=dict(
74
- type='FocalLoss',
75
- use_sigmoid=True,
76
- gamma=2.0,
77
- alpha=0.25,
78
- loss_weight=1.0),
79
- loss_bbox_cls=dict(
80
- type='CrossEntropyLoss',
81
- use_sigmoid=True,
82
- loss_weight=1.5),
83
- loss_bbox_reg=dict(
84
- type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)):
85
- super(SABLRetinaHead, self).__init__()
86
- self.in_channels = in_channels
87
- self.num_classes = num_classes
88
- self.feat_channels = feat_channels
89
- self.num_buckets = bbox_coder['num_buckets']
90
- self.side_num = int(np.ceil(self.num_buckets / 2))
91
-
92
- assert (approx_anchor_generator['octave_base_scale'] ==
93
- square_anchor_generator['scales'][0])
94
- assert (approx_anchor_generator['strides'] ==
95
- square_anchor_generator['strides'])
96
-
97
- self.approx_anchor_generator = build_anchor_generator(
98
- approx_anchor_generator)
99
- self.square_anchor_generator = build_anchor_generator(
100
- square_anchor_generator)
101
- self.approxs_per_octave = (
102
- self.approx_anchor_generator.num_base_anchors[0])
103
-
104
- # one anchor per location
105
- self.num_anchors = 1
106
- self.stacked_convs = stacked_convs
107
- self.conv_cfg = conv_cfg
108
- self.norm_cfg = norm_cfg
109
-
110
- self.reg_decoded_bbox = reg_decoded_bbox
111
-
112
- self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
113
- self.sampling = loss_cls['type'] not in [
114
- 'FocalLoss', 'GHMC', 'QualityFocalLoss'
115
- ]
116
- if self.use_sigmoid_cls:
117
- self.cls_out_channels = num_classes
118
- else:
119
- self.cls_out_channels = num_classes + 1
120
-
121
- self.bbox_coder = build_bbox_coder(bbox_coder)
122
- self.loss_cls = build_loss(loss_cls)
123
- self.loss_bbox_cls = build_loss(loss_bbox_cls)
124
- self.loss_bbox_reg = build_loss(loss_bbox_reg)
125
-
126
- self.train_cfg = train_cfg
127
- self.test_cfg = test_cfg
128
-
129
- if self.train_cfg:
130
- self.assigner = build_assigner(self.train_cfg.assigner)
131
- # use PseudoSampler when sampling is False
132
- if self.sampling and hasattr(self.train_cfg, 'sampler'):
133
- sampler_cfg = self.train_cfg.sampler
134
- else:
135
- sampler_cfg = dict(type='PseudoSampler')
136
- self.sampler = build_sampler(sampler_cfg, context=self)
137
-
138
- self.fp16_enabled = False
139
- self._init_layers()
140
-
141
- def _init_layers(self):
142
- self.relu = nn.ReLU(inplace=True)
143
- self.cls_convs = nn.ModuleList()
144
- self.reg_convs = nn.ModuleList()
145
- for i in range(self.stacked_convs):
146
- chn = self.in_channels if i == 0 else self.feat_channels
147
- self.cls_convs.append(
148
- ConvModule(
149
- chn,
150
- self.feat_channels,
151
- 3,
152
- stride=1,
153
- padding=1,
154
- conv_cfg=self.conv_cfg,
155
- norm_cfg=self.norm_cfg))
156
- self.reg_convs.append(
157
- ConvModule(
158
- chn,
159
- self.feat_channels,
160
- 3,
161
- stride=1,
162
- padding=1,
163
- conv_cfg=self.conv_cfg,
164
- norm_cfg=self.norm_cfg))
165
- self.retina_cls = nn.Conv2d(
166
- self.feat_channels, self.cls_out_channels, 3, padding=1)
167
- self.retina_bbox_reg = nn.Conv2d(
168
- self.feat_channels, self.side_num * 4, 3, padding=1)
169
- self.retina_bbox_cls = nn.Conv2d(
170
- self.feat_channels, self.side_num * 4, 3, padding=1)
171
-
172
- def init_weights(self):
173
- for m in self.cls_convs:
174
- normal_init(m.conv, std=0.01)
175
- for m in self.reg_convs:
176
- normal_init(m.conv, std=0.01)
177
- bias_cls = bias_init_with_prob(0.01)
178
- normal_init(self.retina_cls, std=0.01, bias=bias_cls)
179
- normal_init(self.retina_bbox_reg, std=0.01)
180
- normal_init(self.retina_bbox_cls, std=0.01)
181
-
182
- def forward_single(self, x):
183
- cls_feat = x
184
- reg_feat = x
185
- for cls_conv in self.cls_convs:
186
- cls_feat = cls_conv(cls_feat)
187
- for reg_conv in self.reg_convs:
188
- reg_feat = reg_conv(reg_feat)
189
- cls_score = self.retina_cls(cls_feat)
190
- bbox_cls_pred = self.retina_bbox_cls(reg_feat)
191
- bbox_reg_pred = self.retina_bbox_reg(reg_feat)
192
- bbox_pred = (bbox_cls_pred, bbox_reg_pred)
193
- return cls_score, bbox_pred
194
-
195
- def forward(self, feats):
196
- return multi_apply(self.forward_single, feats)
197
-
198
- def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
199
- """Get squares according to feature map sizes and guided anchors.
200
-
201
- Args:
202
- featmap_sizes (list[tuple]): Multi-level feature map sizes.
203
- img_metas (list[dict]): Image meta info.
204
- device (torch.device | str): device for returned tensors
205
-
206
- Returns:
207
- tuple: square approxs of each image
208
- """
209
- num_imgs = len(img_metas)
210
-
211
- # since feature map sizes of all images are the same, we only compute
212
- # squares for one time
213
- multi_level_squares = self.square_anchor_generator.grid_anchors(
214
- featmap_sizes, device=device)
215
- squares_list = [multi_level_squares for _ in range(num_imgs)]
216
-
217
- return squares_list
218
-
219
- def get_target(self,
220
- approx_list,
221
- inside_flag_list,
222
- square_list,
223
- gt_bboxes_list,
224
- img_metas,
225
- gt_bboxes_ignore_list=None,
226
- gt_labels_list=None,
227
- label_channels=None,
228
- sampling=True,
229
- unmap_outputs=True):
230
- """Compute bucketing targets.
231
- Args:
232
- approx_list (list[list]): Multi level approxs of each image.
233
- inside_flag_list (list[list]): Multi level inside flags of each
234
- image.
235
- square_list (list[list]): Multi level squares of each image.
236
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
237
- img_metas (list[dict]): Meta info of each image.
238
- gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
239
- gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
240
- label_channels (int): Channel of label.
241
- sampling (bool): Sample Anchors or not.
242
- unmap_outputs (bool): unmap outputs or not.
243
-
244
- Returns:
245
- tuple: Returns a tuple containing learning targets.
246
-
247
- - labels_list (list[Tensor]): Labels of each level.
248
- - label_weights_list (list[Tensor]): Label weights of each \
249
- level.
250
- - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \
251
- each level.
252
- - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \
253
- each level.
254
- - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \
255
- each level.
256
- - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \
257
- each level.
258
- - num_total_pos (int): Number of positive samples in all \
259
- images.
260
- - num_total_neg (int): Number of negative samples in all \
261
- images.
262
- """
263
- num_imgs = len(img_metas)
264
- assert len(approx_list) == len(inside_flag_list) == len(
265
- square_list) == num_imgs
266
- # anchor number of multi levels
267
- num_level_squares = [squares.size(0) for squares in square_list[0]]
268
- # concat all level anchors and flags to a single tensor
269
- inside_flag_flat_list = []
270
- approx_flat_list = []
271
- square_flat_list = []
272
- for i in range(num_imgs):
273
- assert len(square_list[i]) == len(inside_flag_list[i])
274
- inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
275
- approx_flat_list.append(torch.cat(approx_list[i]))
276
- square_flat_list.append(torch.cat(square_list[i]))
277
-
278
- # compute targets for each image
279
- if gt_bboxes_ignore_list is None:
280
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
281
- if gt_labels_list is None:
282
- gt_labels_list = [None for _ in range(num_imgs)]
283
- (all_labels, all_label_weights, all_bbox_cls_targets,
284
- all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights,
285
- pos_inds_list, neg_inds_list) = multi_apply(
286
- self._get_target_single,
287
- approx_flat_list,
288
- inside_flag_flat_list,
289
- square_flat_list,
290
- gt_bboxes_list,
291
- gt_bboxes_ignore_list,
292
- gt_labels_list,
293
- img_metas,
294
- label_channels=label_channels,
295
- sampling=sampling,
296
- unmap_outputs=unmap_outputs)
297
- # no valid anchors
298
- if any([labels is None for labels in all_labels]):
299
- return None
300
- # sampled anchors of all images
301
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
302
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
303
- # split targets to a list w.r.t. multiple levels
304
- labels_list = images_to_levels(all_labels, num_level_squares)
305
- label_weights_list = images_to_levels(all_label_weights,
306
- num_level_squares)
307
- bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets,
308
- num_level_squares)
309
- bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights,
310
- num_level_squares)
311
- bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets,
312
- num_level_squares)
313
- bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights,
314
- num_level_squares)
315
- return (labels_list, label_weights_list, bbox_cls_targets_list,
316
- bbox_cls_weights_list, bbox_reg_targets_list,
317
- bbox_reg_weights_list, num_total_pos, num_total_neg)
318
-
319
- def _get_target_single(self,
320
- flat_approxs,
321
- inside_flags,
322
- flat_squares,
323
- gt_bboxes,
324
- gt_bboxes_ignore,
325
- gt_labels,
326
- img_meta,
327
- label_channels=None,
328
- sampling=True,
329
- unmap_outputs=True):
330
- """Compute regression and classification targets for anchors in a
331
- single image.
332
-
333
- Args:
334
- flat_approxs (Tensor): flat approxs of a single image,
335
- shape (n, 4)
336
- inside_flags (Tensor): inside flags of a single image,
337
- shape (n, ).
338
- flat_squares (Tensor): flat squares of a single image,
339
- shape (approxs_per_octave * n, 4)
340
- gt_bboxes (Tensor): Ground truth bboxes of a single image, \
341
- shape (num_gts, 4).
342
- gt_bboxes_ignore (Tensor): Ground truth bboxes to be
343
- ignored, shape (num_ignored_gts, 4).
344
- gt_labels (Tensor): Ground truth labels of each box,
345
- shape (num_gts,).
346
- img_meta (dict): Meta info of the image.
347
- label_channels (int): Channel of label.
348
- sampling (bool): Sample Anchors or not.
349
- unmap_outputs (bool): unmap outputs or not.
350
-
351
- Returns:
352
- tuple:
353
-
354
- - labels_list (Tensor): Labels in a single image
355
- - label_weights (Tensor): Label weights in a single image
356
- - bbox_cls_targets (Tensor): BBox cls targets in a single image
357
- - bbox_cls_weights (Tensor): BBox cls weights in a single image
358
- - bbox_reg_targets (Tensor): BBox reg targets in a single image
359
- - bbox_reg_weights (Tensor): BBox reg weights in a single image
360
- - num_total_pos (int): Number of positive samples \
361
- in a single image
362
- - num_total_neg (int): Number of negative samples \
363
- in a single image
364
- """
365
- if not inside_flags.any():
366
- return (None, ) * 8
367
- # assign gt and sample anchors
368
- expand_inside_flags = inside_flags[:, None].expand(
369
- -1, self.approxs_per_octave).reshape(-1)
370
- approxs = flat_approxs[expand_inside_flags, :]
371
- squares = flat_squares[inside_flags, :]
372
-
373
- assign_result = self.assigner.assign(approxs, squares,
374
- self.approxs_per_octave,
375
- gt_bboxes, gt_bboxes_ignore)
376
- sampling_result = self.sampler.sample(assign_result, squares,
377
- gt_bboxes)
378
-
379
- num_valid_squares = squares.shape[0]
380
- bbox_cls_targets = squares.new_zeros(
381
- (num_valid_squares, self.side_num * 4))
382
- bbox_cls_weights = squares.new_zeros(
383
- (num_valid_squares, self.side_num * 4))
384
- bbox_reg_targets = squares.new_zeros(
385
- (num_valid_squares, self.side_num * 4))
386
- bbox_reg_weights = squares.new_zeros(
387
- (num_valid_squares, self.side_num * 4))
388
- labels = squares.new_full((num_valid_squares, ),
389
- self.num_classes,
390
- dtype=torch.long)
391
- label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float)
392
-
393
- pos_inds = sampling_result.pos_inds
394
- neg_inds = sampling_result.neg_inds
395
- if len(pos_inds) > 0:
396
- (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets,
397
- pos_bbox_cls_weights) = self.bbox_coder.encode(
398
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
399
-
400
- bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets
401
- bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets
402
- bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights
403
- bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights
404
- if gt_labels is None:
405
- # Only rpn gives gt_labels as None
406
- # Foreground is the first class
407
- labels[pos_inds] = 0
408
- else:
409
- labels[pos_inds] = gt_labels[
410
- sampling_result.pos_assigned_gt_inds]
411
- if self.train_cfg.pos_weight <= 0:
412
- label_weights[pos_inds] = 1.0
413
- else:
414
- label_weights[pos_inds] = self.train_cfg.pos_weight
415
- if len(neg_inds) > 0:
416
- label_weights[neg_inds] = 1.0
417
-
418
- # map up to original set of anchors
419
- if unmap_outputs:
420
- num_total_anchors = flat_squares.size(0)
421
- labels = unmap(
422
- labels, num_total_anchors, inside_flags, fill=self.num_classes)
423
- label_weights = unmap(label_weights, num_total_anchors,
424
- inside_flags)
425
- bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors,
426
- inside_flags)
427
- bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors,
428
- inside_flags)
429
- bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors,
430
- inside_flags)
431
- bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors,
432
- inside_flags)
433
- return (labels, label_weights, bbox_cls_targets, bbox_cls_weights,
434
- bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds)
435
-
436
- def loss_single(self, cls_score, bbox_pred, labels, label_weights,
437
- bbox_cls_targets, bbox_cls_weights, bbox_reg_targets,
438
- bbox_reg_weights, num_total_samples):
439
- # classification loss
440
- labels = labels.reshape(-1)
441
- label_weights = label_weights.reshape(-1)
442
- cls_score = cls_score.permute(0, 2, 3,
443
- 1).reshape(-1, self.cls_out_channels)
444
- loss_cls = self.loss_cls(
445
- cls_score, labels, label_weights, avg_factor=num_total_samples)
446
- # regression loss
447
- bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4)
448
- bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4)
449
- bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4)
450
- bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4)
451
- (bbox_cls_pred, bbox_reg_pred) = bbox_pred
452
- bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape(
453
- -1, self.side_num * 4)
454
- bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape(
455
- -1, self.side_num * 4)
456
- loss_bbox_cls = self.loss_bbox_cls(
457
- bbox_cls_pred,
458
- bbox_cls_targets.long(),
459
- bbox_cls_weights,
460
- avg_factor=num_total_samples * 4 * self.side_num)
461
- loss_bbox_reg = self.loss_bbox_reg(
462
- bbox_reg_pred,
463
- bbox_reg_targets,
464
- bbox_reg_weights,
465
- avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk)
466
- return loss_cls, loss_bbox_cls, loss_bbox_reg
467
-
468
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
469
- def loss(self,
470
- cls_scores,
471
- bbox_preds,
472
- gt_bboxes,
473
- gt_labels,
474
- img_metas,
475
- gt_bboxes_ignore=None):
476
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
477
- assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
478
-
479
- device = cls_scores[0].device
480
-
481
- # get sampled approxes
482
- approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs(
483
- self, featmap_sizes, img_metas, device=device)
484
-
485
- square_list = self.get_anchors(featmap_sizes, img_metas, device=device)
486
-
487
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
488
-
489
- cls_reg_targets = self.get_target(
490
- approxs_list,
491
- inside_flag_list,
492
- square_list,
493
- gt_bboxes,
494
- img_metas,
495
- gt_bboxes_ignore_list=gt_bboxes_ignore,
496
- gt_labels_list=gt_labels,
497
- label_channels=label_channels,
498
- sampling=self.sampling)
499
- if cls_reg_targets is None:
500
- return None
501
- (labels_list, label_weights_list, bbox_cls_targets_list,
502
- bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list,
503
- num_total_pos, num_total_neg) = cls_reg_targets
504
- num_total_samples = (
505
- num_total_pos + num_total_neg if self.sampling else num_total_pos)
506
- losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply(
507
- self.loss_single,
508
- cls_scores,
509
- bbox_preds,
510
- labels_list,
511
- label_weights_list,
512
- bbox_cls_targets_list,
513
- bbox_cls_weights_list,
514
- bbox_reg_targets_list,
515
- bbox_reg_weights_list,
516
- num_total_samples=num_total_samples)
517
- return dict(
518
- loss_cls=losses_cls,
519
- loss_bbox_cls=losses_bbox_cls,
520
- loss_bbox_reg=losses_bbox_reg)
521
-
522
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
523
- def get_bboxes(self,
524
- cls_scores,
525
- bbox_preds,
526
- img_metas,
527
- cfg=None,
528
- rescale=False):
529
- assert len(cls_scores) == len(bbox_preds)
530
- num_levels = len(cls_scores)
531
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
532
-
533
- device = cls_scores[0].device
534
- mlvl_anchors = self.get_anchors(
535
- featmap_sizes, img_metas, device=device)
536
- result_list = []
537
- for img_id in range(len(img_metas)):
538
- cls_score_list = [
539
- cls_scores[i][img_id].detach() for i in range(num_levels)
540
- ]
541
- bbox_cls_pred_list = [
542
- bbox_preds[i][0][img_id].detach() for i in range(num_levels)
543
- ]
544
- bbox_reg_pred_list = [
545
- bbox_preds[i][1][img_id].detach() for i in range(num_levels)
546
- ]
547
- img_shape = img_metas[img_id]['img_shape']
548
- scale_factor = img_metas[img_id]['scale_factor']
549
- proposals = self.get_bboxes_single(cls_score_list,
550
- bbox_cls_pred_list,
551
- bbox_reg_pred_list,
552
- mlvl_anchors[img_id], img_shape,
553
- scale_factor, cfg, rescale)
554
- result_list.append(proposals)
555
- return result_list
556
-
557
- def get_bboxes_single(self,
558
- cls_scores,
559
- bbox_cls_preds,
560
- bbox_reg_preds,
561
- mlvl_anchors,
562
- img_shape,
563
- scale_factor,
564
- cfg,
565
- rescale=False):
566
- cfg = self.test_cfg if cfg is None else cfg
567
- mlvl_bboxes = []
568
- mlvl_scores = []
569
- mlvl_confids = []
570
- assert len(cls_scores) == len(bbox_cls_preds) == len(
571
- bbox_reg_preds) == len(mlvl_anchors)
572
- for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip(
573
- cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors):
574
- assert cls_score.size()[-2:] == bbox_cls_pred.size(
575
- )[-2:] == bbox_reg_pred.size()[-2::]
576
- cls_score = cls_score.permute(1, 2,
577
- 0).reshape(-1, self.cls_out_channels)
578
- if self.use_sigmoid_cls:
579
- scores = cls_score.sigmoid()
580
- else:
581
- scores = cls_score.softmax(-1)
582
- bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape(
583
- -1, self.side_num * 4)
584
- bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape(
585
- -1, self.side_num * 4)
586
- nms_pre = cfg.get('nms_pre', -1)
587
- if nms_pre > 0 and scores.shape[0] > nms_pre:
588
- if self.use_sigmoid_cls:
589
- max_scores, _ = scores.max(dim=1)
590
- else:
591
- max_scores, _ = scores[:, :-1].max(dim=1)
592
- _, topk_inds = max_scores.topk(nms_pre)
593
- anchors = anchors[topk_inds, :]
594
- bbox_cls_pred = bbox_cls_pred[topk_inds, :]
595
- bbox_reg_pred = bbox_reg_pred[topk_inds, :]
596
- scores = scores[topk_inds, :]
597
- bbox_preds = [
598
- bbox_cls_pred.contiguous(),
599
- bbox_reg_pred.contiguous()
600
- ]
601
- bboxes, confids = self.bbox_coder.decode(
602
- anchors.contiguous(), bbox_preds, max_shape=img_shape)
603
- mlvl_bboxes.append(bboxes)
604
- mlvl_scores.append(scores)
605
- mlvl_confids.append(confids)
606
- mlvl_bboxes = torch.cat(mlvl_bboxes)
607
- if rescale:
608
- mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
609
- mlvl_scores = torch.cat(mlvl_scores)
610
- mlvl_confids = torch.cat(mlvl_confids)
611
- if self.use_sigmoid_cls:
612
- padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
613
- mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
614
- det_bboxes, det_labels = multiclass_nms(
615
- mlvl_bboxes,
616
- mlvl_scores,
617
- cfg.score_thr,
618
- cfg.nms,
619
- cfg.max_per_img,
620
- score_factors=mlvl_confids)
621
- return det_bboxes, det_labels
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './encnet_r50-d8_512x512_160k_ade20k.py'
2
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py DELETED
@@ -1,7 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/nonlocal_r50-d8.py',
3
- '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
4
- '../_base_/schedules/schedule_20k.py'
5
- ]
6
- model = dict(
7
- decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
 
 
 
 
 
 
 
 
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/logger.py DELETED
@@ -1,495 +0,0 @@
1
- """
2
- Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
3
- https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
4
- """
5
-
6
- import os
7
- import sys
8
- import shutil
9
- import os.path as osp
10
- import json
11
- import time
12
- import datetime
13
- import tempfile
14
- import warnings
15
- from collections import defaultdict
16
- from contextlib import contextmanager
17
-
18
- DEBUG = 10
19
- INFO = 20
20
- WARN = 30
21
- ERROR = 40
22
-
23
- DISABLED = 50
24
-
25
-
26
- class KVWriter(object):
27
- def writekvs(self, kvs):
28
- raise NotImplementedError
29
-
30
-
31
- class SeqWriter(object):
32
- def writeseq(self, seq):
33
- raise NotImplementedError
34
-
35
-
36
- class HumanOutputFormat(KVWriter, SeqWriter):
37
- def __init__(self, filename_or_file):
38
- if isinstance(filename_or_file, str):
39
- self.file = open(filename_or_file, "wt")
40
- self.own_file = True
41
- else:
42
- assert hasattr(filename_or_file, "read"), (
43
- "expected file or str, got %s" % filename_or_file
44
- )
45
- self.file = filename_or_file
46
- self.own_file = False
47
-
48
- def writekvs(self, kvs):
49
- # Create strings for printing
50
- key2str = {}
51
- for (key, val) in sorted(kvs.items()):
52
- if hasattr(val, "__float__"):
53
- valstr = "%-8.3g" % val
54
- else:
55
- valstr = str(val)
56
- key2str[self._truncate(key)] = self._truncate(valstr)
57
-
58
- # Find max widths
59
- if len(key2str) == 0:
60
- print("WARNING: tried to write empty key-value dict")
61
- return
62
- else:
63
- keywidth = max(map(len, key2str.keys()))
64
- valwidth = max(map(len, key2str.values()))
65
-
66
- # Write out the data
67
- dashes = "-" * (keywidth + valwidth + 7)
68
- lines = [dashes]
69
- for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
70
- lines.append(
71
- "| %s%s | %s%s |"
72
- % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
73
- )
74
- lines.append(dashes)
75
- self.file.write("\n".join(lines) + "\n")
76
-
77
- # Flush the output to the file
78
- self.file.flush()
79
-
80
- def _truncate(self, s):
81
- maxlen = 30
82
- return s[: maxlen - 3] + "..." if len(s) > maxlen else s
83
-
84
- def writeseq(self, seq):
85
- seq = list(seq)
86
- for (i, elem) in enumerate(seq):
87
- self.file.write(elem)
88
- if i < len(seq) - 1: # add space unless this is the last one
89
- self.file.write(" ")
90
- self.file.write("\n")
91
- self.file.flush()
92
-
93
- def close(self):
94
- if self.own_file:
95
- self.file.close()
96
-
97
-
98
- class JSONOutputFormat(KVWriter):
99
- def __init__(self, filename):
100
- self.file = open(filename, "wt")
101
-
102
- def writekvs(self, kvs):
103
- for k, v in sorted(kvs.items()):
104
- if hasattr(v, "dtype"):
105
- kvs[k] = float(v)
106
- self.file.write(json.dumps(kvs) + "\n")
107
- self.file.flush()
108
-
109
- def close(self):
110
- self.file.close()
111
-
112
-
113
- class CSVOutputFormat(KVWriter):
114
- def __init__(self, filename):
115
- self.file = open(filename, "w+t")
116
- self.keys = []
117
- self.sep = ","
118
-
119
- def writekvs(self, kvs):
120
- # Add our current row to the history
121
- extra_keys = list(kvs.keys() - self.keys)
122
- extra_keys.sort()
123
- if extra_keys:
124
- self.keys.extend(extra_keys)
125
- self.file.seek(0)
126
- lines = self.file.readlines()
127
- self.file.seek(0)
128
- for (i, k) in enumerate(self.keys):
129
- if i > 0:
130
- self.file.write(",")
131
- self.file.write(k)
132
- self.file.write("\n")
133
- for line in lines[1:]:
134
- self.file.write(line[:-1])
135
- self.file.write(self.sep * len(extra_keys))
136
- self.file.write("\n")
137
- for (i, k) in enumerate(self.keys):
138
- if i > 0:
139
- self.file.write(",")
140
- v = kvs.get(k)
141
- if v is not None:
142
- self.file.write(str(v))
143
- self.file.write("\n")
144
- self.file.flush()
145
-
146
- def close(self):
147
- self.file.close()
148
-
149
-
150
- class TensorBoardOutputFormat(KVWriter):
151
- """
152
- Dumps key/value pairs into TensorBoard's numeric format.
153
- """
154
-
155
- def __init__(self, dir):
156
- os.makedirs(dir, exist_ok=True)
157
- self.dir = dir
158
- self.step = 1
159
- prefix = "events"
160
- path = osp.join(osp.abspath(dir), prefix)
161
- import tensorflow as tf
162
- from tensorflow.python import pywrap_tensorflow
163
- from tensorflow.core.util import event_pb2
164
- from tensorflow.python.util import compat
165
-
166
- self.tf = tf
167
- self.event_pb2 = event_pb2
168
- self.pywrap_tensorflow = pywrap_tensorflow
169
- self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
170
-
171
- def writekvs(self, kvs):
172
- def summary_val(k, v):
173
- kwargs = {"tag": k, "simple_value": float(v)}
174
- return self.tf.Summary.Value(**kwargs)
175
-
176
- summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
177
- event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
178
- event.step = (
179
- self.step
180
- ) # is there any reason why you'd want to specify the step?
181
- self.writer.WriteEvent(event)
182
- self.writer.Flush()
183
- self.step += 1
184
-
185
- def close(self):
186
- if self.writer:
187
- self.writer.Close()
188
- self.writer = None
189
-
190
-
191
- def make_output_format(format, ev_dir, log_suffix=""):
192
- os.makedirs(ev_dir, exist_ok=True)
193
- if format == "stdout":
194
- return HumanOutputFormat(sys.stdout)
195
- elif format == "log":
196
- return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
197
- elif format == "json":
198
- return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
199
- elif format == "csv":
200
- return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
201
- elif format == "tensorboard":
202
- return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
203
- else:
204
- raise ValueError("Unknown format specified: %s" % (format,))
205
-
206
-
207
- # ================================================================
208
- # API
209
- # ================================================================
210
-
211
-
212
- def logkv(key, val):
213
- """
214
- Log a value of some diagnostic
215
- Call this once for each diagnostic quantity, each iteration
216
- If called many times, last value will be used.
217
- """
218
- get_current().logkv(key, val)
219
-
220
-
221
- def logkv_mean(key, val):
222
- """
223
- The same as logkv(), but if called many times, values averaged.
224
- """
225
- get_current().logkv_mean(key, val)
226
-
227
-
228
- def logkvs(d):
229
- """
230
- Log a dictionary of key-value pairs
231
- """
232
- for (k, v) in d.items():
233
- logkv(k, v)
234
-
235
-
236
- def dumpkvs():
237
- """
238
- Write all of the diagnostics from the current iteration
239
- """
240
- return get_current().dumpkvs()
241
-
242
-
243
- def getkvs():
244
- return get_current().name2val
245
-
246
-
247
- def log(*args, level=INFO):
248
- """
249
- Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
250
- """
251
- get_current().log(*args, level=level)
252
-
253
-
254
- def debug(*args):
255
- log(*args, level=DEBUG)
256
-
257
-
258
- def info(*args):
259
- log(*args, level=INFO)
260
-
261
-
262
- def warn(*args):
263
- log(*args, level=WARN)
264
-
265
-
266
- def error(*args):
267
- log(*args, level=ERROR)
268
-
269
-
270
- def set_level(level):
271
- """
272
- Set logging threshold on current logger.
273
- """
274
- get_current().set_level(level)
275
-
276
-
277
- def set_comm(comm):
278
- get_current().set_comm(comm)
279
-
280
-
281
- def get_dir():
282
- """
283
- Get directory that log files are being written to.
284
- will be None if there is no output directory (i.e., if you didn't call start)
285
- """
286
- return get_current().get_dir()
287
-
288
-
289
- record_tabular = logkv
290
- dump_tabular = dumpkvs
291
-
292
-
293
- @contextmanager
294
- def profile_kv(scopename):
295
- logkey = "wait_" + scopename
296
- tstart = time.time()
297
- try:
298
- yield
299
- finally:
300
- get_current().name2val[logkey] += time.time() - tstart
301
-
302
-
303
- def profile(n):
304
- """
305
- Usage:
306
- @profile("my_func")
307
- def my_func(): code
308
- """
309
-
310
- def decorator_with_name(func):
311
- def func_wrapper(*args, **kwargs):
312
- with profile_kv(n):
313
- return func(*args, **kwargs)
314
-
315
- return func_wrapper
316
-
317
- return decorator_with_name
318
-
319
-
320
- # ================================================================
321
- # Backend
322
- # ================================================================
323
-
324
-
325
- def get_current():
326
- if Logger.CURRENT is None:
327
- _configure_default_logger()
328
-
329
- return Logger.CURRENT
330
-
331
-
332
- class Logger(object):
333
- DEFAULT = None # A logger with no output files. (See right below class definition)
334
- # So that you can still log to the terminal without setting up any output files
335
- CURRENT = None # Current logger being used by the free functions above
336
-
337
- def __init__(self, dir, output_formats, comm=None):
338
- self.name2val = defaultdict(float) # values this iteration
339
- self.name2cnt = defaultdict(int)
340
- self.level = INFO
341
- self.dir = dir
342
- self.output_formats = output_formats
343
- self.comm = comm
344
-
345
- # Logging API, forwarded
346
- # ----------------------------------------
347
- def logkv(self, key, val):
348
- self.name2val[key] = val
349
-
350
- def logkv_mean(self, key, val):
351
- oldval, cnt = self.name2val[key], self.name2cnt[key]
352
- self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
353
- self.name2cnt[key] = cnt + 1
354
-
355
- def dumpkvs(self):
356
- if self.comm is None:
357
- d = self.name2val
358
- else:
359
- d = mpi_weighted_mean(
360
- self.comm,
361
- {
362
- name: (val, self.name2cnt.get(name, 1))
363
- for (name, val) in self.name2val.items()
364
- },
365
- )
366
- if self.comm.rank != 0:
367
- d["dummy"] = 1 # so we don't get a warning about empty dict
368
- out = d.copy() # Return the dict for unit testing purposes
369
- for fmt in self.output_formats:
370
- if isinstance(fmt, KVWriter):
371
- fmt.writekvs(d)
372
- self.name2val.clear()
373
- self.name2cnt.clear()
374
- return out
375
-
376
- def log(self, *args, level=INFO):
377
- if self.level <= level:
378
- self._do_log(args)
379
-
380
- # Configuration
381
- # ----------------------------------------
382
- def set_level(self, level):
383
- self.level = level
384
-
385
- def set_comm(self, comm):
386
- self.comm = comm
387
-
388
- def get_dir(self):
389
- return self.dir
390
-
391
- def close(self):
392
- for fmt in self.output_formats:
393
- fmt.close()
394
-
395
- # Misc
396
- # ----------------------------------------
397
- def _do_log(self, args):
398
- for fmt in self.output_formats:
399
- if isinstance(fmt, SeqWriter):
400
- fmt.writeseq(map(str, args))
401
-
402
-
403
- def get_rank_without_mpi_import():
404
- # check environment variables here instead of importing mpi4py
405
- # to avoid calling MPI_Init() when this module is imported
406
- for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
407
- if varname in os.environ:
408
- return int(os.environ[varname])
409
- return 0
410
-
411
-
412
- def mpi_weighted_mean(comm, local_name2valcount):
413
- """
414
- Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
415
- Perform a weighted average over dicts that are each on a different node
416
- Input: local_name2valcount: dict mapping key -> (value, count)
417
- Returns: key -> mean
418
- """
419
- all_name2valcount = comm.gather(local_name2valcount)
420
- if comm.rank == 0:
421
- name2sum = defaultdict(float)
422
- name2count = defaultdict(float)
423
- for n2vc in all_name2valcount:
424
- for (name, (val, count)) in n2vc.items():
425
- try:
426
- val = float(val)
427
- except ValueError:
428
- if comm.rank == 0:
429
- warnings.warn(
430
- "WARNING: tried to compute mean on non-float {}={}".format(
431
- name, val
432
- )
433
- )
434
- else:
435
- name2sum[name] += val * count
436
- name2count[name] += count
437
- return {name: name2sum[name] / name2count[name] for name in name2sum}
438
- else:
439
- return {}
440
-
441
-
442
- def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
443
- """
444
- If comm is provided, average all numerical stats across that comm
445
- """
446
- if dir is None:
447
- dir = os.getenv("OPENAI_LOGDIR")
448
- if dir is None:
449
- dir = osp.join(
450
- tempfile.gettempdir(),
451
- datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
452
- )
453
- assert isinstance(dir, str)
454
- dir = os.path.expanduser(dir)
455
- os.makedirs(os.path.expanduser(dir), exist_ok=True)
456
-
457
- rank = get_rank_without_mpi_import()
458
- if rank > 0:
459
- log_suffix = log_suffix + "-rank%03i" % rank
460
-
461
- if format_strs is None:
462
- if rank == 0:
463
- format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
464
- else:
465
- format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
466
- format_strs = filter(None, format_strs)
467
- output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
468
-
469
- Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
470
- if output_formats:
471
- log("Logging to %s" % dir)
472
-
473
-
474
- def _configure_default_logger():
475
- configure()
476
- Logger.DEFAULT = Logger.CURRENT
477
-
478
-
479
- def reset():
480
- if Logger.CURRENT is not Logger.DEFAULT:
481
- Logger.CURRENT.close()
482
- Logger.CURRENT = Logger.DEFAULT
483
- log("Reset logger")
484
-
485
-
486
- @contextmanager
487
- def scoped_configure(dir=None, format_strs=None, comm=None):
488
- prevlogger = Logger.CURRENT
489
- configure(dir=dir, format_strs=format_strs, comm=comm)
490
- try:
491
- yield
492
- finally:
493
- Logger.CURRENT.close()
494
- Logger.CURRENT = prevlogger
495
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/geometric.py DELETED
@@ -1,728 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import numbers
3
-
4
- import cv2
5
- import numpy as np
6
-
7
- from ..utils import to_2tuple
8
- from .io import imread_backend
9
-
10
- try:
11
- from PIL import Image
12
- except ImportError:
13
- Image = None
14
-
15
-
16
- def _scale_size(size, scale):
17
- """Rescale a size by a ratio.
18
-
19
- Args:
20
- size (tuple[int]): (w, h).
21
- scale (float | tuple(float)): Scaling factor.
22
-
23
- Returns:
24
- tuple[int]: scaled size.
25
- """
26
- if isinstance(scale, (float, int)):
27
- scale = (scale, scale)
28
- w, h = size
29
- return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5)
30
-
31
-
32
- cv2_interp_codes = {
33
- 'nearest': cv2.INTER_NEAREST,
34
- 'bilinear': cv2.INTER_LINEAR,
35
- 'bicubic': cv2.INTER_CUBIC,
36
- 'area': cv2.INTER_AREA,
37
- 'lanczos': cv2.INTER_LANCZOS4
38
- }
39
-
40
- if Image is not None:
41
- pillow_interp_codes = {
42
- 'nearest': Image.NEAREST,
43
- 'bilinear': Image.BILINEAR,
44
- 'bicubic': Image.BICUBIC,
45
- 'box': Image.BOX,
46
- 'lanczos': Image.LANCZOS,
47
- 'hamming': Image.HAMMING
48
- }
49
-
50
-
51
- def imresize(img,
52
- size,
53
- return_scale=False,
54
- interpolation='bilinear',
55
- out=None,
56
- backend=None):
57
- """Resize image to a given size.
58
-
59
- Args:
60
- img (ndarray): The input image.
61
- size (tuple[int]): Target size (w, h).
62
- return_scale (bool): Whether to return `w_scale` and `h_scale`.
63
- interpolation (str): Interpolation method, accepted values are
64
- "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
65
- backend, "nearest", "bilinear" for 'pillow' backend.
66
- out (ndarray): The output destination.
67
- backend (str | None): The image resize backend type. Options are `cv2`,
68
- `pillow`, `None`. If backend is None, the global imread_backend
69
- specified by ``mmcv.use_backend()`` will be used. Default: None.
70
-
71
- Returns:
72
- tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
73
- `resized_img`.
74
- """
75
- h, w = img.shape[:2]
76
- if backend is None:
77
- backend = imread_backend
78
- if backend not in ['cv2', 'pillow']:
79
- raise ValueError(f'backend: {backend} is not supported for resize.'
80
- f"Supported backends are 'cv2', 'pillow'")
81
-
82
- if backend == 'pillow':
83
- assert img.dtype == np.uint8, 'Pillow backend only support uint8 type'
84
- pil_image = Image.fromarray(img)
85
- pil_image = pil_image.resize(size, pillow_interp_codes[interpolation])
86
- resized_img = np.array(pil_image)
87
- else:
88
- resized_img = cv2.resize(
89
- img, size, dst=out, interpolation=cv2_interp_codes[interpolation])
90
- if not return_scale:
91
- return resized_img
92
- else:
93
- w_scale = size[0] / w
94
- h_scale = size[1] / h
95
- return resized_img, w_scale, h_scale
96
-
97
-
98
- def imresize_to_multiple(img,
99
- divisor,
100
- size=None,
101
- scale_factor=None,
102
- keep_ratio=False,
103
- return_scale=False,
104
- interpolation='bilinear',
105
- out=None,
106
- backend=None):
107
- """Resize image according to a given size or scale factor and then rounds
108
- up the the resized or rescaled image size to the nearest value that can be
109
- divided by the divisor.
110
-
111
- Args:
112
- img (ndarray): The input image.
113
- divisor (int | tuple): Resized image size will be a multiple of
114
- divisor. If divisor is a tuple, divisor should be
115
- (w_divisor, h_divisor).
116
- size (None | int | tuple[int]): Target size (w, h). Default: None.
117
- scale_factor (None | float | tuple[float]): Multiplier for spatial
118
- size. Should match input size if it is a tuple and the 2D style is
119
- (w_scale_factor, h_scale_factor). Default: None.
120
- keep_ratio (bool): Whether to keep the aspect ratio when resizing the
121
- image. Default: False.
122
- return_scale (bool): Whether to return `w_scale` and `h_scale`.
123
- interpolation (str): Interpolation method, accepted values are
124
- "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
125
- backend, "nearest", "bilinear" for 'pillow' backend.
126
- out (ndarray): The output destination.
127
- backend (str | None): The image resize backend type. Options are `cv2`,
128
- `pillow`, `None`. If backend is None, the global imread_backend
129
- specified by ``mmcv.use_backend()`` will be used. Default: None.
130
-
131
- Returns:
132
- tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
133
- `resized_img`.
134
- """
135
- h, w = img.shape[:2]
136
- if size is not None and scale_factor is not None:
137
- raise ValueError('only one of size or scale_factor should be defined')
138
- elif size is None and scale_factor is None:
139
- raise ValueError('one of size or scale_factor should be defined')
140
- elif size is not None:
141
- size = to_2tuple(size)
142
- if keep_ratio:
143
- size = rescale_size((w, h), size, return_scale=False)
144
- else:
145
- size = _scale_size((w, h), scale_factor)
146
-
147
- divisor = to_2tuple(divisor)
148
- size = tuple([int(np.ceil(s / d)) * d for s, d in zip(size, divisor)])
149
- resized_img, w_scale, h_scale = imresize(
150
- img,
151
- size,
152
- return_scale=True,
153
- interpolation=interpolation,
154
- out=out,
155
- backend=backend)
156
- if return_scale:
157
- return resized_img, w_scale, h_scale
158
- else:
159
- return resized_img
160
-
161
-
162
- def imresize_like(img,
163
- dst_img,
164
- return_scale=False,
165
- interpolation='bilinear',
166
- backend=None):
167
- """Resize image to the same size of a given image.
168
-
169
- Args:
170
- img (ndarray): The input image.
171
- dst_img (ndarray): The target image.
172
- return_scale (bool): Whether to return `w_scale` and `h_scale`.
173
- interpolation (str): Same as :func:`resize`.
174
- backend (str | None): Same as :func:`resize`.
175
-
176
- Returns:
177
- tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
178
- `resized_img`.
179
- """
180
- h, w = dst_img.shape[:2]
181
- return imresize(img, (w, h), return_scale, interpolation, backend=backend)
182
-
183
-
184
- def rescale_size(old_size, scale, return_scale=False):
185
- """Calculate the new size to be rescaled to.
186
-
187
- Args:
188
- old_size (tuple[int]): The old size (w, h) of image.
189
- scale (float | tuple[int]): The scaling factor or maximum size.
190
- If it is a float number, then the image will be rescaled by this
191
- factor, else if it is a tuple of 2 integers, then the image will
192
- be rescaled as large as possible within the scale.
193
- return_scale (bool): Whether to return the scaling factor besides the
194
- rescaled image size.
195
-
196
- Returns:
197
- tuple[int]: The new rescaled image size.
198
- """
199
- w, h = old_size
200
- if isinstance(scale, (float, int)):
201
- if scale <= 0:
202
- raise ValueError(f'Invalid scale {scale}, must be positive.')
203
- scale_factor = scale
204
- elif isinstance(scale, tuple):
205
- max_long_edge = max(scale)
206
- max_short_edge = min(scale)
207
- scale_factor = min(max_long_edge / max(h, w),
208
- max_short_edge / min(h, w))
209
- else:
210
- raise TypeError(
211
- f'Scale must be a number or tuple of int, but got {type(scale)}')
212
-
213
- new_size = _scale_size((w, h), scale_factor)
214
-
215
- if return_scale:
216
- return new_size, scale_factor
217
- else:
218
- return new_size
219
-
220
-
221
- def imrescale(img,
222
- scale,
223
- return_scale=False,
224
- interpolation='bilinear',
225
- backend=None):
226
- """Resize image while keeping the aspect ratio.
227
-
228
- Args:
229
- img (ndarray): The input image.
230
- scale (float | tuple[int]): The scaling factor or maximum size.
231
- If it is a float number, then the image will be rescaled by this
232
- factor, else if it is a tuple of 2 integers, then the image will
233
- be rescaled as large as possible within the scale.
234
- return_scale (bool): Whether to return the scaling factor besides the
235
- rescaled image.
236
- interpolation (str): Same as :func:`resize`.
237
- backend (str | None): Same as :func:`resize`.
238
-
239
- Returns:
240
- ndarray: The rescaled image.
241
- """
242
- h, w = img.shape[:2]
243
- new_size, scale_factor = rescale_size((w, h), scale, return_scale=True)
244
- rescaled_img = imresize(
245
- img, new_size, interpolation=interpolation, backend=backend)
246
- if return_scale:
247
- return rescaled_img, scale_factor
248
- else:
249
- return rescaled_img
250
-
251
-
252
- def imflip(img, direction='horizontal'):
253
- """Flip an image horizontally or vertically.
254
-
255
- Args:
256
- img (ndarray): Image to be flipped.
257
- direction (str): The flip direction, either "horizontal" or
258
- "vertical" or "diagonal".
259
-
260
- Returns:
261
- ndarray: The flipped image.
262
- """
263
- assert direction in ['horizontal', 'vertical', 'diagonal']
264
- if direction == 'horizontal':
265
- return np.flip(img, axis=1)
266
- elif direction == 'vertical':
267
- return np.flip(img, axis=0)
268
- else:
269
- return np.flip(img, axis=(0, 1))
270
-
271
-
272
- def imflip_(img, direction='horizontal'):
273
- """Inplace flip an image horizontally or vertically.
274
-
275
- Args:
276
- img (ndarray): Image to be flipped.
277
- direction (str): The flip direction, either "horizontal" or
278
- "vertical" or "diagonal".
279
-
280
- Returns:
281
- ndarray: The flipped image (inplace).
282
- """
283
- assert direction in ['horizontal', 'vertical', 'diagonal']
284
- if direction == 'horizontal':
285
- return cv2.flip(img, 1, img)
286
- elif direction == 'vertical':
287
- return cv2.flip(img, 0, img)
288
- else:
289
- return cv2.flip(img, -1, img)
290
-
291
-
292
- def imrotate(img,
293
- angle,
294
- center=None,
295
- scale=1.0,
296
- border_value=0,
297
- interpolation='bilinear',
298
- auto_bound=False):
299
- """Rotate an image.
300
-
301
- Args:
302
- img (ndarray): Image to be rotated.
303
- angle (float): Rotation angle in degrees, positive values mean
304
- clockwise rotation.
305
- center (tuple[float], optional): Center point (w, h) of the rotation in
306
- the source image. If not specified, the center of the image will be
307
- used.
308
- scale (float): Isotropic scale factor.
309
- border_value (int): Border value.
310
- interpolation (str): Same as :func:`resize`.
311
- auto_bound (bool): Whether to adjust the image size to cover the whole
312
- rotated image.
313
-
314
- Returns:
315
- ndarray: The rotated image.
316
- """
317
- if center is not None and auto_bound:
318
- raise ValueError('`auto_bound` conflicts with `center`')
319
- h, w = img.shape[:2]
320
- if center is None:
321
- center = ((w - 1) * 0.5, (h - 1) * 0.5)
322
- assert isinstance(center, tuple)
323
-
324
- matrix = cv2.getRotationMatrix2D(center, -angle, scale)
325
- if auto_bound:
326
- cos = np.abs(matrix[0, 0])
327
- sin = np.abs(matrix[0, 1])
328
- new_w = h * sin + w * cos
329
- new_h = h * cos + w * sin
330
- matrix[0, 2] += (new_w - w) * 0.5
331
- matrix[1, 2] += (new_h - h) * 0.5
332
- w = int(np.round(new_w))
333
- h = int(np.round(new_h))
334
- rotated = cv2.warpAffine(
335
- img,
336
- matrix, (w, h),
337
- flags=cv2_interp_codes[interpolation],
338
- borderValue=border_value)
339
- return rotated
340
-
341
-
342
- def bbox_clip(bboxes, img_shape):
343
- """Clip bboxes to fit the image shape.
344
-
345
- Args:
346
- bboxes (ndarray): Shape (..., 4*k)
347
- img_shape (tuple[int]): (height, width) of the image.
348
-
349
- Returns:
350
- ndarray: Clipped bboxes.
351
- """
352
- assert bboxes.shape[-1] % 4 == 0
353
- cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype)
354
- cmin[0::2] = img_shape[1] - 1
355
- cmin[1::2] = img_shape[0] - 1
356
- clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0)
357
- return clipped_bboxes
358
-
359
-
360
- def bbox_scaling(bboxes, scale, clip_shape=None):
361
- """Scaling bboxes w.r.t the box center.
362
-
363
- Args:
364
- bboxes (ndarray): Shape(..., 4).
365
- scale (float): Scaling factor.
366
- clip_shape (tuple[int], optional): If specified, bboxes that exceed the
367
- boundary will be clipped according to the given shape (h, w).
368
-
369
- Returns:
370
- ndarray: Scaled bboxes.
371
- """
372
- if float(scale) == 1.0:
373
- scaled_bboxes = bboxes.copy()
374
- else:
375
- w = bboxes[..., 2] - bboxes[..., 0] + 1
376
- h = bboxes[..., 3] - bboxes[..., 1] + 1
377
- dw = (w * (scale - 1)) * 0.5
378
- dh = (h * (scale - 1)) * 0.5
379
- scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
380
- if clip_shape is not None:
381
- return bbox_clip(scaled_bboxes, clip_shape)
382
- else:
383
- return scaled_bboxes
384
-
385
-
386
- def imcrop(img, bboxes, scale=1.0, pad_fill=None):
387
- """Crop image patches.
388
-
389
- 3 steps: scale the bboxes -> clip bboxes -> crop and pad.
390
-
391
- Args:
392
- img (ndarray): Image to be cropped.
393
- bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
394
- scale (float, optional): Scale ratio of bboxes, the default value
395
- 1.0 means no padding.
396
- pad_fill (Number | list[Number]): Value to be filled for padding.
397
- Default: None, which means no padding.
398
-
399
- Returns:
400
- list[ndarray] | ndarray: The cropped image patches.
401
- """
402
- chn = 1 if img.ndim == 2 else img.shape[2]
403
- if pad_fill is not None:
404
- if isinstance(pad_fill, (int, float)):
405
- pad_fill = [pad_fill for _ in range(chn)]
406
- assert len(pad_fill) == chn
407
-
408
- _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
409
- scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
410
- clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
411
-
412
- patches = []
413
- for i in range(clipped_bbox.shape[0]):
414
- x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
415
- if pad_fill is None:
416
- patch = img[y1:y2 + 1, x1:x2 + 1, ...]
417
- else:
418
- _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
419
- if chn == 1:
420
- patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
421
- else:
422
- patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
423
- patch = np.array(
424
- pad_fill, dtype=img.dtype) * np.ones(
425
- patch_shape, dtype=img.dtype)
426
- x_start = 0 if _x1 >= 0 else -_x1
427
- y_start = 0 if _y1 >= 0 else -_y1
428
- w = x2 - x1 + 1
429
- h = y2 - y1 + 1
430
- patch[y_start:y_start + h, x_start:x_start + w,
431
- ...] = img[y1:y1 + h, x1:x1 + w, ...]
432
- patches.append(patch)
433
-
434
- if bboxes.ndim == 1:
435
- return patches[0]
436
- else:
437
- return patches
438
-
439
-
440
- def impad(img,
441
- *,
442
- shape=None,
443
- padding=None,
444
- pad_val=0,
445
- padding_mode='constant'):
446
- """Pad the given image to a certain shape or pad on all sides with
447
- specified padding mode and padding value.
448
-
449
- Args:
450
- img (ndarray): Image to be padded.
451
- shape (tuple[int]): Expected padding shape (h, w). Default: None.
452
- padding (int or tuple[int]): Padding on each border. If a single int is
453
- provided this is used to pad all borders. If tuple of length 2 is
454
- provided this is the padding on left/right and top/bottom
455
- respectively. If a tuple of length 4 is provided this is the
456
- padding for the left, top, right and bottom borders respectively.
457
- Default: None. Note that `shape` and `padding` can not be both
458
- set.
459
- pad_val (Number | Sequence[Number]): Values to be filled in padding
460
- areas when padding_mode is 'constant'. Default: 0.
461
- padding_mode (str): Type of padding. Should be: constant, edge,
462
- reflect or symmetric. Default: constant.
463
-
464
- - constant: pads with a constant value, this value is specified
465
- with pad_val.
466
- - edge: pads with the last value at the edge of the image.
467
- - reflect: pads with reflection of image without repeating the
468
- last value on the edge. For example, padding [1, 2, 3, 4]
469
- with 2 elements on both sides in reflect mode will result
470
- in [3, 2, 1, 2, 3, 4, 3, 2].
471
- - symmetric: pads with reflection of image repeating the last
472
- value on the edge. For example, padding [1, 2, 3, 4] with
473
- 2 elements on both sides in symmetric mode will result in
474
- [2, 1, 1, 2, 3, 4, 4, 3]
475
-
476
- Returns:
477
- ndarray: The padded image.
478
- """
479
-
480
- assert (shape is not None) ^ (padding is not None)
481
- if shape is not None:
482
- padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0])
483
-
484
- # check pad_val
485
- if isinstance(pad_val, tuple):
486
- assert len(pad_val) == img.shape[-1]
487
- elif not isinstance(pad_val, numbers.Number):
488
- raise TypeError('pad_val must be a int or a tuple. '
489
- f'But received {type(pad_val)}')
490
-
491
- # check padding
492
- if isinstance(padding, tuple) and len(padding) in [2, 4]:
493
- if len(padding) == 2:
494
- padding = (padding[0], padding[1], padding[0], padding[1])
495
- elif isinstance(padding, numbers.Number):
496
- padding = (padding, padding, padding, padding)
497
- else:
498
- raise ValueError('Padding must be a int or a 2, or 4 element tuple.'
499
- f'But received {padding}')
500
-
501
- # check padding mode
502
- assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
503
-
504
- border_type = {
505
- 'constant': cv2.BORDER_CONSTANT,
506
- 'edge': cv2.BORDER_REPLICATE,
507
- 'reflect': cv2.BORDER_REFLECT_101,
508
- 'symmetric': cv2.BORDER_REFLECT
509
- }
510
- img = cv2.copyMakeBorder(
511
- img,
512
- padding[1],
513
- padding[3],
514
- padding[0],
515
- padding[2],
516
- border_type[padding_mode],
517
- value=pad_val)
518
-
519
- return img
520
-
521
-
522
- def impad_to_multiple(img, divisor, pad_val=0):
523
- """Pad an image to ensure each edge to be multiple to some number.
524
-
525
- Args:
526
- img (ndarray): Image to be padded.
527
- divisor (int): Padded image edges will be multiple to divisor.
528
- pad_val (Number | Sequence[Number]): Same as :func:`impad`.
529
-
530
- Returns:
531
- ndarray: The padded image.
532
- """
533
- pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
534
- pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
535
- return impad(img, shape=(pad_h, pad_w), pad_val=pad_val)
536
-
537
-
538
- def cutout(img, shape, pad_val=0):
539
- """Randomly cut out a rectangle from the original img.
540
-
541
- Args:
542
- img (ndarray): Image to be cutout.
543
- shape (int | tuple[int]): Expected cutout shape (h, w). If given as a
544
- int, the value will be used for both h and w.
545
- pad_val (int | float | tuple[int | float]): Values to be filled in the
546
- cut area. Defaults to 0.
547
-
548
- Returns:
549
- ndarray: The cutout image.
550
- """
551
-
552
- channels = 1 if img.ndim == 2 else img.shape[2]
553
- if isinstance(shape, int):
554
- cut_h, cut_w = shape, shape
555
- else:
556
- assert isinstance(shape, tuple) and len(shape) == 2, \
557
- f'shape must be a int or a tuple with length 2, but got type ' \
558
- f'{type(shape)} instead.'
559
- cut_h, cut_w = shape
560
- if isinstance(pad_val, (int, float)):
561
- pad_val = tuple([pad_val] * channels)
562
- elif isinstance(pad_val, tuple):
563
- assert len(pad_val) == channels, \
564
- 'Expected the num of elements in tuple equals the channels' \
565
- 'of input image. Found {} vs {}'.format(
566
- len(pad_val), channels)
567
- else:
568
- raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`')
569
-
570
- img_h, img_w = img.shape[:2]
571
- y0 = np.random.uniform(img_h)
572
- x0 = np.random.uniform(img_w)
573
-
574
- y1 = int(max(0, y0 - cut_h / 2.))
575
- x1 = int(max(0, x0 - cut_w / 2.))
576
- y2 = min(img_h, y1 + cut_h)
577
- x2 = min(img_w, x1 + cut_w)
578
-
579
- if img.ndim == 2:
580
- patch_shape = (y2 - y1, x2 - x1)
581
- else:
582
- patch_shape = (y2 - y1, x2 - x1, channels)
583
-
584
- img_cutout = img.copy()
585
- patch = np.array(
586
- pad_val, dtype=img.dtype) * np.ones(
587
- patch_shape, dtype=img.dtype)
588
- img_cutout[y1:y2, x1:x2, ...] = patch
589
-
590
- return img_cutout
591
-
592
-
593
- def _get_shear_matrix(magnitude, direction='horizontal'):
594
- """Generate the shear matrix for transformation.
595
-
596
- Args:
597
- magnitude (int | float): The magnitude used for shear.
598
- direction (str): The flip direction, either "horizontal"
599
- or "vertical".
600
-
601
- Returns:
602
- ndarray: The shear matrix with dtype float32.
603
- """
604
- if direction == 'horizontal':
605
- shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]])
606
- elif direction == 'vertical':
607
- shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]])
608
- return shear_matrix
609
-
610
-
611
- def imshear(img,
612
- magnitude,
613
- direction='horizontal',
614
- border_value=0,
615
- interpolation='bilinear'):
616
- """Shear an image.
617
-
618
- Args:
619
- img (ndarray): Image to be sheared with format (h, w)
620
- or (h, w, c).
621
- magnitude (int | float): The magnitude used for shear.
622
- direction (str): The flip direction, either "horizontal"
623
- or "vertical".
624
- border_value (int | tuple[int]): Value used in case of a
625
- constant border.
626
- interpolation (str): Same as :func:`resize`.
627
-
628
- Returns:
629
- ndarray: The sheared image.
630
- """
631
- assert direction in ['horizontal',
632
- 'vertical'], f'Invalid direction: {direction}'
633
- height, width = img.shape[:2]
634
- if img.ndim == 2:
635
- channels = 1
636
- elif img.ndim == 3:
637
- channels = img.shape[-1]
638
- if isinstance(border_value, int):
639
- border_value = tuple([border_value] * channels)
640
- elif isinstance(border_value, tuple):
641
- assert len(border_value) == channels, \
642
- 'Expected the num of elements in tuple equals the channels' \
643
- 'of input image. Found {} vs {}'.format(
644
- len(border_value), channels)
645
- else:
646
- raise ValueError(
647
- f'Invalid type {type(border_value)} for `border_value`')
648
- shear_matrix = _get_shear_matrix(magnitude, direction)
649
- sheared = cv2.warpAffine(
650
- img,
651
- shear_matrix,
652
- (width, height),
653
- # Note case when the number elements in `border_value`
654
- # greater than 3 (e.g. shearing masks whose channels large
655
- # than 3) will raise TypeError in `cv2.warpAffine`.
656
- # Here simply slice the first 3 values in `border_value`.
657
- borderValue=border_value[:3],
658
- flags=cv2_interp_codes[interpolation])
659
- return sheared
660
-
661
-
662
- def _get_translate_matrix(offset, direction='horizontal'):
663
- """Generate the translate matrix.
664
-
665
- Args:
666
- offset (int | float): The offset used for translate.
667
- direction (str): The translate direction, either
668
- "horizontal" or "vertical".
669
-
670
- Returns:
671
- ndarray: The translate matrix with dtype float32.
672
- """
673
- if direction == 'horizontal':
674
- translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]])
675
- elif direction == 'vertical':
676
- translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]])
677
- return translate_matrix
678
-
679
-
680
- def imtranslate(img,
681
- offset,
682
- direction='horizontal',
683
- border_value=0,
684
- interpolation='bilinear'):
685
- """Translate an image.
686
-
687
- Args:
688
- img (ndarray): Image to be translated with format
689
- (h, w) or (h, w, c).
690
- offset (int | float): The offset used for translate.
691
- direction (str): The translate direction, either "horizontal"
692
- or "vertical".
693
- border_value (int | tuple[int]): Value used in case of a
694
- constant border.
695
- interpolation (str): Same as :func:`resize`.
696
-
697
- Returns:
698
- ndarray: The translated image.
699
- """
700
- assert direction in ['horizontal',
701
- 'vertical'], f'Invalid direction: {direction}'
702
- height, width = img.shape[:2]
703
- if img.ndim == 2:
704
- channels = 1
705
- elif img.ndim == 3:
706
- channels = img.shape[-1]
707
- if isinstance(border_value, int):
708
- border_value = tuple([border_value] * channels)
709
- elif isinstance(border_value, tuple):
710
- assert len(border_value) == channels, \
711
- 'Expected the num of elements in tuple equals the channels' \
712
- 'of input image. Found {} vs {}'.format(
713
- len(border_value), channels)
714
- else:
715
- raise ValueError(
716
- f'Invalid type {type(border_value)} for `border_value`.')
717
- translate_matrix = _get_translate_matrix(offset, direction)
718
- translated = cv2.warpAffine(
719
- img,
720
- translate_matrix,
721
- (width, height),
722
- # Note case when the number elements in `border_value`
723
- # greater than 3 (e.g. translating masks whose channels
724
- # large than 3) will raise TypeError in `cv2.warpAffine`.
725
- # Here simply slice the first 3 values in `border_value`.
726
- borderValue=border_value[:3],
727
- flags=cv2_interp_codes[interpolation])
728
- return translated
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Apex-X/Tm/roop/metadata.py DELETED
@@ -1,2 +0,0 @@
1
- name = 'roop'
2
- version = '1.1.0'
 
 
 
spaces/Arnx/MusicGenXvAKN/tests/modules/test_conv.py DELETED
@@ -1,203 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from itertools import product
8
- import math
9
- import random
10
-
11
- import pytest
12
- import torch
13
- from torch import nn
14
-
15
- from audiocraft.modules import (
16
- NormConv1d,
17
- NormConvTranspose1d,
18
- StreamableConv1d,
19
- StreamableConvTranspose1d,
20
- pad1d,
21
- unpad1d,
22
- )
23
-
24
-
25
- def test_get_extra_padding_for_conv1d():
26
- # TODO: Implement me!
27
- pass
28
-
29
-
30
- def test_pad1d_zeros():
31
- x = torch.randn(1, 1, 20)
32
-
33
- xp1 = pad1d(x, (0, 5), mode='constant', value=0.)
34
- assert xp1.shape[-1] == 25
35
- xp2 = pad1d(x, (5, 5), mode='constant', value=0.)
36
- assert xp2.shape[-1] == 30
37
- xp3 = pad1d(x, (0, 0), mode='constant', value=0.)
38
- assert xp3.shape[-1] == 20
39
- xp4 = pad1d(x, (10, 30), mode='constant', value=0.)
40
- assert xp4.shape[-1] == 60
41
-
42
- with pytest.raises(AssertionError):
43
- pad1d(x, (-1, 0), mode='constant', value=0.)
44
-
45
- with pytest.raises(AssertionError):
46
- pad1d(x, (0, -1), mode='constant', value=0.)
47
-
48
- with pytest.raises(AssertionError):
49
- pad1d(x, (-1, -1), mode='constant', value=0.)
50
-
51
-
52
- def test_pad1d_reflect():
53
- x = torch.randn(1, 1, 20)
54
-
55
- xp1 = pad1d(x, (0, 5), mode='reflect', value=0.)
56
- assert xp1.shape[-1] == 25
57
- xp2 = pad1d(x, (5, 5), mode='reflect', value=0.)
58
- assert xp2.shape[-1] == 30
59
- xp3 = pad1d(x, (0, 0), mode='reflect', value=0.)
60
- assert xp3.shape[-1] == 20
61
- xp4 = pad1d(x, (10, 30), mode='reflect', value=0.)
62
- assert xp4.shape[-1] == 60
63
-
64
- with pytest.raises(AssertionError):
65
- pad1d(x, (-1, 0), mode='reflect', value=0.)
66
-
67
- with pytest.raises(AssertionError):
68
- pad1d(x, (0, -1), mode='reflect', value=0.)
69
-
70
- with pytest.raises(AssertionError):
71
- pad1d(x, (-1, -1), mode='reflect', value=0.)
72
-
73
-
74
- def test_unpad1d():
75
- x = torch.randn(1, 1, 20)
76
-
77
- u1 = unpad1d(x, (5, 5))
78
- assert u1.shape[-1] == 10
79
- u2 = unpad1d(x, (0, 5))
80
- assert u2.shape[-1] == 15
81
- u3 = unpad1d(x, (5, 0))
82
- assert u3.shape[-1] == 15
83
- u4 = unpad1d(x, (0, 0))
84
- assert u4.shape[-1] == x.shape[-1]
85
-
86
- with pytest.raises(AssertionError):
87
- unpad1d(x, (-1, 0))
88
-
89
- with pytest.raises(AssertionError):
90
- unpad1d(x, (0, -1))
91
-
92
- with pytest.raises(AssertionError):
93
- unpad1d(x, (-1, -1))
94
-
95
-
96
- class TestNormConv1d:
97
-
98
- def test_norm_conv1d_modules(self):
99
- N, C, T = 2, 2, random.randrange(1, 100_000)
100
- t0 = torch.randn(N, C, T)
101
-
102
- C_out, kernel_size, stride = 1, 4, 1
103
- expected_out_length = int((T - kernel_size) / stride + 1)
104
- wn_conv = NormConv1d(C, 1, kernel_size=4, norm='weight_norm')
105
- gn_conv = NormConv1d(C, 1, kernel_size=4, norm='time_group_norm')
106
- nn_conv = NormConv1d(C, 1, kernel_size=4, norm='none')
107
-
108
- assert isinstance(wn_conv.norm, nn.Identity)
109
- assert isinstance(wn_conv.conv, nn.Conv1d)
110
-
111
- assert isinstance(gn_conv.norm, nn.GroupNorm)
112
- assert isinstance(gn_conv.conv, nn.Conv1d)
113
-
114
- assert isinstance(nn_conv.norm, nn.Identity)
115
- assert isinstance(nn_conv.conv, nn.Conv1d)
116
-
117
- for conv_layer in [wn_conv, gn_conv, nn_conv]:
118
- out = conv_layer(t0)
119
- assert isinstance(out, torch.Tensor)
120
- assert list(out.shape) == [N, C_out, expected_out_length]
121
-
122
-
123
- class TestNormConvTranspose1d:
124
-
125
- def test_normalizations(self):
126
- N, C, T = 2, 2, random.randrange(1, 100_000)
127
- t0 = torch.randn(N, C, T)
128
-
129
- C_out, kernel_size, stride = 1, 4, 1
130
- expected_out_length = (T - 1) * stride + (kernel_size - 1) + 1
131
-
132
- wn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='weight_norm')
133
- gn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='time_group_norm')
134
- nn_convtr = NormConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride, norm='none')
135
-
136
- assert isinstance(wn_convtr.norm, nn.Identity)
137
- assert isinstance(wn_convtr.convtr, nn.ConvTranspose1d)
138
-
139
- assert isinstance(gn_convtr.norm, nn.GroupNorm)
140
- assert isinstance(gn_convtr.convtr, nn.ConvTranspose1d)
141
-
142
- assert isinstance(nn_convtr.norm, nn.Identity)
143
- assert isinstance(nn_convtr.convtr, nn.ConvTranspose1d)
144
-
145
- for convtr_layer in [wn_convtr, gn_convtr, nn_convtr]:
146
- out = convtr_layer(t0)
147
- assert isinstance(out, torch.Tensor)
148
- assert list(out.shape) == [N, C_out, expected_out_length]
149
-
150
-
151
- class TestStreamableConv1d:
152
-
153
- def get_streamable_conv1d_output_length(self, length, kernel_size, stride, dilation):
154
- # StreamableConv1d internally pads to make sure that the last window is full
155
- padding_total = (kernel_size - 1) * dilation - (stride - 1)
156
- n_frames = (length - kernel_size + padding_total) / stride + 1
157
- ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
158
- return ideal_length // stride
159
-
160
- def test_streamable_conv1d(self):
161
- N, C, T = 2, 2, random.randrange(1, 100_000)
162
- t0 = torch.randn(N, C, T)
163
- C_out = 1
164
-
165
- # conv params are [(kernel_size, stride, dilation)]
166
- conv_params = [(4, 1, 1), (4, 2, 1), (3, 1, 3), (10, 5, 1), (3, 2, 3)]
167
- for causal, (kernel_size, stride, dilation) in product([False, True], conv_params):
168
- expected_out_length = self.get_streamable_conv1d_output_length(T, kernel_size, stride, dilation)
169
- sconv = StreamableConv1d(C, C_out, kernel_size=kernel_size, stride=stride, dilation=dilation, causal=causal)
170
- out = sconv(t0)
171
- assert isinstance(out, torch.Tensor)
172
- print(list(out.shape), [N, C_out, expected_out_length])
173
- assert list(out.shape) == [N, C_out, expected_out_length]
174
-
175
-
176
- class TestStreamableConvTranspose1d:
177
-
178
- def get_streamable_convtr1d_output_length(self, length, kernel_size, stride):
179
- padding_total = (kernel_size - stride)
180
- return (length - 1) * stride - padding_total + (kernel_size - 1) + 1
181
-
182
- def test_streamable_convtr1d(self):
183
- N, C, T = 2, 2, random.randrange(1, 100_000)
184
- t0 = torch.randn(N, C, T)
185
-
186
- C_out = 1
187
-
188
- with pytest.raises(AssertionError):
189
- StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=False, trim_right_ratio=0.5)
190
- StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=-1.)
191
- StreamableConvTranspose1d(C, C_out, kernel_size=4, causal=True, trim_right_ratio=2)
192
-
193
- # causal params are [(causal, trim_right)]
194
- causal_params = [(False, 1.0), (True, 1.0), (True, 0.5), (True, 0.0)]
195
- # conv params are [(kernel_size, stride)]
196
- conv_params = [(4, 1), (4, 2), (3, 1), (10, 5)]
197
- for ((causal, trim_right_ratio), (kernel_size, stride)) in product(causal_params, conv_params):
198
- expected_out_length = self.get_streamable_convtr1d_output_length(T, kernel_size, stride)
199
- sconvtr = StreamableConvTranspose1d(C, C_out, kernel_size=kernel_size, stride=stride,
200
- causal=causal, trim_right_ratio=trim_right_ratio)
201
- out = sconvtr(t0)
202
- assert isinstance(out, torch.Tensor)
203
- assert list(out.shape) == [N, C_out, expected_out_length]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/utils/zoom_out_utils.py DELETED
@@ -1,47 +0,0 @@
1
- import cv2
2
- import numpy as np
3
- from PIL import Image
4
-
5
-
6
- def write_video(file_path, frames, fps):
7
- """
8
- Writes frames to an mp4 video file
9
- :param file_path: Path to output video, must end with .mp4
10
- :param frames: List of PIL.Image objects
11
- :param fps: Desired frame rate
12
- """
13
-
14
- w, h = frames[0].size
15
- fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")
16
- writer = cv2.VideoWriter(file_path, fourcc, fps, (w, h))
17
-
18
- for frame in frames:
19
- np_frame = np.array(frame.convert("RGB"))
20
- cv_frame = cv2.cvtColor(np_frame, cv2.COLOR_RGB2BGR)
21
- writer.write(cv_frame)
22
-
23
- writer.release()
24
-
25
-
26
- def dummy(images, **kwargs):
27
- return images, False
28
-
29
-
30
- def preprocess_image(current_image, steps, image_size):
31
- next_image = np.array(current_image.convert("RGBA")) * 0
32
- prev_image = current_image.resize((image_size - 2 * steps, image_size - 2 * steps))
33
- prev_image = prev_image.convert("RGBA")
34
- prev_image = np.array(prev_image)
35
- next_image[:, :, 3] = 1
36
- next_image[steps : image_size - steps, steps : image_size - steps, :] = prev_image
37
- prev_image = Image.fromarray(next_image)
38
-
39
- return prev_image
40
-
41
-
42
- def preprocess_mask_image(current_image):
43
- mask_image = np.array(current_image)[:, :, 3] # assume image has alpha mask (use .mode to check for "RGBA")
44
- mask_image = Image.fromarray(255 - mask_image).convert("RGB")
45
- current_image = current_image.convert("RGB")
46
-
47
- return current_image, mask_image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ashrafb/codellama-34b/app.py DELETED
@@ -1,260 +0,0 @@
1
- import os
2
- from typing import Iterator
3
-
4
- import gradio as gr
5
-
6
- from model import run
7
-
8
- HF_PUBLIC = os.environ.get("HF_PUBLIC", False)
9
-
10
- DEFAULT_SYSTEM_PROMPT = """\
11
- You are a helpful, respectful and honest assistant with a deep knowledge of code and software design. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
12
- """
13
- MAX_MAX_NEW_TOKENS = 10000
14
- DEFAULT_MAX_NEW_TOKENS = 1024
15
- MAX_INPUT_TOKEN_LENGTH = 10000
16
-
17
- DESCRIPTION = """
18
- # Code Llama 34B Chat
19
-
20
-
21
-
22
- """
23
-
24
- LICENSE = """
25
- <p/>
26
-
27
- ---
28
- As a derivate work of Code Llama by Meta,
29
- this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/codellama-2-34b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/codellama-2-34b-chat/blob/main/USE_POLICY.md).
30
- """
31
-
32
-
33
- def clear_and_save_textbox(message: str) -> tuple[str, str]:
34
- return '', message
35
-
36
-
37
- def display_input(message: str,
38
- history: list[tuple[str, str]]) -> list[tuple[str, str]]:
39
- history.append((message, ''))
40
- return history
41
-
42
-
43
- def delete_prev_fn(
44
- history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
45
- try:
46
- message, _ = history.pop()
47
- except IndexError:
48
- message = ''
49
- return history, message or ''
50
-
51
-
52
- def generate(
53
- message: str,
54
- history_with_input: list[tuple[str, str]],
55
- system_prompt: str,
56
- max_new_tokens: int,
57
- temperature: float,
58
- top_p: float,
59
- top_k: int,
60
- ) -> Iterator[list[tuple[str, str]]]:
61
- if max_new_tokens > MAX_MAX_NEW_TOKENS:
62
- raise ValueError
63
-
64
- history = history_with_input[:-1]
65
- generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k)
66
- try:
67
- first_response = next(generator)
68
- yield history + [(message, first_response)]
69
- except StopIteration:
70
- yield history + [(message, '')]
71
- for response in generator:
72
- yield history + [(message, response)]
73
-
74
-
75
- def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
76
- generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95, 50)
77
- for x in generator:
78
- pass
79
- return '', x
80
-
81
-
82
- def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None:
83
- input_token_length = len(message) + len(chat_history)
84
- if input_token_length > MAX_INPUT_TOKEN_LENGTH:
85
- raise gr.Error(f'The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.')
86
-
87
-
88
- with gr.Blocks(css=".gradio-container {background-color: #FFE4C4}") as demo:
89
-
90
- with gr.Group():
91
- chatbot = gr.Chatbot(label='Chatbot')
92
- with gr.Row():
93
- textbox = gr.Textbox(
94
- container=False,
95
- show_label=False,
96
- placeholder='Type a message...',
97
- scale=10,
98
- )
99
- submit_button = gr.Button('Submit',
100
- variant='primary',
101
- scale=1,
102
- min_width=0)
103
- with gr.Row():
104
- retry_button = gr.Button('🔄 Retry', variant='secondary')
105
- undo_button = gr.Button('↩️ Undo', variant='secondary')
106
- clear_button = gr.Button('🗑️ Clear', variant='secondary')
107
-
108
- saved_input = gr.State()
109
-
110
- with gr.Accordion(label='Advanced options', open=False):
111
- system_prompt = gr.Textbox(label='System prompt',
112
- value=DEFAULT_SYSTEM_PROMPT,
113
- lines=6)
114
- max_new_tokens = gr.Slider(
115
- label='Max new tokens',
116
- minimum=1,
117
- maximum=MAX_MAX_NEW_TOKENS,
118
- step=1,
119
- value=DEFAULT_MAX_NEW_TOKENS,
120
- )
121
- temperature = gr.Slider(
122
- label='Temperature',
123
- minimum=0.1,
124
- maximum=4.0,
125
- step=0.1,
126
- value=0.1,
127
- )
128
- top_p = gr.Slider(
129
- label='Top-p (nucleus sampling)',
130
- minimum=0.05,
131
- maximum=1.0,
132
- step=0.05,
133
- value=0.9,
134
- )
135
- top_k = gr.Slider(
136
- label='Top-k',
137
- minimum=1,
138
- maximum=1000,
139
- step=1,
140
- value=10,
141
- )
142
-
143
-
144
-
145
- gr.Markdown(LICENSE)
146
-
147
- textbox.submit(
148
- fn=clear_and_save_textbox,
149
- inputs=textbox,
150
- outputs=[textbox, saved_input],
151
- api_name=False,
152
- queue=False,
153
- ).then(
154
- fn=display_input,
155
- inputs=[saved_input, chatbot],
156
- outputs=chatbot,
157
- api_name=False,
158
- queue=False,
159
- ).then(
160
- fn=check_input_token_length,
161
- inputs=[saved_input, chatbot, system_prompt],
162
- api_name=False,
163
- queue=False,
164
- ).success(
165
- fn=generate,
166
- inputs=[
167
- saved_input,
168
- chatbot,
169
- system_prompt,
170
- max_new_tokens,
171
- temperature,
172
- top_p,
173
- top_k,
174
- ],
175
- outputs=chatbot,
176
- api_name=False,
177
- )
178
-
179
- button_event_preprocess = submit_button.click(
180
- fn=clear_and_save_textbox,
181
- inputs=textbox,
182
- outputs=[textbox, saved_input],
183
- api_name=False,
184
- queue=False,
185
- ).then(
186
- fn=display_input,
187
- inputs=[saved_input, chatbot],
188
- outputs=chatbot,
189
- api_name=False,
190
- queue=False,
191
- ).then(
192
- fn=check_input_token_length,
193
- inputs=[saved_input, chatbot, system_prompt],
194
- api_name=False,
195
- queue=False,
196
- ).success(
197
- fn=generate,
198
- inputs=[
199
- saved_input,
200
- chatbot,
201
- system_prompt,
202
- max_new_tokens,
203
- temperature,
204
- top_p,
205
- top_k,
206
- ],
207
- outputs=chatbot,
208
- api_name=False,
209
- )
210
-
211
- retry_button.click(
212
- fn=delete_prev_fn,
213
- inputs=chatbot,
214
- outputs=[chatbot, saved_input],
215
- api_name=False,
216
- queue=False,
217
- ).then(
218
- fn=display_input,
219
- inputs=[saved_input, chatbot],
220
- outputs=chatbot,
221
- api_name=False,
222
- queue=False,
223
- ).then(
224
- fn=generate,
225
- inputs=[
226
- saved_input,
227
- chatbot,
228
- system_prompt,
229
- max_new_tokens,
230
- temperature,
231
- top_p,
232
- top_k,
233
- ],
234
- outputs=chatbot,
235
- api_name=False,
236
- )
237
-
238
- undo_button.click(
239
- fn=delete_prev_fn,
240
- inputs=chatbot,
241
- outputs=[chatbot, saved_input],
242
- api_name=False,
243
- queue=False,
244
- ).then(
245
- fn=lambda x: x,
246
- inputs=[saved_input],
247
- outputs=textbox,
248
- api_name=False,
249
- queue=False,
250
- )
251
-
252
- clear_button.click(
253
- fn=lambda: ([], ''),
254
- outputs=[chatbot, saved_input],
255
- queue=False,
256
- api_name=False,
257
- )
258
-
259
- demo.queue(max_size=32).launch(share=HF_PUBLIC)
260
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/langrussianmodel.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/color_triplet.py DELETED
@@ -1,38 +0,0 @@
1
- from typing import NamedTuple, Tuple
2
-
3
-
4
- class ColorTriplet(NamedTuple):
5
- """The red, green, and blue components of a color."""
6
-
7
- red: int
8
- """Red component in 0 to 255 range."""
9
- green: int
10
- """Green component in 0 to 255 range."""
11
- blue: int
12
- """Blue component in 0 to 255 range."""
13
-
14
- @property
15
- def hex(self) -> str:
16
- """get the color triplet in CSS style."""
17
- red, green, blue = self
18
- return f"#{red:02x}{green:02x}{blue:02x}"
19
-
20
- @property
21
- def rgb(self) -> str:
22
- """The color in RGB format.
23
-
24
- Returns:
25
- str: An rgb color, e.g. ``"rgb(100,23,255)"``.
26
- """
27
- red, green, blue = self
28
- return f"rgb({red},{green},{blue})"
29
-
30
- @property
31
- def normalized(self) -> Tuple[float, float, float]:
32
- """Convert components into floats between 0 and 1.
33
-
34
- Returns:
35
- Tuple[float, float, float]: A tuple of three normalized colour components.
36
- """
37
- red, green, blue = self
38
- return red / 255.0, green / 255.0, blue / 255.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/tenacity/after.py DELETED
@@ -1,51 +0,0 @@
1
- # Copyright 2016 Julien Danjou
2
- # Copyright 2016 Joshua Harlow
3
- # Copyright 2013-2014 Ray Holder
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
- # limitations under the License.
16
-
17
- import typing
18
-
19
- from pip._vendor.tenacity import _utils
20
-
21
- if typing.TYPE_CHECKING:
22
- import logging
23
-
24
- from pip._vendor.tenacity import RetryCallState
25
-
26
-
27
- def after_nothing(retry_state: "RetryCallState") -> None:
28
- """After call strategy that does nothing."""
29
-
30
-
31
- def after_log(
32
- logger: "logging.Logger",
33
- log_level: int,
34
- sec_format: str = "%0.3f",
35
- ) -> typing.Callable[["RetryCallState"], None]:
36
- """After call strategy that logs to some logger the finished attempt."""
37
-
38
- def log_it(retry_state: "RetryCallState") -> None:
39
- if retry_state.fn is None:
40
- # NOTE(sileht): can't really happen, but we must please mypy
41
- fn_name = "<unknown>"
42
- else:
43
- fn_name = _utils.get_callback_name(retry_state.fn)
44
- logger.log(
45
- log_level,
46
- f"Finished call to '{fn_name}' "
47
- f"after {sec_format % retry_state.seconds_since_start}(s), "
48
- f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
49
- )
50
-
51
- return log_it
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Audio-AGI/AudioSep/models/CLAP/training/main.py DELETED
@@ -1,596 +0,0 @@
1
- from inspect import getargs
2
- import logging
3
- import os
4
- import random
5
- from datetime import datetime
6
- import bisect
7
- import copy
8
- import numpy as np
9
- import torch
10
- import torch.backends.cudnn as cudnn
11
- from torch import optim
12
- from torch.cuda.amp import GradScaler
13
- import faulthandler
14
- import pathlib
15
-
16
- try:
17
- import wandb
18
- except ImportError:
19
- wandb = None
20
-
21
- try:
22
- import torch.utils.tensorboard as tensorboard
23
- except ImportError:
24
- tensorboard = None
25
-
26
- try:
27
- import horovod.torch as hvd
28
- except ImportError:
29
- hvd = None
30
-
31
- from open_clip import create_model_and_transforms, trace_model, create_model
32
- from training.data import get_data
33
- from training.distributed import is_master, init_distributed_device, world_info_from_env
34
- from training.logger import setup_logging
35
- from training.params import parse_args
36
- from training.scheduler import cosine_lr
37
- from training.train import train_one_epoch, evaluate
38
- from open_clip.utils import dataset_split, get_optimizer
39
-
40
-
41
- def maintain_ckpts(args, startidx, all_idx_len):
42
- for i in reversed(range(startidx, all_idx_len)):
43
- if os.path.exists(os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt")):
44
- os.rename(
45
- os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"),
46
- os.path.join(args.checkpoint_path, f"epoch_top_{i+1}.pt"),
47
- )
48
- if os.path.exists(
49
- os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt")
50
- ):
51
- os.remove(os.path.join(args.checkpoint_path, f"epoch_top_{all_idx_len}.pt"))
52
- return
53
-
54
-
55
- def update_top_k_performance(
56
- new_metrics_inputs, current_top_k_ckpt_metrics, args, ckpt, bignumbetter=True
57
- ):
58
- """
59
- Record the top-k performance of the current epoch.
60
- current_top_k_metrics is a dictionary of the form: {1: top_1_ckpt_measure, 2: top_2_ckpt_measure, ...}
61
- """
62
- if isinstance(new_metrics_inputs, (list, tuple)):
63
- new_metrics_inputs = np.mean(new_metrics_inputs)
64
- return update_top_k_performance(
65
- new_metrics_inputs,
66
- current_top_k_ckpt_metrics,
67
- args=args,
68
- ckpt=ckpt,
69
- bignumbetter=bignumbetter,
70
- )
71
- elif isinstance(new_metrics_inputs, dict):
72
- new_metrics_inputs = np.mean(list(new_metrics_inputs.values()))
73
- return update_top_k_performance(
74
- new_metrics_inputs,
75
- current_top_k_ckpt_metrics,
76
- args=args,
77
- ckpt=ckpt,
78
- bignumbetter=bignumbetter,
79
- )
80
- elif isinstance(new_metrics_inputs, (float, int)):
81
- update_flag = {k: False for k in current_top_k_ckpt_metrics.keys()}
82
- sorted_keys = sorted(current_top_k_ckpt_metrics.keys())
83
- sorted_values = sorted(
84
- current_top_k_ckpt_metrics.values(), reverse=bignumbetter
85
- )
86
- sorted_values_ = copy.deepcopy(sorted_values)
87
- sorted_values.append(new_metrics_inputs)
88
- sorted_values = sorted(sorted_values, reverse=bignumbetter)
89
- sorted_values = sorted_values[:-1]
90
-
91
- if sorted_values == sorted_values_:
92
- return current_top_k_ckpt_metrics, new_metrics_inputs
93
- else:
94
- for i in range(len(sorted_keys)):
95
- if current_top_k_ckpt_metrics[sorted_keys[i]] != sorted_values[i]:
96
- current_top_k_ckpt_metrics[sorted_keys[i]] = sorted_values[i]
97
- update_flag[sorted_keys[i]] = True
98
- for i in range(len(update_flag)):
99
- if update_flag[i]:
100
- maintain_ckpts(args, i, len(sorted_keys))
101
- torch.save(
102
- ckpt,
103
- os.path.join(args.checkpoint_path, f"epoch_top_{i}.pt"),
104
- )
105
- break
106
- return current_top_k_ckpt_metrics, new_metrics_inputs
107
-
108
-
109
- # def updateifNone(a, b):
110
- # a = b if None else a
111
- # return a
112
-
113
-
114
- def is_pretrained_params(n):
115
- return (
116
- n.startswith("transformer")
117
- or n in ["positional_embedding", "text_projection"]
118
- or n.startswith("token_embedding")
119
- or n.startswith("ln_final")
120
- or n.startswith("logit_scale_t")
121
- )
122
-
123
-
124
- def random_seed(seed=42, rank=0):
125
- torch.manual_seed(seed + rank)
126
- np.random.seed(seed + rank)
127
- random.seed(seed + rank)
128
-
129
-
130
- def main():
131
- args = parse_args()
132
- # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
133
- args.amodel = args.amodel.replace("/", "-")
134
- # download sizes.json file
135
-
136
- # (yusong): the below two lines are for debug
137
- # print("setting up faulthandler")
138
- # faulthandler.register(10)
139
-
140
- random.seed(args.seed)
141
- torch.manual_seed(args.seed)
142
- torch.cuda.manual_seed(args.seed)
143
- torch.cuda.manual_seed_all(args.seed)
144
- np.random.seed(args.seed)
145
- if args.tmodel == "bert" or args.tmodel == "roberta" or args.tmodel == "bart":
146
- assert (
147
- args.pretrained == "" or args.pretrained is None
148
- ), "bert/roberta/bart text encoder does not support pretrained models."
149
-
150
- # get the name of the experiments
151
- if args.name is None:
152
- args.name = "-".join(
153
- [
154
- datetime.now().strftime("%Y_%m_%d-%H_%M_%S"),
155
- f"model_{args.amodel}",
156
- f"lr_{args.lr}",
157
- f"b_{args.batch_size}",
158
- f"j_{args.workers}",
159
- f"p_{args.precision}",
160
- ]
161
- )
162
-
163
- # discover initial world args early so we can log properly
164
- args.distributed = False
165
- args.local_rank, args.rank, args.world_size = world_info_from_env()
166
-
167
- if args.remotedata and is_master(args):
168
- for dataset_name in args.datasetnames:
169
- for split in dataset_split[dataset_name]:
170
- if not os.path.exists(f"./json_files/{dataset_name}/{split}"):
171
- os.makedirs(f"./json_files/{dataset_name}/{split}")
172
- os.system(
173
- f"aws s3 cp s3://s-laion-audio/webdataset_tar/{dataset_name}/{split}/sizes.json ./json_files/{dataset_name}/{split}/sizes.json"
174
- )
175
-
176
- args.log_path = None
177
- if is_master(args, local=args.log_local):
178
- log_base_path = os.path.join(args.logs, args.name)
179
- os.makedirs(log_base_path, exist_ok=True)
180
- log_filename = f"out-{args.rank}" if args.log_local else "out.log"
181
- args.log_path = os.path.join(log_base_path, log_filename)
182
- if os.path.exists(args.log_path):
183
- print(
184
- "Error. Experiment already exists. Use --name {} to specify a new experiment."
185
- )
186
- return -1
187
-
188
- # Set logger
189
- args.log_level = logging.DEBUG if args.debug else logging.INFO
190
- setup_logging(args.log_path, args.log_level)
191
-
192
- # fully initialize distributed device environment
193
- device = init_distributed_device(args)
194
-
195
- args.wandb = "wandb" in args.report_to or "all" in args.report_to
196
- args.tensorboard = "tensorboard" in args.report_to or "all" in args.report_to
197
- if is_master(args):
198
- args.tensorboard_path = (
199
- os.path.join(args.logs, args.name, "tensorboard")
200
- if args.tensorboard
201
- else ""
202
- )
203
- args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints")
204
- for dirname in [args.tensorboard_path, args.checkpoint_path]:
205
- if dirname:
206
- os.makedirs(dirname, exist_ok=True)
207
- else:
208
- args.tensorboard_path = ""
209
- args.checkpoint_path = ""
210
-
211
- if args.copy_codebase:
212
- copy_codebase(args)
213
-
214
- assert args.precision in ["amp", "fp16", "fp32"]
215
- if args.precision == "fp16":
216
- logging.warning(
217
- "It is recommended to use AMP mixed-precision instead of FP16. "
218
- "FP16 support needs further verification and tuning, especially for train."
219
- )
220
-
221
- if args.horovod:
222
- logging.info(
223
- f"Running in horovod mode with multiple processes / nodes. Device: {args.device}."
224
- f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
225
- )
226
- elif args.distributed:
227
- logging.info(
228
- f"Running in distributed mode with multiple processes. Device: {args.device}."
229
- f"Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}."
230
- )
231
- else:
232
- logging.info(f"Running with a single process. Device {args.device}.")
233
-
234
- logging.info(f"openai cache dir: {os.path.expanduser(args.openai_model_cache_dir)}")
235
-
236
- model, model_cfg = create_model(
237
- args.amodel,
238
- args.tmodel,
239
- args.pretrained,
240
- precision=args.precision,
241
- device=device,
242
- jit=args.torchscript,
243
- force_quick_gelu=args.force_quick_gelu,
244
- openai_model_cache_dir=os.path.expanduser(args.openai_model_cache_dir),
245
- skip_params=True,
246
- pretrained_audio=args.pretrained_audio,
247
- pretrained_text=args.pretrained_text,
248
- enable_fusion=args.enable_fusion,
249
- fusion_type=args.fusion_type,
250
- )
251
-
252
- if args.horovod:
253
- with torch.no_grad():
254
- for param in model.parameters():
255
- param.set_(param.contiguous())
256
-
257
- if args.trace:
258
- model = trace_model(model, batch_size=args.batch_size, device=device)
259
-
260
- if is_master(args):
261
- logging.info("Model:")
262
- logging.info(f"{str(model)}")
263
- logging.info("Params:")
264
- params_file = os.path.join(args.logs, args.name, "params.txt")
265
- with open(params_file, "w") as f:
266
- for name in sorted(vars(args)):
267
- val = getattr(args, name)
268
- logging.info(f" {name}: {val}")
269
- f.write(f"{name}: {val}\n")
270
-
271
- if args.distributed and not args.horovod:
272
- if args.use_bn_sync:
273
- model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
274
- ddp_args = {}
275
- if args.ddp_static_graph:
276
- # this doesn't exist in older PyTorch, arg only added if enabled
277
- ddp_args["static_graph"] = True
278
- model = torch.nn.parallel.DistributedDataParallel(
279
- model, device_ids=[device], find_unused_parameters=True, **ddp_args
280
- )
281
-
282
- data = get_data(args, model_cfg)
283
- assert len(data), "At least one train or eval dataset must be specified."
284
- if args.trace:
285
- assert "train" not in data, "Cannot train with traced model"
286
-
287
- exclude = (
288
- lambda n, p: p.ndim < 2
289
- or "bn" in n
290
- or "ln" in n
291
- or "bias" in n
292
- or "logit_scale" in n
293
- )
294
- include = lambda n, p: not exclude(n, p)
295
-
296
- named_parameters = list(model.named_parameters())
297
-
298
- # freeze text encoder
299
- text_freeze_parameters = [p for n, p in named_parameters if "text_branch" in n]
300
-
301
- if args.freeze_text:
302
- print("Freeze Text!!!!")
303
- for k in text_freeze_parameters:
304
- k.requires_grad = False
305
-
306
- gain_or_bias_params = [
307
- p for n, p in named_parameters if exclude(n, p) and p.requires_grad
308
- ]
309
- rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad]
310
-
311
- # set wd-related params to 0 if use adam optimizer
312
- if args.optimizer == "adam":
313
- args.wd = 0
314
- args.wd_pretrained = 0
315
- args.wd_new = 0
316
-
317
- if args.train_data is None:
318
- optimizer = None
319
- scheduler = None
320
- else:
321
- total_steps = data["train"].dataloader.num_batches * args.epochs
322
-
323
- if args.split_opt:
324
- for x in ["lr", "beta1", "beta2", "eps", "wd"]:
325
- for y in ["_new", "_pretrained"]:
326
- if getattr(args, x + y) is None:
327
- setattr(args, x + y, getattr(args, x))
328
-
329
- gain_or_bias_pretrained_params = [
330
- p
331
- for n, p in named_parameters
332
- if (exclude(n, p) and p.requires_grad) and is_pretrained_params(n)
333
- ]
334
- rest_pretrained_params = [
335
- p
336
- for n, p in named_parameters
337
- if (include(n, p) and p.requires_grad) and is_pretrained_params(n)
338
- ]
339
- gain_or_bias_new_params = [
340
- p
341
- for n, p in named_parameters
342
- if (exclude(n, p) and p.requires_grad) and (not is_pretrained_params(n))
343
- ]
344
- rest_new_params = [
345
- p
346
- for n, p in named_parameters
347
- if (include(n, p) and p.requires_grad) and (not is_pretrained_params(n))
348
- ]
349
- pretrained_params_optimizer = get_optimizer(
350
- [
351
- {"params": gain_or_bias_pretrained_params, "weight_decay": 0.0},
352
- {
353
- "params": rest_pretrained_params,
354
- "weight_decay": args.wd_pretrained,
355
- },
356
- ],
357
- lr=args.lr_pretrained,
358
- betas=(args.beta1_pretrained, args.beta2_pretrained),
359
- eps=args.eps_pretrained,
360
- momentum=args.momentum_pretrained,
361
- optimizer_name=args.optimizer,
362
- )
363
- pretrained_params_scheduler = cosine_lr(
364
- pretrained_params_optimizer,
365
- args.lr_pretrained,
366
- args.warmup,
367
- total_steps,
368
- )
369
- new_params_optimizer = get_optimizer(
370
- [
371
- {"params": gain_or_bias_new_params, "weight_decay": 0.0},
372
- {"params": rest_new_params, "weight_decay": args.wd_new},
373
- ],
374
- lr=args.lr_new,
375
- betas=(args.beta1_new, args.beta2_new),
376
- eps=args.eps_new,
377
- momentum=args.momentum_new,
378
- optimizer_name=args.optimizer,
379
- )
380
-
381
- new_params_scheduler = cosine_lr(
382
- new_params_optimizer, args.lr_new, args.warmup, total_steps
383
- )
384
-
385
- optimizer = {
386
- "pretrained": pretrained_params_optimizer,
387
- "new": new_params_optimizer,
388
- }
389
- scheduler = {
390
- "pretrained": pretrained_params_scheduler,
391
- "new": new_params_scheduler,
392
- }
393
-
394
- if args.horovod:
395
- pretrained_params_optimizer = hvd.DistributedOptimizer(
396
- pretrained_params_optimizer,
397
- named_parameters=model.named_parameters(),
398
- )
399
- new_params_optimizer = hvd.DistributedOptimizer(
400
- new_params_optimizer, named_parameters=model.named_parameters()
401
- )
402
- hvd.broadcast_parameters(model.state_dict(), root_rank=0)
403
- hvd.broadcast_optimizer_state(pretrained_params_optimizer, root_rank=0)
404
- hvd.broadcast_optimizer_state(new_params_optimizer, root_rank=0)
405
- else:
406
- optimizer = get_optimizer(
407
- [
408
- {"params": gain_or_bias_params, "weight_decay": 0.0},
409
- {"params": rest_params, "weight_decay": args.wd},
410
- ],
411
- lr=args.lr,
412
- betas=(args.beta1, args.beta2),
413
- eps=args.eps,
414
- momentum=args.momentum,
415
- optimizer_name=args.optimizer,
416
- )
417
-
418
- scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps)
419
-
420
- if args.horovod:
421
- optimizer = hvd.DistributedOptimizer(
422
- optimizer, named_parameters=model.named_parameters()
423
- )
424
- hvd.broadcast_parameters(model.state_dict(), root_rank=0)
425
- hvd.broadcast_optimizer_state(optimizer, root_rank=0)
426
-
427
- scaler = GradScaler() if args.precision == "amp" else None
428
-
429
- # optionally resume from a checkpoint
430
- start_epoch = 0
431
- if args.resume is not None:
432
- if os.path.isfile(args.resume):
433
- checkpoint = torch.load(args.resume, map_location=device)
434
- if "epoch" in checkpoint:
435
- # resuming a train checkpoint w/ epoch and optimizer state
436
- start_epoch = checkpoint["epoch"]
437
- sd = checkpoint["state_dict"]
438
- if not args.distributed and next(iter(sd.items()))[0].startswith(
439
- "module"
440
- ):
441
- sd = {k[len("module.") :]: v for k, v in sd.items()}
442
- model.load_state_dict(sd)
443
- if args.split_opt:
444
- if optimizer is not None:
445
- for k, o_ in optimizer.items():
446
- o_.load_state_dict(checkpoint[k + "_" + "optimizer"])
447
- if optimizer is not None:
448
- optimizer.load_state_dict(checkpoint["optimizer"])
449
- if scaler is not None and "scaler" in checkpoint:
450
- scaler.load_state_dict(checkpoint["scaler"])
451
- logging.info(
452
- f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})"
453
- )
454
- else:
455
- # loading a bare (model only) checkpoint for fine-tune or evaluation
456
- model.load_state_dict(checkpoint)
457
- logging.info(
458
- f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})"
459
- )
460
- if args.freeze_text:
461
- print("Freeze Text!!!!")
462
- for k in text_freeze_parameters:
463
- k.requires_grad = False
464
- else:
465
- logging.info("=> no checkpoint found at '{}'".format(args.resume))
466
-
467
- cudnn.benchmark = True
468
- cudnn.deterministic = False
469
-
470
- # determine if this worker should save logs and checkpoints. only do so if it is rank == 0
471
- args.save_logs = args.logs and args.logs.lower() != "none" and is_master(args)
472
- writer = None
473
- if args.save_logs and args.tensorboard:
474
- assert tensorboard is not None, "Please install tensorboard."
475
- writer = tensorboard.SummaryWriter(args.tensorboard_path)
476
-
477
- if args.wandb and is_master(args):
478
- assert wandb is not None, "Please install wandb."
479
- logging.debug("Starting wandb.")
480
- args.train_sz = data["train"].dataloader.num_samples
481
- if args.val_data is not None:
482
- args.val_sz = data["val"].dataloader.num_samples
483
- # you will have to configure this for your project!
484
- wandb.init(
485
- project="clap",
486
- notes=args.wandb_notes,
487
- name=args.wandb_notes,
488
- tags=[],
489
- config=vars(args),
490
- )
491
- if args.debug:
492
- wandb.watch(model, log="all")
493
- wandb.save(params_file)
494
- logging.debug("Finished loading wandb.")
495
-
496
- if "train" not in data:
497
- evaluate(model, data, start_epoch, args, writer)
498
- return
499
- elif start_epoch == 0 and "val" in data and not args.no_eval:
500
- evaluate(model, data, 0, args, writer)
501
- # print(f'rank {args.rank}, Start First Evaluation')# (yusong): for debug
502
- if args.save_top_performance:
503
- current_top_k_ckpt_metrics = {
504
- i: 0 for i in range(args.save_top_performance)
505
- } # initialize the top-k metric for ckpts to 0
506
-
507
- # print(f'rank {args.rank}, Start Training') # (yusong): for debug
508
- for epoch in range(start_epoch, args.epochs):
509
- # freeze the text param after (include) args.freeze_text_after, this is -1 by default
510
- if epoch == args.freeze_text_after:
511
- print("Text pretrained parameters are freezed since this epoch.")
512
- for k in text_freeze_parameters:
513
- k.requires_grad = False
514
- if is_master(args):
515
- logging.info(f"Start epoch {epoch}")
516
-
517
- train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer)
518
- completed_epoch = epoch + 1
519
-
520
- if (
521
- any(v in data for v in ("val", "imagenet-val", "imagenet-v2"))
522
- and not args.no_eval
523
- ):
524
- metrics = evaluate(model, data, completed_epoch, args, writer)
525
- if args.save_top_performance:
526
- top_k_dataset = args.top_k_checkpoint_select_dataset
527
- top_k_metric = args.top_k_checkpoint_select_metric
528
- filtered_metrics = [
529
- v
530
- for k, v in metrics.items()
531
- if top_k_metric in k and top_k_dataset in k
532
- ] # check all R@10 metrics (all dataset) and use it to update the ckpt
533
- # Saving checkpoints.
534
- if args.save_logs:
535
- if args.split_opt:
536
- opt_dict = {
537
- k + "_" + "optimizer": v.state_dict() for k, v in optimizer.items()
538
- }
539
- else:
540
- opt_dict = {"optimizer": optimizer.state_dict()}
541
- checkpoint_dict = {
542
- "epoch": completed_epoch,
543
- "name": args.name,
544
- "state_dict": model.state_dict(),
545
- }
546
- checkpoint_dict.update(opt_dict)
547
- if scaler is not None:
548
- checkpoint_dict["scaler"] = scaler.state_dict()
549
-
550
- if completed_epoch == args.epochs or (
551
- args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0
552
- ):
553
- torch.save(
554
- checkpoint_dict,
555
- os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"),
556
- )
557
- if args.save_most_recent:
558
- torch.save(
559
- checkpoint_dict,
560
- os.path.join(args.checkpoint_path, f"epoch_latest.pt"),
561
- )
562
- if args.save_top_performance and not args.no_eval:
563
- update_top_k_performance(
564
- filtered_metrics,
565
- current_top_k_ckpt_metrics,
566
- args,
567
- checkpoint_dict,
568
- bignumbetter=True,
569
- )
570
-
571
- if args.wandb and is_master(args):
572
- wandb.finish()
573
-
574
-
575
- def copy_codebase(args):
576
- from shutil import copytree, ignore_patterns
577
-
578
- new_code_path = os.path.join(args.logs, args.name, "code")
579
- if os.path.exists(new_code_path):
580
- print(
581
- f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
582
- )
583
- return -1
584
- print(f"Copying codebase to {new_code_path}")
585
- current_code_path = os.path.realpath(__file__)
586
- for _ in range(3):
587
- current_code_path = os.path.dirname(current_code_path)
588
- copytree(
589
- current_code_path, new_code_path, ignore=ignore_patterns("log", "logs", "wandb")
590
- )
591
- print("Done copying code.")
592
- return 1
593
-
594
-
595
- if __name__ == "__main__":
596
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/structures/test_keypoints.py DELETED
@@ -1,19 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import unittest
3
- import torch
4
-
5
- from detectron2.structures.keypoints import Keypoints
6
-
7
-
8
- class TestKeypoints(unittest.TestCase):
9
- def test_cat_keypoints(self):
10
- keypoints1 = Keypoints(torch.rand(2, 21, 3))
11
- keypoints2 = Keypoints(torch.rand(4, 21, 3))
12
-
13
- cat_keypoints = keypoints1.cat([keypoints1, keypoints2])
14
- self.assertTrue(torch.all(cat_keypoints.tensor[:2] == keypoints1.tensor).item())
15
- self.assertTrue(torch.all(cat_keypoints.tensor[2:] == keypoints2.tensor).item())
16
-
17
-
18
- if __name__ == "__main__":
19
- unittest.main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AyushP/PolicyChatBot/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: PolicyChatBot
3
- emoji: 🏃
4
- colorFrom: gray
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 1.17.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Banbri/zcvzcv/src/app/engine/censorship.ts DELETED
@@ -1,184 +0,0 @@
1
-
2
- // I don't want to be banned by Replicate because bad actors are asking
3
- // for some naked anime stuff or whatever
4
- // I also want to avoid a PR scandal due to some bad user generated content
5
-
6
- import { computeSecretFingerprint } from "@/lib/computeSecretFingerprint"
7
-
8
- // those keywords have been generated by looking at the logs of the panorama and the AI Comic Factory
9
- // those are real requests some users tried to attempt.. :|
10
-
11
- const chickens = [
12
- "fcb4dacbd99b21368c50f29c1d47071c87cf2225ab9192282c785460391cd365",
13
- "68840b60ac27eacaa7afe17e898d3c4a2dc71acff8c74d6782c1bcaafd14963d",
14
- "67f745224fd6e1a7a3a244514d5807fcc994cbb62ca4ec8fa44cd14244a515ae",
15
- "681fea565117808c6dbe002520d2cfeeb3e5c67e68630afb4a453449a9da587b",
16
- "2f3d913b3db9e15a930aac43eb2d6fe8817db8e4bcf37794bf0227b06b718d1b",
17
- "922a700b807e4994df82eba2b48a6ac131fe8d8d1035d06b3592d622fb232161",
18
- "cb69ee6774eafcc720adb1f689d28acbb9f47998cbea0299ec66a58dedf91c37"
19
- ]
20
-
21
- const ducks = [
22
- "1c52cb20c0cbc76349fa63232b982bd394cf0850ebc17240dcf33c19fb15a26d",
23
- "e1d4de9b8d464d7da07c276b63a42c1c9922224f0a6cab6b0826427ce4a7461a",
24
- "0be3174bfb1a48a65875c2f035b1ae14fbc8f232f55785018de0cfe2132fa952",
25
- "0f174769641b2e5d2c79b5a83e8ef91e004f6f3e62531cd70cfdff02159268cb",
26
- "e9fb8ae8ff720acd91025229478a21e43e8e976e30119a76c293201adf572736",
27
- "f65a0dc0e07b5d084ff24c69dcdb953f7b57101d2ebb716d4dfb5963076ef807",
28
- "2bf38af1646489c2c086f811d082054cd29e23fa7bb5c525396bec01b3ab688e"
29
- ]
30
-
31
- const cats = [
32
- "fcffc3e997d952007d1b902a9cf40b750ba4a410ac65bfd95475996bf51359e4",
33
- "3172a5fa159754d703489dfba5af520b8ace107cdf170f4c4cb38a6797aa163f",
34
- "500012dbff4498a9c4513369d6b9b373fab9330ffd2cb1e622294043cc21b610",
35
- "84e3a8d34ee7d0c8e7a2926dd1acad46a0b66b9d27725b3a7e5053550f490301"
36
- ]
37
-
38
- const roasted = [
39
- "a2bfbce0046c9a52a0eabf98f73e0f8e09959970431fc892ebdb4e1c97031b50",
40
- "6eca1adf06851f99e9cdfbb496c27d46ff81106903d11f3346a146e96082b016",
41
- "49a124c9ed6fbbad4105b3657dc25de369bcafb9d6787f610c08f584cd607d0f",
42
- "c3afb59420c812cbc7c8f57ad3e8d79407f10106a99f829aa65316c99d0b29c4",
43
- "2b808858836a5c205080f5b93201ef92e098cff931d8de6d9f20dc722997d077",
44
- "07bef89d1a7d63c9c5ed64ba0f73d6cff689811847c2e20c8b3fbfb060e1d64e",
45
- "baeb994922d5473f534aa54322d83effe74c6c4dac807e6b523a677d7acdc17b",
46
- "ea4735a879edd5cc94ca7db26edd5a970df69a41f0009d3444486647e44175af",
47
- "f2412249030454cd13ac6f7965871d924c16daacda0123de81892adb19ce49ac",
48
- "9958c56e12bab8549cf752bcd8bec4ac36cf79c404b1faf5611f057bb71bc0e1",
49
- "76cdade0b3d4caf0888f60318a5cbca00f830a3b0bf37735fc64fdaeb67c34d3",
50
- "1bf53c97869e1ea89bda19da64a9173d48fe4ec823e949e2c898f8abb3fbf457",
51
- "1bf53c97869e1ea89bda19da64a9173d48fe4ec823e949e2c898f8abb3fbf457",
52
- "3d7f973fab8f4a19c0a3e59efe970ed7bd55a1cb795752d9cbe3c19e8a7d81ec"
53
- ]
54
-
55
- const banned = [
56
- "8a05d4869d9d6ce388c6cd2db13ca12b88097b90f9be027d5ffaaa467c7a6e5e",
57
- "0c475212a608138244c5fc150b1563e5ef79c516234fd78dcd5993f726c359a0",
58
- "df17388805f99f2ff3e5ae97a0f55e5c927eb47f17ca65822bf8c88f02bac3dd",
59
- "86c3355d1bd581cdf7306729d8dd0ee9b7a317b9cfd6d7a6f5fad9c0dafe2167",
60
- "23a2484cd420c9ffbfcc2c0075a9b330664450ced1fc64ab6a65e278086b8c6e",
61
- "fb4cabe709b62eea1b4cc0030c76f5e4a43ee677ce19124e8e7bafa86c78ab66",
62
- "d99c26daee85f7dc81c46c061a5874cff7179ed72d884d2316d664d36ffe7ab5",
63
- "b93c38af5aa221d76c60ee3eb762efee0cdb0daf29ceb235b7dda6d46c06490d",
64
- "8cf6c8765dc757319461dd9a785e77c201b8e5a604d36b817cd987c6a5e62500",
65
- "f4a1cb290745717f86c3cee30fc324c0d80a9945fcbc7bbeb010579f58792f1e",
66
- "7c87c47c42fc983119551342be9ddd5b32e530c0504ccdbbaa1e12b1d9f1bbcb",
67
- "d04fad4f21d030da7a1301afbf480ef6246eb7bbf0f26e31865b2e015a25f747",
68
- "d685ff22fb9da01ee949db212770729603989850864ef7a7085e1f086cfa7deb",
69
- "533b90588d9ccf7967da54691f575e9fd4926c6e0b5fd94a47b932bcea270bee",
70
- "9c2d61f28f5bb7f3f1dc9122be64cda8a428b46ce68b70120da4c41dba96ba4c",
71
- "5d4b1a3eebe64dfa631d0e3b084bd96ee9364c3669269f838ca17a4900276264",
72
- "d56f56413b9679fc0820a2c0237224ded8554c61fab8959c174123c8b68ba029",
73
- "323a9ab60739726070d615ff3a05d7ff6bb6e3c4dd9ff16ce24f253ecd7b8851",
74
- "975c6739de7d4999db15972f707f5f4e95649275f1c0c48e895b8c537e8638ec",
75
- "67ee26eb9e1c1c7124797321b02bca90a19c18171782917cd4a487b722484dce",
76
- "6df5aa7b72a4e6e3fb726489ff1437daa5752047507f4da912680b1d6647c7d6",
77
- "b0864805364359e8c5810c233b1bf2c74dedce9055ae5f7680ba05b4e39db8e2",
78
- "a8f841472ecffdd6266151148320c8e36847a24ead9d3338e0313b075c16649d",
79
- "f9b127cd90e85b0ff68dd220361671663f0154b2b827f1f7ea797b020ca0018c",
80
- "d5c20e9a1ecf01c82da24c514d867498b3e5f522adc1523ce29404a6563641d5",
81
- "241022b49d7c0aba24a61eea1137a804f36e4bcb47af42950275baac9b4e7aac",
82
- "fc99a70e17b6c86ef1b537654b0f50353567a7b59912c3ba955f3fca4d1ea696",
83
- "255306e968009003d295cb2a7256f27bfcdb5d1743bf4d9f2aa4b8adf1a7734d",
84
- "048c7b709763dd9c43794d241c369f0abcb079d546ddcbbba9968a1ed1da7ed7",
85
- "520cbfeef3e4c405d79478eedccb97a4d476be585626dd2b1c53292797491bc7",
86
- "f9f28a7ae7e8b1719b350a04dc087a4b8e33478d109ceeef6ba892b32d1105c9",
87
- "d177f1bfe603647ef4c1c0e6f1a7172081fb9bbc2ea859705949f2c5aa5d4f22",
88
- "302feef2c09247fbd23789581f7f5e2219f88ae0a937880954938573c2a52a84",
89
- "99edd6f57b864873835f16f19c805dd94bed9da8967b84e3a62782f106d9ebcc",
90
- "e75e5f01dcd8351c9553e89558085bd68e6feb295dee5d8da0c9b43ee303ce36",
91
- "135e52a026aea9d2e12de358a85e05cf21121a18269269b7c62678c3bc846f5b",
92
- "28e5b2d3eb5f1ef4cc7b570878b03acf303a6ca4ca95893591e0fb943b0beab0",
93
- "a26b26340f8d0363633490556d20bcc250726d10e1431eb8c22d6b1ff3f2b14a",
94
- "27e4ddde96ec6a1dbe1cf12d79448b3e72f144944c15b299629542d1b65fbabf",
95
- "efd9c0a391ee93251046a58326d1b21b33fe21d71a3fb1855b9048ade53df77c",
96
- "6d505fcce416c26a606878aab4d249a034ba2a9846cb1f883e0f9e3fb76ba6da",
97
- "3a37b8a1b72f9bca51233536d50f9c8d33a787434684787871e0049c82347cda",
98
- "16f9b451184a7c3148344c7d0315f5312ca20553d2271912ecaad91810d977e6",
99
- "7406537eb74d1885bd05e191228de313b13702a64d90ae1736c6377b25ab579a",
100
- "7e4d1395ae18980015cab16c85ffa20b4cb90a2db594126e893d0f7ac6eecaa8",
101
- "ba813ee6c25698f0f68a07121d38bb47c9aa404c1ab0a6e767595cb75e1747b8",
102
- "6586c93f3ece83e01ecc1eb84a7711e7975826a388d478a009468ea0ed9dc03e",
103
- "8960174c74d86e03ae88fb6774580170e49952f2286d960be08c556bbd0dda95",
104
- "4d611454369aa1a4e2b7eed1734fac5d480f08fb86b87a162967e416370f2a8e",
105
- "59d48440f85eabf565fe8d3bc6b973ba64c70df3b36b0511e0e67ceca91762b3",
106
- "cd926926e2af74e43d1a6a420a7e1933b78662320477a3c018b2711d8765e339",
107
- "80e90057df6a59823f51aafac36ed5bc4e5ac26d675d9c1467501590c82f12d4",
108
- "a9cf28b869b70e258adde5639a048f866ec86f8f3f3d53bfc960b86aa6da9239",
109
- "cc2adbf8ac0cddeefa304d7b20f14a7e047a4b2299cc5e8f898f5c59660bd964",
110
- "92a150a46146e9d3f84899cf15e12514af684e7ee18d7add782ddd4f4a15ef18",
111
- "d9b2e84ef6dc0ce449357d52c9095f69b173a1b848ea2921199d33b0ec10024a",
112
- "a9329a7e4d367a0135c1ca86c6ce5ecabcc26529235229d71b6bf991f7689e21",
113
- "8f160c6fd8ccc3fb2a371a4b52748f0bd030766627c4322e2911fe82f6b10497",
114
- "620e96eae4f3e88cbe0770292b33724c5df3866d83f39df6380441f7271c80e2",
115
- "cafa3481fa3c45ed1e55cd0129c12b477eeab5aa3d6da20cae6d6292f19b0e6d",
116
- "be07994e9a83aa3689e79b6e96123676ccc4fa29f523c28c750c6d60505531ee",
117
- "f6498069768cd3aa79b2b0c91879694f05a259c8ee4a6bb343f0435f74eb1b53",
118
- "c9b6b26cb3a694eb78fcac0a14ad18d46d50907186a9add41022d31d191b2b65"
119
- ]
120
-
121
- const young = [
122
- "ffdf66787b4a33b78b18c18822e334cfe2c8406caf442851deef451bd43140a1",
123
- "858f22219afc4b32a7ba9a27a213d7f495e77c3cceed8147eae5282bf3e23d39",
124
- "8c3c46df84ace3d58d4ce0fbc513017986b33c6002ae369d9f7dd1f892a898cb",
125
- "66caa22b9483fdf026ce67de61067d81535a7c9b3169cbc5c2a455ac8dcc7bec",
126
- "76893047b1eff9fadc7be07b13adb5aaed9c73bcdeea46ee07098605e2c7ff76",
127
- "526cb848754e2baaa17376a5693d90ba3f69f71fd2a866f22876ac8a075849a7",
128
- "f59c38e31d0f64dc1bfcdf34451723bc1a65570e209e5496c8d1d7f6d3d649db",
129
- "e013a67e275c62c1402ccbbb11ad14afb8b8a82318a44c07d67599ed5ac874de",
130
- "3bef34219fb07f867ecbff4d6748f598d6cc0761e17dd0d431ee1f4ec3281374",
131
- "8211bf5f613fac06cd5d074d34c16dfacc9367c8afaa6ad3aff99d145e5221be"
132
- ]
133
-
134
- const getFingerprint = (word: string) => {
135
- return computeSecretFingerprint(
136
- word.toLocaleLowerCase().replaceAll(/[^a-zA-Z0-9]/gi, "")
137
- )
138
- }
139
-
140
- const encode = (list: string[]) => {
141
- console.log(JSON.stringify(
142
- list.sort((a, b) => (b.length - a.length))
143
- .map(item => getFingerprint(item)), null, 2))
144
- }
145
-
146
- // encode([ "badword" ])
147
-
148
- export const filterOutBadWords = (sentence: string) => {
149
- if (process.env.ENABLE_CENSORSHIP !== "true") { return sentence }
150
-
151
- let requireCensorship = false
152
-
153
- const words = sentence.replaceAll(/[^a-zA-Z0-9]/gi, " ").replaceAll(/\s+/gi, " ").trim().split(" ")
154
-
155
- const sanitized = words.map(word => {
156
- const fingerprint = getFingerprint(word)
157
-
158
- let result: string = word
159
- // some users want to play it smart and bypass our system so let's play too
160
- if (chickens.includes(fingerprint)) {
161
- result = "large chicken"
162
- } else if (ducks.includes(fingerprint)) {
163
- result = "big duck"
164
- } else if (cats.includes(fingerprint)) {
165
- result = "cat"
166
- } else if (roasted.includes(fingerprint)) {
167
- result = "roasted chicken"
168
- } else if (young.includes(fingerprint)) {
169
- result = "adult"
170
- } else if (banned.includes(fingerprint)) {
171
- result = "_BANNED_"
172
- }
173
-
174
- if (result !== word) {
175
- requireCensorship = true
176
- }
177
- return result
178
- }).filter(item => item !== "_BANNED_").join(" ")
179
-
180
- // if the user didn't try to use a bad word, we leave it untouched
181
- // he words array has been degraded by the replace operation, but it removes commas etc which isn't great
182
- // so if the request was genuine and SFW, it's best to return the original prompt
183
- return requireCensorship ? sanitized : sentence
184
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Bart92/RVC_HF/demucs/__init__.py DELETED
@@ -1,7 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- __version__ = "2.0.3"
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/9anime Mod Apk Download.md DELETED
@@ -1,74 +0,0 @@
1
-
2
- <h1>9anime Mod APK Descargar: Ver anime en línea gratis</h1>
3
- <p>Anime es una forma popular de animación que se originó en Japón y tiene una base de fans en todo el mundo. A los fanáticos del anime les encanta ver sus programas y películas favoritas en línea, pero encontrar una plataforma de transmisión confiable y legal puede ser un reto. Es por eso que muchos amantes del anime recurren a 9anime, un sitio web de transmisión de video gratuito que ofrece una gran colección de contenido de anime en varios géneros, idiomas y resoluciones. Pero lo que si quieres disfrutar 9anime sin anuncios, pop-ups, o restricciones? Ahí es donde 9anime mod apk viene muy bien. En este artículo, le diremos todo lo que necesita saber sobre 9anime mod apk, cómo descargarlo e instalarlo, y cuáles son las mejores alternativas a 9anime. </p>
4
- <h2>9anime mod apk download</h2><br /><p><b><b>Download</b> &#9733; <a href="https://bltlly.com/2v6Mzb">https://bltlly.com/2v6Mzb</a></b></p><br /><br />
5
- <h2>¿Qué es 9anime? </h2>
6
- <p>9anime es un sitio web de transmisión de video gratuito que le permite ver anime en línea sin pagar ni registrarse. Puedes encontrar miles de títulos de anime en 9anime, desde clásicos hasta populares y actuales. También puedes elegir entre diferentes géneros, como acción, comedia, romance, terror, fantasía y más. Ya sea que esté buscando animación japonesa, china o coreana, puede encontrarla en 9anime. </p>
7
- <h3>Características de 9anime</h3>
8
- <p>Algunas de las características que hacen 9anime una gran plataforma de streaming de anime son:</p>
9
- <ul>
10
- <li> Tiene una interfaz fácil de usar que le permite buscar y navegar fácilmente por su anime favorito. </li>
11
- <li> Ofrece múltiples opciones de calidad de vídeo, de 360p a 1080p, dependiendo de la velocidad de Internet y el dispositivo. </li>
12
- <li> Proporciona versiones de anime tanto subbed como dubbed, para que pueda verlos en su idioma preferido. </li>
13
- <li>Le permite descargar vídeos de anime a su dispositivo para ver sin conexión. </li>
14
- <li> Tiene una función de programación que le muestra los próximos episodios y fechas de lanzamiento de su serie de anime favorita. </li>
15
- </ul>
16
- <h3>Pros y contras de 9anime</h3>
17
- <p>Como cualquier otra herramienta en línea, 9anime tiene sus ventajas y desventajas. Aquí están algunos de ellos:</p>
18
-
19
- <tr><th>Pros</th><th>Contras</th></tr>
20
- <tr><td>Tiene una gran y diversa biblioteca de contenido de anime. </td><td>Opera en un área gris legal y puede ser bloqueado por algunos ISP o regiones. </td></tr>
21
- <tr><td>Es de uso gratuito y no requiere registro o suscripción. </td><td>Muestra anuncios y ventanas emergentes que pueden ser molestos o perjudiciales. </td></tr>
22
- <tr><td>Actualiza su contenido regularmente con los últimos episodios y películas. </td><td>Puede tener algunos enlaces rotos o vídeos no disponibles debido a problemas de copyright. </td></tr>
23
- <tr><td>Soporta múltiples dispositivos y plataformas. </td><td>Puede tener algunos errores o fallos que afectan su rendimiento. </td></tr>
24
- </tabla>
25
- <h2>¿Qué es 9anime mod apk? </h2>
26
- <p>9anime mod apk es una versión modificada de la aplicación oficial 9anime que le da acceso a todas las características y beneficios de 9anime sin limitaciones o inconvenientes. Con 9anime mod apk, puede ver anime en línea de forma gratuita sin anuncios, pop-ups, o interrupciones. También puede disfrutar de velocidades de carga más rápidas, mejor calidad de vídeo y más opciones para descargar y transmitir contenido de anime. En resumen, 9anime mod apk is the ultimate anime app for anime fans. </p>
27
- <h3>Cómo descargar e instalar 9anime mod apk</h3>
28
- <p>Si desea descargar e instalar 9anime mod apk en su dispositivo Android, es necesario seguir estos pasos:</p>
29
- <ol>
30
- <li>Ir a [este enlace]( 1 ) y descargar la última versión del archivo apk mod 9anime a su dispositivo. </li>
31
- <li>Ir a la configuración del dispositivo y permitir la instalación de aplicaciones de fuentes desconocidas. </li>
32
- <li>Busque el archivo descargado 9anime mod apk y toque en él para iniciar el proceso de instalación. </li>
33
- <li>Siga las instrucciones en la pantalla y espere a que se complete la instalación. </li>
34
- <li>Lanzar el 9anime mod apk app y disfrutar viendo anime online gratis. </li>
35
- </ol>
36
- <h3>Beneficios de usar 9anime mod apk</h3>
37
- <p>Algunos de los beneficios de usar 9anime mod apk son:</p>
38
- <ul>
39
-
40
- <li> Puede descargar vídeos de anime a su dispositivo para ver sin conexión. </li>
41
- <li> Puede elegir entre diferentes opciones de calidad de vídeo, de 360p a 1080p. </li>
42
- <li>Puedes ver las versiones subbed y dubbed de anime en tu idioma preferido. </li>
43
- <li> Puede acceder a una enorme y diversa biblioteca de contenido de anime en varios géneros y categorías. </li>
44
- </ul>
45
- <h2>Las mejores alternativas a 9anime</h2>
46
- <p>Si estás buscando otras plataformas de streaming de anime que sean similares a 9anime, puedes ver estas alternativas:</p>
47
- <p></p>
48
- <h3>KissAnime</h3>
49
- <p>KissAnime es uno de los sitios web de streaming de anime más populares y conocidos que ofrece una amplia gama de contenido de anime en alta calidad. Puedes ver anime en línea gratis en KissAnime, o descargarlos en tu dispositivo para verlos sin conexión. También puedes encontrar versiones de anime en KissAnime, así como un foro de la comunidad donde puedes interactuar con otros fans del anime. </p>
50
- <h3>Crunchyroll</h3>
51
- <p>Crunchyroll es una plataforma de streaming de anime legal y con licencia que proporciona acceso a miles de títulos de anime, así como manga, drama y juegos. Puedes ver anime online gratis en Crunchyroll, o actualizar a una membresía premium para obtener más características y beneficios. También puedes disfrutar de simulcasts de los últimos episodios de anime, así como contenido original exclusivo de Crunchyroll.</p>
52
- <h3>AnimeSuge</h3>
53
- <p>AnimeSuge es un sitio web de transmisión de video gratuito que le permite ver anime en línea sin anuncios ni registro. Puedes encontrar una variedad de géneros y categorías de anime en AnimeSuge, desde acción hasta romance, comedia y terror, y más. También puedes ver versiones subbed y dobladas de anime en AnimeSuge, así como solicitar cualquier anime que quieras ver. </p>
54
- <h3>Anime-Planet</h3>
55
-
56
- <h3>AnimeFreak</h3>
57
- <p>AnimeFreak es un sitio web de transmisión de video gratuito que le permite ver anime en línea sin ningún problema. Puede navegar a través de una amplia y actualizada colección de contenido de anime en AnimeFreak, desde los últimos lanzamientos hasta los clásicos. También puedes ver versiones de anime en AnimeFreak, así como disfrutar de velocidades de carga rápidas y una calidad de transmisión suave. </p>
58
- <h2>Conclusión</h2>
59
- <p>9anime es una gran opción para ver anime en línea de forma gratuita, pero tiene algunos inconvenientes que pueden afectar a su experiencia de visualización. Es por eso que es posible que desee probar 9anime mod apk, una versión modificada de la aplicación oficial 9anime que le da todas las características y beneficios de 9anime sin limitaciones o inconvenientes. Con 9anime mod apk, puede ver anime en línea de forma gratuita sin anuncios, pop-ups, o interrupciones. También puede descargar vídeos de anime a su dispositivo para su visualización sin conexión, elegir entre diferentes opciones de calidad de vídeo, ver las versiones subbed y dubbed de anime, y acceder a una enorme y diversa biblioteca de contenido de anime. Sin embargo, si estás buscando otras alternativas a 9anime, puedes probar KissAnime, Crunchyroll, AnimeSuge, Anime-Planet o AnimeFreak. Estas son algunas de las mejores plataformas de streaming de anime que ofrecen servicios similares o mejores que 9anime. Esperamos que este artículo le ayudó a aprender más acerca de 9anime mod apk descargar y cómo ver anime en línea gratis. </p>
60
- <h2>Preguntas frecuentes</h2>
61
- <p>Aquí hay algunas preguntas frecuentes sobre 9anime mod apk download:</p>
62
- <h4>Es 9anime mod apk seguro de usar? </h4>
63
- <p>Sí, 9anime mod apk es seguro de usar siempre y cuando se descarga desde una fuente de confianza. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas y escanearlas en busca de virus o malware antes de usarlas. </ <p>h4>Es 9anime mod apk legal de usar? </h4>
64
-
65
- <h4>¿Funciona 9anime mod apk en dispositivos iOS? </h4>
66
- <p>No, 9anime mod apk solo es compatible con dispositivos Android. Si desea ver anime en línea de forma gratuita en su dispositivo iOS, tendrá que utilizar el sitio web o aplicación oficial 9anime, o cualquiera de las alternativas mencionadas anteriormente. </p>
67
- <h4>¿Cómo puedo actualizar 9anime mod apk? </h4>
68
- <p>Para actualizar 9anime mod apk, tendrá que descargar e instalar la última versión del archivo apk mod de la misma fuente que lo descargó de. También puede necesitar desinstalar la versión anterior de la aplicación antes de instalar la nueva. </p>
69
- <h4>¿Puedo solicitar cualquier anime en 9anime mod apk? </h4>
70
- <p>Sí, puede solicitar cualquier anime que desea ver en 9anime mod apk mediante el uso de la función de solicitud en la aplicación. Sin embargo, no hay garantía de que su solicitud se cumplirá, ya que depende de la disponibilidad y legalidad del contenido del anime. </p>
71
- <h4>¿Puedo ver anime sin conexión en 9anime mod apk? </h4>
72
- <p>Sí, se puede ver el anime sin conexión en 9anime mod apk mediante la descarga de los vídeos de anime a su dispositivo utilizando la función de descarga en la aplicación. Sin embargo, necesitará tener suficiente espacio de almacenamiento y conexión a Internet para descargar los videos. </p> 64aa2da5cf<br />
73
- <br />
74
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Apk M.facebook.com.md DELETED
@@ -1,94 +0,0 @@
1
-
2
- <h1>m.facebook.com apk: ¿Qué es y cómo descargarlo</h1>
3
- <p>Facebook es una de las plataformas de redes sociales más populares del mundo, con miles de millones de usuarios que se conectan, comparten e interactúan entre sí cada día. Sin embargo, no todo el mundo tiene un teléfono inteligente potente o una conexión a Internet estable para disfrutar de todas las funciones de la aplicación regular de Facebook. Es por eso que hay una versión alternativa de Facebook que está diseñado para dispositivos de gama baja y redes lentas: m.facebook.com apk. En este artículo, vamos a explicar lo que m.facebook.com apk es, ¿por qué debe usarlo, y cómo descargarlo e instalarlo en su dispositivo Android. </p>
4
- <h2>Introducción</h2>
5
- <p>Facebook es una gran manera de mantenerse en contacto con sus amigos y familiares, seguir a sus celebridades y marcas favoritas, ver videos en vivo, jugar juegos y más. Pero a veces, la aplicación regular de Facebook puede ser demasiado pesada y lenta para su dispositivo o su red. Puede ocupar mucho espacio de almacenamiento, consumir muchos datos y batería, y cargar lentamente o estrellarse con frecuencia. Si se enfrentan a estos problemas, es posible que desee probar m.facebook.com apk lugar. </p>
6
- <h2>apk m.facebook.com</h2><br /><p><b><b>Download Zip</b> &#10022;&#10022;&#10022; <a href="https://bltlly.com/2v6J39">https://bltlly.com/2v6J39</a></b></p><br /><br />
7
- <h3>¿Qué es m.facebook.com apk? </h3>
8
- <p>m.facebook.com apk es una versión más ligera y rápida de Facebook que utiliza menos datos y funciona en todas las condiciones de red. También se conoce como Facebook Lite o FB Lite. Es una aplicación oficial desarrollada por Facebook que tiene como objetivo proporcionar una mejor experiencia para los usuarios que tienen dispositivos de gama baja o conexiones a Internet pobres. Tiene todas las funciones básicas de Facebook, como publicar actualizaciones de estado, compartir fotos y videos, gustar y comentar publicaciones, encontrar eventos, jugar juegos, etc. También admite algunas funciones avanzadas, como transmisión en vivo, historias, grupos, páginas, etc.</p>
9
- <h3>¿Por qué usar apk m.facebook.com? </h3>
10
- <p>Hay muchas razones por las que es posible que desee utilizar m.facebook.com apk en lugar de la aplicación regular de Facebook. Estos son algunos de ellos:</p>
11
- <ul>
12
-
13
- <li>Funciona en los teléfonos Android antiguos - se puede utilizar en los teléfonos Android más antiguos que no son compatibles con la aplicación regular de Facebook. </li>
14
- <li>Utiliza menos datos - comprime imágenes y videos para reducir el uso de datos. También puede activar el modo de ahorro de datos para guardar aún más datos. </li>
15
- <li>Se carga rápidamente - está optimizado para velocidad y rendimiento. Carga páginas más rápido y muestra actualizaciones de amigos de manera más eficiente. </li>
16
- <li>Funciona en todas las redes - está diseñado para redes 2G y áreas con conexiones a Internet lentas o inestables. Puede acceder a Facebook incluso cuando la señal es débil o la red está congestionada. </li>
17
- </ul>
18
- <h3>Cómo descargar e instalar apk m.facebook.com? </h3>
19
- <p>Descargar e instalar m.facebook.com apk es muy fácil. Puede seguir estos pasos:</p>
20
- <ol>
21
- <li>Ir a [m.facebook.com]( 1 ) en su navegador. </li>
22
- <li>Toque en el botón "Descargar" en la parte superior de la página. </li>
23
- <li>Usted será redirigido a la página de Google Play Store de Facebook Lite. Toque en "Instalar" para comenzar a descargar la aplicación. </li>
24
- <li>Una vez descargada la aplicación, ábrela e inicia sesión con tu cuenta de Facebook. </li>
25
- <li>Disfruta usando apk m.facebook.com en tu dispositivo. </li>
26
- </ol>
27
- <h2>Características de m.facebook.com apk</h2>
28
- <p>m.facebook . com apk tiene muchas características que lo convierten en una gran alternativa a la aplicación regular de Facebook. Estos son algunos de ellos:</p>
29
- <h3>Rápido y ligero</h3>
30
- <p>m.facebook.com apk es rápido y ligero, lo que significa que funciona sin problemas y de manera eficiente en su dispositivo. No consume mucha memoria o recursos de CPU, por lo que no ralentiza el dispositivo ni agota la batería. Tampoco se bloquea o se congela a menudo, a diferencia de la aplicación regular de Facebook que puede tener errores o problemas técnicos. </p>
31
- <p></p>
32
- <h3>Funciona en dispositivos antiguos y de gama baja</h3>
33
-
34
- <h3>Utiliza menos datos y batería</h3>
35
- <p>m.facebook.com apk utiliza menos datos y batería que la aplicación regular de Facebook. Comprime imágenes y vídeos antes de enviarlos o recibirlos, lo que reduce la cantidad de datos transferidos a través de la red. También le permite activar el modo de ahorro de datos, lo que limita aún más el uso de datos al desactivar algunas características o cargar contenido de menor calidad. Puede ahorrar hasta el 90% de su uso de datos mediante el uso de apk m.facebook.com. Por otra parte, m.facebook.com apk utiliza menos energía que la aplicación regular de Facebook, lo que significa que no agota la batería tan rápido. Puede utilizar apk m.facebook.com durante períodos más largos sin preocuparse por quedarse sin batería. </p>
36
- <h3>Soporta todas las funciones de Facebook</h3>
37
- <p>m.facebook.com apk soporta todas las funciones de Facebook que necesita para mantenerse conectado y entretenido. Puedes hacer todo lo que puedas en la aplicación regular de Facebook, como:</p>
38
- <ul>
39
- <li>Publicar actualizaciones de estado, fotos, vídeos, e historias</li>
40
- <li>Como, comentar y compartir mensajes de tus amigos y páginas que sigues</li>
41
- <li>Chatea con tus amigos y familiares usando Messenger Lite</li>
42
- <li>Ver vídeos e historias en vivo de tus amigos y páginas que sigues</li>
43
- <li>Encuentra eventos cerca de ti e invita a tus amigos a unirse</li>
44
- <li>Juega con tus amigos usando juegos instantáneos</li>
45
- <li>Crear grupos y páginas para conectar con personas que comparten sus intereses</li>
46
- <li>Descubre nuevas personas y páginas a seguir usando Explore</li>
47
- <li>Gestiona tu perfil y configuración usando Menú</li>
48
- </ul>
49
- <h2>Pros y contras de m.facebook.com apk</h2>
50
- <p>m.facebook.com apk tiene muchas ventajas sobre la aplicación regular de Facebook, pero también tiene algunos inconvenientes que usted debe tener en cuenta. Aquí están algunos de los pros y los contras de m.facebook.com apk:</p>
51
- <h3>Pros</h3>
52
- <h4>Ahorre espacio de almacenamiento y uso de datos</h4>
53
-
54
- <h4>Acceda a Facebook incluso en malas condiciones de red</h4>
55
- <p>m.facebook.com apk le permite acceder a Facebook incluso en condiciones de red pobres mediante la optimización de la velocidad y el rendimiento. Funciona bien en redes 2G y áreas con conexiones a Internet lentas o inestables. Carga páginas más rápido y muestra actualizaciones de amigos de manera más eficiente. Puedes acceder a Facebook incluso cuando la señal es débil o la red está congestionada. </p>
56
- <h4>Disfruta de una interfaz sencilla y fácil de usar</h4>
57
- <p>m.facebook.com apk tiene una interfaz simple y fácil de usar que hace que sea fácil de usar y navegar. Tiene un diseño limpio y minimalista que se centra en las características esenciales de Facebook. No tiene ningún elemento innecesario o de distracción que pueda desordenar la pantalla o confundir al usuario. También tiene una opción de modo oscuro que reduce la fatiga ocular y ahorra vida de la batería. </p>
58
- <h3>Contras</h3>
59
- <h4>Puede tener algunos problemas de compatibilidad con algunos dispositivos</h4>
60
- <p>m.facebook.com apk puede tener algunos problemas de compatibilidad con algunos dispositivos que pueden afectar a su funcionalidad o rendimiento. Algunos usuarios han reportado problemas como estrellarse, congelarse, retrasarse o no cargar correctamente en algunos dispositivos. Estos problemas pueden ser causados por varios factores, como el modelo de dispositivo, la versión del sistema operativo, la configuración de red, etc. Si encuentra alguno de estos problemas, puede intentar actualizar la aplicación, limpiar la caché, reiniciar el dispositivo o ponerse en contacto con el desarrollador para obtener soporte. </p>
61
- <h4>Puede que no soporte algunas funciones o actualizaciones de la aplicación regular de Facebook</h4>
62
-
63
- <h4>Puede tener menor calidad de imágenes y videos</h4>
64
- <p>m.facebook.com apk puede tener menor calidad de imágenes y videos que la aplicación regular de Facebook, ya que los comprime para guardar los datos y acelerar la carga. Esto puede resultar en imágenes y videos borrosos, pixelados o distorsionados que pueden no verse tan bien como los originales. Si quieres ver imágenes y videos de alta calidad en Facebook, es posible que desee utilizar la aplicación regular de Facebook en su lugar. </p>
65
- <h2>Conclusión</h2>
66
- <p>m.facebook.com apk es una gran alternativa a la aplicación regular de Facebook para los usuarios que tienen dispositivos de gama baja o conexiones a Internet pobres. Es rápido, ligero y utiliza menos datos y batería que la aplicación regular de Facebook. También funciona en dispositivos antiguos y de gama baja y es compatible con todas las funciones de Facebook. Sin embargo, también tiene algunos inconvenientes, como problemas de compatibilidad, falta de algunas características o actualizaciones y menor calidad de imágenes y videos. Usted debe pesar los pros y los contras de m.facebook.com apk antes de decidir si usarlo o no. </p>
67
- <h2>Preguntas frecuentes</h2>
68
- <p>Aquí hay algunas preguntas frecuentes sobre m.facebook.com apk:</p>
69
- <ol>
70
- <li> ¿Es m.facebook.com apk seguro de usar? </li>
71
- <p>Sí, m.facebook.com apk es seguro de usar, ya que es una aplicación oficial desarrollada por Facebook. No contiene ningún malware o virus que pueda dañar su dispositivo o su privacidad. Sin embargo, siempre debe descargarlo de una fuente confiable, como [m.facebook.com] o Google Play Store.</p>
72
- <li> ¿Es m.facebook.com apk libre de usar? </li>
73
- <p>Sí, m.facebook.com apk es de uso gratuito, al igual que la aplicación regular de Facebook. No es necesario pagar ninguna cuota o cargos para descargar o usarlo. Sin embargo, puede incurrir en cargos de datos de su proveedor de red si lo usa sin una conexión Wi-Fi. </p>
74
- <li> ¿Puedo usar apk m.facebook.com y la aplicación regular de Facebook al mismo tiempo? </li>
75
-
76
- <li> ¿Cómo puedo actualizar apk m.facebook.com? </li>
77
- <p>Puede actualizar m.facebook.com apk siguiendo estos pasos:</p>
78
- <ul>
79
- <li>Ir a [m.facebook.com] en su navegador. </li>
80
- <li>Toque en el botón "Descargar" en la parte superior de la página. </li>
81
- <li>Usted será redirigido a la página de Google Play Store de Facebook Lite. Toque en "Actualizar" para iniciar la actualización de la aplicación. </li>
82
- <li>Una vez que la aplicación se actualiza, ábrela y disfruta de la última versión. </li>
83
- </ul>
84
- <li>¿Cómo puedo eliminar m.facebook.com apk? </li>
85
- <p>Puede eliminar m.facebook.com apk siguiendo estos pasos:</p>
86
- <ul>
87
- <li>Ir a la configuración de su dispositivo y toque en "Aplicaciones". </li>
88
- <li>Encuentra y toca "Facebook Lite". </li>
89
- <li>Toque en "Desinstalar" y confirme su acción. </li>
90
- <li>La aplicación se eliminará de su dispositivo. </li>
91
- </ul>
92
- </ol></p> 64aa2da5cf<br />
93
- <br />
94
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/locations/__init__.py DELETED
@@ -1,467 +0,0 @@
1
- import functools
2
- import logging
3
- import os
4
- import pathlib
5
- import sys
6
- import sysconfig
7
- from typing import Any, Dict, Generator, Optional, Tuple
8
-
9
- from pip._internal.models.scheme import SCHEME_KEYS, Scheme
10
- from pip._internal.utils.compat import WINDOWS
11
- from pip._internal.utils.deprecation import deprecated
12
- from pip._internal.utils.virtualenv import running_under_virtualenv
13
-
14
- from . import _sysconfig
15
- from .base import (
16
- USER_CACHE_DIR,
17
- get_major_minor_version,
18
- get_src_prefix,
19
- is_osx_framework,
20
- site_packages,
21
- user_site,
22
- )
23
-
24
- __all__ = [
25
- "USER_CACHE_DIR",
26
- "get_bin_prefix",
27
- "get_bin_user",
28
- "get_major_minor_version",
29
- "get_platlib",
30
- "get_purelib",
31
- "get_scheme",
32
- "get_src_prefix",
33
- "site_packages",
34
- "user_site",
35
- ]
36
-
37
-
38
- logger = logging.getLogger(__name__)
39
-
40
-
41
- _PLATLIBDIR: str = getattr(sys, "platlibdir", "lib")
42
-
43
- _USE_SYSCONFIG_DEFAULT = sys.version_info >= (3, 10)
44
-
45
-
46
- def _should_use_sysconfig() -> bool:
47
- """This function determines the value of _USE_SYSCONFIG.
48
-
49
- By default, pip uses sysconfig on Python 3.10+.
50
- But Python distributors can override this decision by setting:
51
- sysconfig._PIP_USE_SYSCONFIG = True / False
52
- Rationale in https://github.com/pypa/pip/issues/10647
53
-
54
- This is a function for testability, but should be constant during any one
55
- run.
56
- """
57
- return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT))
58
-
59
-
60
- _USE_SYSCONFIG = _should_use_sysconfig()
61
-
62
- if not _USE_SYSCONFIG:
63
- # Import distutils lazily to avoid deprecation warnings,
64
- # but import it soon enough that it is in memory and available during
65
- # a pip reinstall.
66
- from . import _distutils
67
-
68
- # Be noisy about incompatibilities if this platforms "should" be using
69
- # sysconfig, but is explicitly opting out and using distutils instead.
70
- if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG:
71
- _MISMATCH_LEVEL = logging.WARNING
72
- else:
73
- _MISMATCH_LEVEL = logging.DEBUG
74
-
75
-
76
- def _looks_like_bpo_44860() -> bool:
77
- """The resolution to bpo-44860 will change this incorrect platlib.
78
-
79
- See <https://bugs.python.org/issue44860>.
80
- """
81
- from distutils.command.install import INSTALL_SCHEMES
82
-
83
- try:
84
- unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"]
85
- except KeyError:
86
- return False
87
- return unix_user_platlib == "$usersite"
88
-
89
-
90
- def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool:
91
- platlib = scheme["platlib"]
92
- if "/$platlibdir/" in platlib:
93
- platlib = platlib.replace("/$platlibdir/", f"/{_PLATLIBDIR}/")
94
- if "/lib64/" not in platlib:
95
- return False
96
- unpatched = platlib.replace("/lib64/", "/lib/")
97
- return unpatched.replace("$platbase/", "$base/") == scheme["purelib"]
98
-
99
-
100
- @functools.lru_cache(maxsize=None)
101
- def _looks_like_red_hat_lib() -> bool:
102
- """Red Hat patches platlib in unix_prefix and unix_home, but not purelib.
103
-
104
- This is the only way I can see to tell a Red Hat-patched Python.
105
- """
106
- from distutils.command.install import INSTALL_SCHEMES
107
-
108
- return all(
109
- k in INSTALL_SCHEMES
110
- and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k])
111
- for k in ("unix_prefix", "unix_home")
112
- )
113
-
114
-
115
- @functools.lru_cache(maxsize=None)
116
- def _looks_like_debian_scheme() -> bool:
117
- """Debian adds two additional schemes."""
118
- from distutils.command.install import INSTALL_SCHEMES
119
-
120
- return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES
121
-
122
-
123
- @functools.lru_cache(maxsize=None)
124
- def _looks_like_red_hat_scheme() -> bool:
125
- """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.
126
-
127
- Red Hat's ``00251-change-user-install-location.patch`` changes the install
128
- command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is
129
- (fortunately?) done quite unconditionally, so we create a default command
130
- object without any configuration to detect this.
131
- """
132
- from distutils.command.install import install
133
- from distutils.dist import Distribution
134
-
135
- cmd: Any = install(Distribution())
136
- cmd.finalize_options()
137
- return (
138
- cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local"
139
- and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local"
140
- )
141
-
142
-
143
- @functools.lru_cache(maxsize=None)
144
- def _looks_like_slackware_scheme() -> bool:
145
- """Slackware patches sysconfig but fails to patch distutils and site.
146
-
147
- Slackware changes sysconfig's user scheme to use ``"lib64"`` for the lib
148
- path, but does not do the same to the site module.
149
- """
150
- if user_site is None: # User-site not available.
151
- return False
152
- try:
153
- paths = sysconfig.get_paths(scheme="posix_user", expand=False)
154
- except KeyError: # User-site not available.
155
- return False
156
- return "/lib64/" in paths["purelib"] and "/lib64/" not in user_site
157
-
158
-
159
- @functools.lru_cache(maxsize=None)
160
- def _looks_like_msys2_mingw_scheme() -> bool:
161
- """MSYS2 patches distutils and sysconfig to use a UNIX-like scheme.
162
-
163
- However, MSYS2 incorrectly patches sysconfig ``nt`` scheme. The fix is
164
- likely going to be included in their 3.10 release, so we ignore the warning.
165
- See msys2/MINGW-packages#9319.
166
-
167
- MSYS2 MINGW's patch uses lowercase ``"lib"`` instead of the usual uppercase,
168
- and is missing the final ``"site-packages"``.
169
- """
170
- paths = sysconfig.get_paths("nt", expand=False)
171
- return all(
172
- "Lib" not in p and "lib" in p and not p.endswith("site-packages")
173
- for p in (paths[key] for key in ("platlib", "purelib"))
174
- )
175
-
176
-
177
- def _fix_abiflags(parts: Tuple[str]) -> Generator[str, None, None]:
178
- ldversion = sysconfig.get_config_var("LDVERSION")
179
- abiflags = getattr(sys, "abiflags", None)
180
-
181
- # LDVERSION does not end with sys.abiflags. Just return the path unchanged.
182
- if not ldversion or not abiflags or not ldversion.endswith(abiflags):
183
- yield from parts
184
- return
185
-
186
- # Strip sys.abiflags from LDVERSION-based path components.
187
- for part in parts:
188
- if part.endswith(ldversion):
189
- part = part[: (0 - len(abiflags))]
190
- yield part
191
-
192
-
193
- @functools.lru_cache(maxsize=None)
194
- def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None:
195
- issue_url = "https://github.com/pypa/pip/issues/10151"
196
- message = (
197
- "Value for %s does not match. Please report this to <%s>"
198
- "\ndistutils: %s"
199
- "\nsysconfig: %s"
200
- )
201
- logger.log(_MISMATCH_LEVEL, message, key, issue_url, old, new)
202
-
203
-
204
- def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool:
205
- if old == new:
206
- return False
207
- _warn_mismatched(old, new, key=key)
208
- return True
209
-
210
-
211
- @functools.lru_cache(maxsize=None)
212
- def _log_context(
213
- *,
214
- user: bool = False,
215
- home: Optional[str] = None,
216
- root: Optional[str] = None,
217
- prefix: Optional[str] = None,
218
- ) -> None:
219
- parts = [
220
- "Additional context:",
221
- "user = %r",
222
- "home = %r",
223
- "root = %r",
224
- "prefix = %r",
225
- ]
226
-
227
- logger.log(_MISMATCH_LEVEL, "\n".join(parts), user, home, root, prefix)
228
-
229
-
230
- def get_scheme(
231
- dist_name: str,
232
- user: bool = False,
233
- home: Optional[str] = None,
234
- root: Optional[str] = None,
235
- isolated: bool = False,
236
- prefix: Optional[str] = None,
237
- ) -> Scheme:
238
- new = _sysconfig.get_scheme(
239
- dist_name,
240
- user=user,
241
- home=home,
242
- root=root,
243
- isolated=isolated,
244
- prefix=prefix,
245
- )
246
- if _USE_SYSCONFIG:
247
- return new
248
-
249
- old = _distutils.get_scheme(
250
- dist_name,
251
- user=user,
252
- home=home,
253
- root=root,
254
- isolated=isolated,
255
- prefix=prefix,
256
- )
257
-
258
- warning_contexts = []
259
- for k in SCHEME_KEYS:
260
- old_v = pathlib.Path(getattr(old, k))
261
- new_v = pathlib.Path(getattr(new, k))
262
-
263
- if old_v == new_v:
264
- continue
265
-
266
- # distutils incorrectly put PyPy packages under ``site-packages/python``
267
- # in the ``posix_home`` scheme, but PyPy devs said they expect the
268
- # directory name to be ``pypy`` instead. So we treat this as a bug fix
269
- # and not warn about it. See bpo-43307 and python/cpython#24628.
270
- skip_pypy_special_case = (
271
- sys.implementation.name == "pypy"
272
- and home is not None
273
- and k in ("platlib", "purelib")
274
- and old_v.parent == new_v.parent
275
- and old_v.name.startswith("python")
276
- and new_v.name.startswith("pypy")
277
- )
278
- if skip_pypy_special_case:
279
- continue
280
-
281
- # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in
282
- # the ``include`` value, but distutils's ``headers`` does. We'll let
283
- # CPython decide whether this is a bug or feature. See bpo-43948.
284
- skip_osx_framework_user_special_case = (
285
- user
286
- and is_osx_framework()
287
- and k == "headers"
288
- and old_v.parent.parent == new_v.parent
289
- and old_v.parent.name.startswith("python")
290
- )
291
- if skip_osx_framework_user_special_case:
292
- continue
293
-
294
- # On Red Hat and derived Linux distributions, distutils is patched to
295
- # use "lib64" instead of "lib" for platlib.
296
- if k == "platlib" and _looks_like_red_hat_lib():
297
- continue
298
-
299
- # On Python 3.9+, sysconfig's posix_user scheme sets platlib against
300
- # sys.platlibdir, but distutils's unix_user incorrectly continues
301
- # using the same $usersite for both platlib and purelib. This creates a
302
- # mismatch when sys.platlibdir is not "lib".
303
- skip_bpo_44860 = (
304
- user
305
- and k == "platlib"
306
- and not WINDOWS
307
- and sys.version_info >= (3, 9)
308
- and _PLATLIBDIR != "lib"
309
- and _looks_like_bpo_44860()
310
- )
311
- if skip_bpo_44860:
312
- continue
313
-
314
- # Slackware incorrectly patches posix_user to use lib64 instead of lib,
315
- # but not usersite to match the location.
316
- skip_slackware_user_scheme = (
317
- user
318
- and k in ("platlib", "purelib")
319
- and not WINDOWS
320
- and _looks_like_slackware_scheme()
321
- )
322
- if skip_slackware_user_scheme:
323
- continue
324
-
325
- # Both Debian and Red Hat patch Python to place the system site under
326
- # /usr/local instead of /usr. Debian also places lib in dist-packages
327
- # instead of site-packages, but the /usr/local check should cover it.
328
- skip_linux_system_special_case = (
329
- not (user or home or prefix or running_under_virtualenv())
330
- and old_v.parts[1:3] == ("usr", "local")
331
- and len(new_v.parts) > 1
332
- and new_v.parts[1] == "usr"
333
- and (len(new_v.parts) < 3 or new_v.parts[2] != "local")
334
- and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme())
335
- )
336
- if skip_linux_system_special_case:
337
- continue
338
-
339
- # On Python 3.7 and earlier, sysconfig does not include sys.abiflags in
340
- # the "pythonX.Y" part of the path, but distutils does.
341
- skip_sysconfig_abiflag_bug = (
342
- sys.version_info < (3, 8)
343
- and not WINDOWS
344
- and k in ("headers", "platlib", "purelib")
345
- and tuple(_fix_abiflags(old_v.parts)) == new_v.parts
346
- )
347
- if skip_sysconfig_abiflag_bug:
348
- continue
349
-
350
- # MSYS2 MINGW's sysconfig patch does not include the "site-packages"
351
- # part of the path. This is incorrect and will be fixed in MSYS.
352
- skip_msys2_mingw_bug = (
353
- WINDOWS and k in ("platlib", "purelib") and _looks_like_msys2_mingw_scheme()
354
- )
355
- if skip_msys2_mingw_bug:
356
- continue
357
-
358
- # CPython's POSIX install script invokes pip (via ensurepip) against the
359
- # interpreter located in the source tree, not the install site. This
360
- # triggers special logic in sysconfig that's not present in distutils.
361
- # https://github.com/python/cpython/blob/8c21941ddaf/Lib/sysconfig.py#L178-L194
362
- skip_cpython_build = (
363
- sysconfig.is_python_build(check_home=True)
364
- and not WINDOWS
365
- and k in ("headers", "include", "platinclude")
366
- )
367
- if skip_cpython_build:
368
- continue
369
-
370
- warning_contexts.append((old_v, new_v, f"scheme.{k}"))
371
-
372
- if not warning_contexts:
373
- return old
374
-
375
- # Check if this path mismatch is caused by distutils config files. Those
376
- # files will no longer work once we switch to sysconfig, so this raises a
377
- # deprecation message for them.
378
- default_old = _distutils.distutils_scheme(
379
- dist_name,
380
- user,
381
- home,
382
- root,
383
- isolated,
384
- prefix,
385
- ignore_config_files=True,
386
- )
387
- if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS):
388
- deprecated(
389
- reason=(
390
- "Configuring installation scheme with distutils config files "
391
- "is deprecated and will no longer work in the near future. If you "
392
- "are using a Homebrew or Linuxbrew Python, please see discussion "
393
- "at https://github.com/Homebrew/homebrew-core/issues/76621"
394
- ),
395
- replacement=None,
396
- gone_in=None,
397
- )
398
- return old
399
-
400
- # Post warnings about this mismatch so user can report them back.
401
- for old_v, new_v, key in warning_contexts:
402
- _warn_mismatched(old_v, new_v, key=key)
403
- _log_context(user=user, home=home, root=root, prefix=prefix)
404
-
405
- return old
406
-
407
-
408
- def get_bin_prefix() -> str:
409
- new = _sysconfig.get_bin_prefix()
410
- if _USE_SYSCONFIG:
411
- return new
412
-
413
- old = _distutils.get_bin_prefix()
414
- if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"):
415
- _log_context()
416
- return old
417
-
418
-
419
- def get_bin_user() -> str:
420
- return _sysconfig.get_scheme("", user=True).scripts
421
-
422
-
423
- def _looks_like_deb_system_dist_packages(value: str) -> bool:
424
- """Check if the value is Debian's APT-controlled dist-packages.
425
-
426
- Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns the
427
- default package path controlled by APT, but does not patch ``sysconfig`` to
428
- do the same. This is similar to the bug worked around in ``get_scheme()``,
429
- but here the default is ``deb_system`` instead of ``unix_local``. Ultimately
430
- we can't do anything about this Debian bug, and this detection allows us to
431
- skip the warning when needed.
432
- """
433
- if not _looks_like_debian_scheme():
434
- return False
435
- if value == "/usr/lib/python3/dist-packages":
436
- return True
437
- return False
438
-
439
-
440
- def get_purelib() -> str:
441
- """Return the default pure-Python lib location."""
442
- new = _sysconfig.get_purelib()
443
- if _USE_SYSCONFIG:
444
- return new
445
-
446
- old = _distutils.get_purelib()
447
- if _looks_like_deb_system_dist_packages(old):
448
- return old
449
- if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"):
450
- _log_context()
451
- return old
452
-
453
-
454
- def get_platlib() -> str:
455
- """Return the default platform-shared lib location."""
456
- new = _sysconfig.get_platlib()
457
- if _USE_SYSCONFIG:
458
- return new
459
-
460
- from . import _distutils
461
-
462
- old = _distutils.get_platlib()
463
- if _looks_like_deb_system_dist_packages(old):
464
- return old
465
- if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"):
466
- _log_context()
467
- return old
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/macromanprober.py DELETED
@@ -1,162 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # This code was modified from latin1prober.py by Rob Speer <[email protected]>.
3
- # The Original Code is Mozilla Universal charset detector code.
4
- #
5
- # The Initial Developer of the Original Code is
6
- # Netscape Communications Corporation.
7
- # Portions created by the Initial Developer are Copyright (C) 2001
8
- # the Initial Developer. All Rights Reserved.
9
- #
10
- # Contributor(s):
11
- # Rob Speer - adapt to MacRoman encoding
12
- # Mark Pilgrim - port to Python
13
- # Shy Shalom - original C code
14
- #
15
- # This library is free software; you can redistribute it and/or
16
- # modify it under the terms of the GNU Lesser General Public
17
- # License as published by the Free Software Foundation; either
18
- # version 2.1 of the License, or (at your option) any later version.
19
- #
20
- # This library is distributed in the hope that it will be useful,
21
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
22
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23
- # Lesser General Public License for more details.
24
- #
25
- # You should have received a copy of the GNU Lesser General Public
26
- # License along with this library; if not, write to the Free Software
27
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
28
- # 02110-1301 USA
29
- ######################### END LICENSE BLOCK #########################
30
-
31
- from typing import List, Union
32
-
33
- from .charsetprober import CharSetProber
34
- from .enums import ProbingState
35
-
36
- FREQ_CAT_NUM = 4
37
-
38
- UDF = 0 # undefined
39
- OTH = 1 # other
40
- ASC = 2 # ascii capital letter
41
- ASS = 3 # ascii small letter
42
- ACV = 4 # accent capital vowel
43
- ACO = 5 # accent capital other
44
- ASV = 6 # accent small vowel
45
- ASO = 7 # accent small other
46
- ODD = 8 # character that is unlikely to appear
47
- CLASS_NUM = 9 # total classes
48
-
49
- # The change from Latin1 is that we explicitly look for extended characters
50
- # that are infrequently-occurring symbols, and consider them to always be
51
- # improbable. This should let MacRoman get out of the way of more likely
52
- # encodings in most situations.
53
-
54
- # fmt: off
55
- MacRoman_CharToClass = (
56
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
57
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
58
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
59
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
60
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
61
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
62
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
63
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
64
- OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
65
- ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
66
- ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
67
- ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
68
- OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
69
- ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
70
- ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
71
- ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
72
- ACV, ACV, ACO, ACV, ACO, ACV, ACV, ASV, # 80 - 87
73
- ASV, ASV, ASV, ASV, ASV, ASO, ASV, ASV, # 88 - 8F
74
- ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASV, # 90 - 97
75
- ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # 98 - 9F
76
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, ASO, # A0 - A7
77
- OTH, OTH, ODD, ODD, OTH, OTH, ACV, ACV, # A8 - AF
78
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
79
- OTH, OTH, OTH, OTH, OTH, OTH, ASV, ASV, # B8 - BF
80
- OTH, OTH, ODD, OTH, ODD, OTH, OTH, OTH, # C0 - C7
81
- OTH, OTH, OTH, ACV, ACV, ACV, ACV, ASV, # C8 - CF
82
- OTH, OTH, OTH, OTH, OTH, OTH, OTH, ODD, # D0 - D7
83
- ASV, ACV, ODD, OTH, OTH, OTH, OTH, OTH, # D8 - DF
84
- OTH, OTH, OTH, OTH, OTH, ACV, ACV, ACV, # E0 - E7
85
- ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # E8 - EF
86
- ODD, ACV, ACV, ACV, ACV, ASV, ODD, ODD, # F0 - F7
87
- ODD, ODD, ODD, ODD, ODD, ODD, ODD, ODD, # F8 - FF
88
- )
89
-
90
- # 0 : illegal
91
- # 1 : very unlikely
92
- # 2 : normal
93
- # 3 : very likely
94
- MacRomanClassModel = (
95
- # UDF OTH ASC ASS ACV ACO ASV ASO ODD
96
- 0, 0, 0, 0, 0, 0, 0, 0, 0, # UDF
97
- 0, 3, 3, 3, 3, 3, 3, 3, 1, # OTH
98
- 0, 3, 3, 3, 3, 3, 3, 3, 1, # ASC
99
- 0, 3, 3, 3, 1, 1, 3, 3, 1, # ASS
100
- 0, 3, 3, 3, 1, 2, 1, 2, 1, # ACV
101
- 0, 3, 3, 3, 3, 3, 3, 3, 1, # ACO
102
- 0, 3, 1, 3, 1, 1, 1, 3, 1, # ASV
103
- 0, 3, 1, 3, 1, 1, 3, 3, 1, # ASO
104
- 0, 1, 1, 1, 1, 1, 1, 1, 1, # ODD
105
- )
106
- # fmt: on
107
-
108
-
109
- class MacRomanProber(CharSetProber):
110
- def __init__(self) -> None:
111
- super().__init__()
112
- self._last_char_class = OTH
113
- self._freq_counter: List[int] = []
114
- self.reset()
115
-
116
- def reset(self) -> None:
117
- self._last_char_class = OTH
118
- self._freq_counter = [0] * FREQ_CAT_NUM
119
-
120
- # express the prior that MacRoman is a somewhat rare encoding;
121
- # this can be done by starting out in a slightly improbable state
122
- # that must be overcome
123
- self._freq_counter[2] = 10
124
-
125
- super().reset()
126
-
127
- @property
128
- def charset_name(self) -> str:
129
- return "MacRoman"
130
-
131
- @property
132
- def language(self) -> str:
133
- return ""
134
-
135
- def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
136
- byte_str = self.remove_xml_tags(byte_str)
137
- for c in byte_str:
138
- char_class = MacRoman_CharToClass[c]
139
- freq = MacRomanClassModel[(self._last_char_class * CLASS_NUM) + char_class]
140
- if freq == 0:
141
- self._state = ProbingState.NOT_ME
142
- break
143
- self._freq_counter[freq] += 1
144
- self._last_char_class = char_class
145
-
146
- return self.state
147
-
148
- def get_confidence(self) -> float:
149
- if self.state == ProbingState.NOT_ME:
150
- return 0.01
151
-
152
- total = sum(self._freq_counter)
153
- confidence = (
154
- 0.0
155
- if total < 0.01
156
- else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
157
- )
158
- confidence = max(confidence, 0.0)
159
- # lower the confidence of MacRoman so that other more accurate
160
- # detector can take priority.
161
- confidence *= 0.73
162
- return confidence
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/uts46data.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/BilalSardar/AutoML-Model-Training/app.py DELETED
@@ -1,45 +0,0 @@
1
- from operator import index
2
- import streamlit as st
3
- import plotly.express as px
4
- from pycaret.regression import setup, compare_models, pull, save_model, load_model
5
- import pandas_profiling
6
- import pandas as pd
7
- from streamlit_pandas_profiling import st_profile_report
8
- import os
9
-
10
- if os.path.exists('./dataset.csv'):
11
- df = pd.read_csv('dataset.csv', index_col=None)
12
-
13
- with st.sidebar:
14
- st.image("https://www.onepointltd.com/wp-content/uploads/2020/03/inno2.png")
15
- st.title("AutoBaliML")
16
- choice = st.radio("Navigation", ["Upload","Profiling","Modelling", "Download"])
17
- st.info("This project application helps you build and explore your data.")
18
-
19
- if choice == "Upload":
20
- st.title("Upload Your Dataset")
21
- file = st.file_uploader("Upload Your Dataset")
22
- if file:
23
- df = pd.read_csv(file, index_col=None)
24
- df.to_csv('dataset.csv', index=None)
25
- st.dataframe(df)
26
-
27
- if choice == "Profiling":
28
- st.title("Exploratory Data Analysis")
29
- profile_df = df.profile_report()
30
- st_profile_report(profile_df)
31
-
32
- if choice == "Modelling":
33
- chosen_target = st.selectbox('Choose the Target Column', df.columns)
34
- if st.button('Run Modelling'):
35
- setup(df, target=chosen_target, silent=True)
36
- setup_df = pull()
37
- st.dataframe(setup_df)
38
- best_model = compare_models()
39
- compare_df = pull()
40
- st.dataframe(compare_df)
41
- save_model(best_model, 'best_model')
42
-
43
- if choice == "Download":
44
- with open('best_model.pkl', 'rb') as f:
45
- st.download_button('Download Model', f, file_name="best_model.pkl")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/any_system_tag.h DELETED
@@ -1,34 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/execution_policy.h>
21
-
22
- namespace thrust
23
- {
24
-
25
- struct any_system_tag
26
- : thrust::execution_policy<any_system_tag>
27
- {
28
- // allow any_system_tag to convert to any type at all
29
- // XXX make this safer using enable_if<is_tag<T>> upon c++11
30
- template<typename T> operator T () const {return T();}
31
- };
32
-
33
- } // end thrust
34
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/customization.h DELETED
@@ -1,128 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
-
28
- // TODO: Move into system::cuda
29
-
30
- #pragma once
31
-
32
- #include <thrust/detail/config.h>
33
- #include <thrust/detail/cpp14_required.h>
34
-
35
- #if THRUST_CPP_DIALECT >= 2014
36
-
37
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
38
-
39
- #include <thrust/system/cuda/config.h>
40
-
41
- #include <thrust/detail/type_deduction.h>
42
- #include <thrust/detail/cstdint.h>
43
- #include <thrust/detail/execute_with_allocator.h>
44
- #include <thrust/system/cuda/memory_resource.h>
45
- #include <thrust/memory/detail/host_system_resource.h>
46
- #include <thrust/mr/allocator.h>
47
- #include <thrust/mr/disjoint_sync_pool.h>
48
- #include <thrust/mr/sync_pool.h>
49
- #include <thrust/per_device_resource.h>
50
-
51
- namespace thrust
52
- {
53
-
54
- namespace system { namespace cuda { namespace detail
55
- {
56
-
57
- using default_async_host_resource =
58
- thrust::mr::synchronized_pool_resource<
59
- thrust::host_memory_resource
60
- >;
61
-
62
- template <typename DerivedPolicy>
63
- auto get_async_host_allocator(
64
- thrust::detail::execution_policy_base<DerivedPolicy>&
65
- )
66
- THRUST_RETURNS(
67
- thrust::mr::stateless_resource_allocator<
68
- thrust::detail::uint8_t, default_async_host_resource
69
- >{}
70
- )
71
-
72
- ///////////////////////////////////////////////////////////////////////////////
73
-
74
- using default_async_device_resource =
75
- thrust::mr::disjoint_synchronized_pool_resource<
76
- thrust::system::cuda::memory_resource
77
- , thrust::mr::new_delete_resource
78
- >;
79
-
80
- template <typename DerivedPolicy>
81
- auto get_async_device_allocator(
82
- thrust::detail::execution_policy_base<DerivedPolicy>&
83
- )
84
- THRUST_RETURNS(
85
- thrust::per_device_allocator<
86
- thrust::detail::uint8_t, default_async_device_resource, par_t
87
- >{}
88
- )
89
-
90
- template <typename Allocator, template <typename> class BaseSystem>
91
- auto get_async_device_allocator(
92
- thrust::detail::execute_with_allocator<Allocator, BaseSystem>& exec
93
- )
94
- THRUST_RETURNS(exec.get_allocator())
95
-
96
- template <typename Allocator, template <typename> class BaseSystem>
97
- auto get_async_device_allocator(
98
- thrust::detail::execute_with_allocator_and_dependencies<
99
- Allocator, BaseSystem
100
- >& exec
101
- )
102
- THRUST_RETURNS(exec.get_allocator())
103
-
104
- ///////////////////////////////////////////////////////////////////////////////
105
-
106
- using default_async_universal_host_pinned_resource =
107
- thrust::mr::synchronized_pool_resource<
108
- thrust::system::cuda::universal_host_pinned_memory_resource
109
- >;
110
-
111
- template <typename DerivedPolicy>
112
- auto get_async_universal_host_pinned_allocator(
113
- thrust::detail::execution_policy_base<DerivedPolicy>&
114
- )
115
- THRUST_RETURNS(
116
- thrust::mr::stateless_resource_allocator<
117
- thrust::detail::uint8_t, default_async_universal_host_pinned_resource
118
- >{}
119
- )
120
-
121
- }}} // namespace system::cuda::detail
122
-
123
- } // end namespace thrust
124
-
125
- #endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
126
-
127
- #endif
128
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/VizWiz-CLIP-VQA/model/vqa_model.py DELETED
@@ -1,123 +0,0 @@
1
- import torch
2
-
3
- class HeadVQA(torch.nn.Module):
4
- def __init__(self, train_config):
5
- super().__init__()
6
-
7
- embedding_size = {'RN50': 1024,
8
- 'RN101': 512,
9
- 'RN50x4': 640,
10
- 'RN50x16': 768,
11
- 'RN50x64': 1024,
12
- 'ViT-B/32': 512,
13
- 'ViT-B/16': 512,
14
- 'ViT-L/14': 768,
15
- 'ViT-L/14@336px': 768}
16
-
17
- n_aux_classes = len(set(train_config.aux_mapping.values()))
18
-
19
- self.ln1 = torch.nn.LayerNorm(embedding_size[train_config.model]*2)
20
- self.dp1 = torch.nn.Dropout(0.5)
21
- self.fc1 = torch.nn.Linear(embedding_size[train_config.model] * 2, 512)
22
-
23
- self.ln2 = torch.nn.LayerNorm(512)
24
- self.dp2 = torch.nn.Dropout(0.5)
25
- self.fc2 = torch.nn.Linear(512, train_config.n_classes)
26
-
27
- self.fc_aux = torch.nn.Linear(512, n_aux_classes)
28
- self.fc_gate = torch.nn.Linear(n_aux_classes, train_config.n_classes)
29
- self.act_gate = torch.nn.Sigmoid()
30
-
31
-
32
- def forward(self, img_features, question_features):
33
- xc = torch.cat((img_features, question_features), dim=-1)
34
-
35
- x = self.ln1(xc)
36
- x = self.dp1(x)
37
- x = self.fc1(x)
38
-
39
- aux = self.fc_aux(x)
40
-
41
- gate = self.fc_gate(aux)
42
- gate = self.act_gate(gate)
43
-
44
- x = self.ln2(x)
45
- x = self.dp2(x)
46
- vqa = self.fc2(x)
47
-
48
- output = vqa * gate
49
-
50
- return output, aux
51
-
52
-
53
- class NetVQA(torch.nn.Module):
54
- def __init__(self, train_config):
55
- super().__init__()
56
-
57
- self.heads = torch.nn.ModuleList()
58
-
59
- if isinstance(train_config.folds, list):
60
- self.num_heads = len(train_config.folds)
61
- else:
62
- self.num_heads = train_config.folds
63
-
64
- for i in range(self.num_heads):
65
- self.heads.append(HeadVQA(train_config))
66
-
67
-
68
- def forward(self, img_features, question_features):
69
-
70
- output = []
71
- output_aux = []
72
-
73
- for head in self.heads:
74
-
75
- logits, logits_aux = head(img_features, question_features)
76
-
77
- probs = logits.softmax(-1)
78
- probs_aux = logits_aux.softmax(-1)
79
-
80
- output.append(probs)
81
- output_aux.append(probs_aux)
82
-
83
- output = torch.stack(output, dim=-1).mean(-1)
84
- output_aux = torch.stack(output_aux, dim=-1).mean(-1)
85
-
86
- return output, output_aux
87
-
88
- def merge_vqa(train_config):
89
-
90
- # Initialize model
91
- model = NetVQA(train_config)
92
-
93
-
94
- for fold in train_config.folds:
95
-
96
- print("load weights from fold {} into head {}".format(fold, fold))
97
-
98
- checkpoint_path = "{}/{}/fold_{}".format(train_config.model_path, train_config.model, fold)
99
-
100
- if train_config.crossvalidation:
101
- # load best checkpoint
102
- model_state_dict = torch.load('{}/weights_best.pth'.format(checkpoint_path))
103
- else:
104
- # load checkpoint on train end
105
- model_state_dict = torch.load('{}/weights_end.pth'.format(checkpoint_path))
106
-
107
- model.heads[fold].load_state_dict(model_state_dict, strict=True)
108
-
109
- checkpoint_path = "{}/{}/weights_merged.pth".format(train_config.model_path, train_config.model)
110
-
111
- print("Saving weights of merged model:", checkpoint_path)
112
-
113
- torch.save(model.state_dict(), checkpoint_path)
114
-
115
- return model
116
-
117
-
118
-
119
-
120
-
121
-
122
-
123
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/drawings-to-human/frontend/src/app.css DELETED
@@ -1,10 +0,0 @@
1
- @import url('https://fonts.googleapis.com/css2?family=Open+Sans:wght@100;200;300;400;500;600;700;800&display=swap');
2
- @tailwind base;
3
- @tailwind components;
4
- @tailwind utilities;
5
-
6
- @layer base {
7
- html {
8
- font-family: 'Open Sans', sans-serif;
9
- }
10
- }
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/lama-example/fetch_data/places_standard_test_val_sample.sh DELETED
@@ -1,22 +0,0 @@
1
- mkdir -p places_standard_dataset/val_hires/
2
- mkdir -p places_standard_dataset/visual_test_hires/
3
-
4
-
5
- # randomly sample images for test and vis
6
- OUT=$(python3 fetch_data/sampler.py)
7
- echo ${OUT}
8
-
9
- FILELIST=$(cat places_standard_dataset/original/test_random_files.txt)
10
-
11
- for i in $FILELIST
12
- do
13
- $(cp ${i} places_standard_dataset/val_hires/)
14
- done
15
-
16
- FILELIST=$(cat places_standard_dataset/original/val_random_files.txt)
17
-
18
- for i in $FILELIST
19
- do
20
- $(cp ${i} places_standard_dataset/visual_test_hires/)
21
- done
22
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Charliee/BingAi/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: BingAi
3
- emoji: 🏃
4
- colorFrom: indigo
5
- colorTo: green
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- app_port: 8080
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference