parquet-converter committed
Commit 9543646 · 1 Parent(s): daf991b

Update parquet files (step 60 of 249)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/README.md +0 -26
  2. spaces/101-5/gpt4free/g4f/.v1/unfinished/chatpdf/__init__.py +0 -82
  3. spaces/101-5/gpt4free/testing/wewordle/README.md +0 -1
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Everything You Need to Know About X Particles Download for Cinema 4D.md +0 -31
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Nancy Drew Games Full Version The History and Legacy of Nancy Drew.md +0 -132
  6. spaces/1gistliPinn/ChatGPT4/Examples/Disney Characters 3d Models Free Download Maya.md +0 -6
  7. spaces/1gistliPinn/ChatGPT4/Examples/Evermotion - 3D People V.1 - C4D.rar.md +0 -94
  8. spaces/1line/AutoGPT/Dockerfile +0 -38
  9. spaces/1phancelerku/anime-remove-background/Emsa-Register-Dll-Tool-Crack.md +0 -84
  10. spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py +0 -16
  11. spaces/AIFILMS/Pix2Pix-Video/style.css +0 -101
  12. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/__init__.py +0 -24
  13. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/model.py +0 -295
  14. spaces/AILab-CVC/SEED-Bench_Leaderboard/constants.py +0 -87
  15. spaces/AIQuest/lungCancerVgg19/app.py +0 -34
  16. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/__init__.py +0 -0
  17. spaces/AlekseyKorshuk/gai-project/README.md +0 -13
  18. spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/korean.py +0 -210
  19. spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py +0 -4
  20. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/sync_bn.py +0 -279
  21. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/_functions.py +0 -79
  22. spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/visualize.py +0 -247
  23. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py +0 -21
  24. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py +0 -18
  25. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/status.py +0 -132
  26. spaces/Audio-AGI/AudioSep/models/CLAP/training/params.py +0 -563
  27. spaces/AutoGeneralAI/ChatGPT/README.md +0 -13
  28. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_roi_pooler.py +0 -165
  29. spaces/Ayush113/cricket_matchups/README.md +0 -13
  30. spaces/Bart92/RVC_HF/Fixes/local_fixes.py +0 -136
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/base.py +0 -688
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/utf1632prober.py +0 -225
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/other.py +0 -161
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/sphinxext.py +0 -217
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/testing.py +0 -331
  36. spaces/Blessin/drama-director/app.py +0 -56
  37. spaces/Boadiwaa/Recipes/openai/error.py +0 -164
  38. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/__init__.py +0 -18
  39. spaces/CVPR/LIVE/pybind11/tests/test_kwargs_and_defaults.py +0 -192
  40. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par.h +0 -125
  41. spaces/CVPR/regionclip-demo/detectron2/layers/roi_align_rotated.py +0 -93
  42. spaces/ChrisPreston/diff-svc_minato_aqua/modules/commons/common_layers.py +0 -675
  43. spaces/CikeyQI/Yunzai/Yunzai/lib/config/config.js +0 -174
  44. spaces/CjangCjengh/Sanskrit-TTS/text/__init__.py +0 -32
  45. spaces/Cong723/gpt-academic-public/crazy_functions/crazy_utils.py +0 -608
  46. spaces/Cyril666/ContourNet-ABI/modules/model_alignment.py +0 -34
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/linear-58a44b5e.js +0 -2
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/UploadText-28892309.js +0 -2
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/utils.py +0 -575
  50. spaces/Deci/DeciLM-6b-instruct/USE_POLICY.md +0 -50
spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/README.md DELETED
@@ -1,26 +0,0 @@
- # DeepAI Wrapper
- Written by [ading2210](https://github.com/ading2210/).
-
- ## Examples:
- These functions are generators which yield strings containing the newly generated text.
-
- ### Completion:
- ```python
- for chunk in deepai.Completion.create("Who are you?"):
-     print(chunk, end="", flush=True)
- print()
- ```
-
- ### Chat Completion:
- Use the same format for the messages as you would for the [official OpenAI API](https://platform.openai.com/docs/guides/chat/introduction).
- ```python
- messages = [
-     {"role": "system", "content": "You are a helpful assistant."},
-     {"role": "user", "content": "Who won the world series in 2020?"},
-     {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
-     {"role": "user", "content": "Where was it played?"}
- ]
- for chunk in deepai.ChatCompletion.create(messages):
-     print(chunk, end="", flush=True)
- print()
- ```
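Since both functions are generators, a caller that wants the whole response at once can simply join the chunks. A minimal sketch, assuming the `deepai` module documented above is importable:

```python
# Collect the streamed chunks into a single string.
# Assumes `deepai` is the module documented in this README.
full_text = "".join(deepai.Completion.create("Who are you?"))
print(full_text)
```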
spaces/101-5/gpt4free/g4f/.v1/unfinished/chatpdf/__init__.py DELETED
@@ -1,82 +0,0 @@
- import requests
- import json
-
- from queue import Queue, Empty
- from threading import Thread
- from json import loads
- from re import findall
-
-
- class Completion:
-
-     def request(prompt: str):
-         '''TODO: some sort of authentication + upload PDF from URL or local file
-         Then you should get the atoken and chat ID
-         '''
-
-         token = "your_token_here"
-         chat_id = "your_chat_id_here"
-
-         url = "https://chat-pr4yueoqha-ue.a.run.app/"
-
-         payload = json.dumps({
-             "v": 2,
-             "chatSession": {
-                 "type": "join",
-                 "chatId": chat_id
-             },
-             "history": [
-                 {
-                     "id": "VNsSyJIq_0",
-                     "author": "p_if2GPSfyN8hjDoA7unYe",
-                     "msg": "<start>",
-                     "time": 1682672009270
-                 },
-                 {
-                     "id": "Zk8DRUtx_6",
-                     "author": "uplaceholder",
-                     "msg": prompt,
-                     "time": 1682672181339
-                 }
-             ]
-         })
-
-         # TODO: fix headers, use random user-agent, streaming response, etc
-         headers = {
-             'authority': 'chat-pr4yueoqha-ue.a.run.app',
-             'accept': '*/*',
-             'accept-language': 'en-US,en;q=0.9',
-             'atoken': token,
-             'content-type': 'application/json',
-             'origin': 'https://www.chatpdf.com',
-             'referer': 'https://www.chatpdf.com/',
-             'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"Windows"',
-             'sec-fetch-dest': 'empty',
-             'sec-fetch-mode': 'cors',
-             'sec-fetch-site': 'cross-site',
-             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-         }
-
-         response = requests.request(
-             "POST", url, headers=headers, data=payload).text
-         Completion.stream_completed = True
-         return {'response': response}
-
-     @staticmethod
-     def create(prompt: str):
-         Thread(target=Completion.request, args=[prompt]).start()
-
-         while Completion.stream_completed != True or not Completion.message_queue.empty():
-             try:
-                 message = Completion.message_queue.get(timeout=0.01)
-                 for message in findall(Completion.regex, message):
-                     yield loads(Completion.part1 + message + Completion.part2)['delta']
-
-             except Empty:
-                 pass
-
-     @staticmethod
-     def handle_stream_response(response):
-         Completion.message_queue.put(response.decode())
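Note that the methods above read `Completion.stream_completed`, `Completion.message_queue`, `Completion.regex`, `Completion.part1` and `Completion.part2`, but this unfinished module never defines them, so `create()` raises `AttributeError` as written. A minimal sketch of the missing class state; the regex and JSON scaffolding values below are illustrative assumptions, not the original author's definitions:

```python
from queue import Queue

class Completion:
    # State that request()/create()/handle_stream_response() rely on.
    # All values are assumptions for illustration only.
    stream_completed = False          # request() sets this True when the POST returns
    message_queue = Queue()           # handle_stream_response() feeds decoded chunks here
    regex = r'"delta":"([^"]*)"'      # hypothetical pattern for the streamed fragments
    part1 = '{"delta": "'             # hypothetical scaffolding to rebuild a JSON object
    part2 = '"}'
```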
spaces/101-5/gpt4free/testing/wewordle/README.md DELETED
@@ -1 +0,0 @@
- Originally from the website https://chat-gpt.com/chat (see https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431). I found the API https://wewordle.org/gptapi/v1/web/turbo, but it is rate-limited, so I reverse-engineered their Android app and found the API https://wewordle.org/gptapi/v1/android/turbo; randomizing the user ID bypasses the limit.
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Everything You Need to Know About X Particles Download for Cinema 4D.md DELETED
@@ -1,31 +0,0 @@
-
- <h1>X Particles Download: How to Get the Best Plugin for Cinema 4D</h1>
- <p>X Particles is a powerful and versatile plugin for Cinema 4D that allows you to create stunning particle effects and simulations. Whether you want to create fire, smoke, fluids, cloth, trails, or abstract art, X Particles has the tools you need to bring your vision to life.</p>
- <p>But how can you get the X Particles download for your Cinema 4D project? In this article, we will show you the best way to download and install X Particles, as well as some tips and tricks to make the most of this amazing plugin.</p>
- <h2>x particles download</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://byltly.com/2uKwQ7">https://byltly.com/2uKwQ7</a></b></p><br /><br />
- <h2>How to Download X Particles</h2>
- <p>The first step in getting the X Particles download is to visit the official website of the plugin: <a href="https://insydium.ltd/products/x-particles/">https://insydium.ltd/products/x-particles/</a>. Here you can find all the information about the features, pricing, and system requirements of X Particles.</p>
- <p>To download X Particles, you need to purchase a license from the website. You can choose between a perpetual license or a subscription license, depending on your budget and needs. A perpetual license gives you lifetime access to X Particles and all the updates for the current version, while a subscription license gives you access to X Particles and all the updates for as long as you pay the monthly or yearly fee.</p>
- <p>Once you have purchased a license, you will receive an email with a link to download X Particles. You can also log in to your account on the website and access the download link from there. The download file is a zip file that contains the plugin files and an installer.</p>
- <h2>How to Install X Particles</h2>
- <p>The next step is to install the plugin in your Cinema 4D software. To do this, you need to follow these steps:</p>
- <ol>
- <li>Unzip the download file and run the installer. The installer will guide you through the installation process and ask you to enter your license key.</li>
- <li>Choose the Cinema 4D version that you want to install X Particles on. You can install X Particles on multiple versions of Cinema 4D if you have them on your computer.</li>
- <li>Choose the location where you want to install X Particles. The default location is the plugins folder of your Cinema 4D installation.</li>
- <li>Click on Install and wait for the installation to finish.</li>
- <li>Restart Cinema 4D and check if X Particles is available in your plugins menu.</li>
- </ol>
- <p>Congratulations! You have successfully installed X Particles on your Cinema 4D software. Now you can start creating amazing particle effects and simulations with X Particles.</p>
- <h2>How to Use X Particles</h2>
- <p>X Particles is a very intuitive and user-friendly plugin that lets you create particle effects and simulations with ease. You can use X Particles in two ways: by using the built-in presets or by creating your own custom setups.</p>
- <p>The built-in presets are ready-made particle effects that you can apply to any object or scene in Cinema 4D. You can find them in the content browser of Cinema 4D under the X Particles folder. There are hundreds of presets available for different types of effects, such as fire, smoke, fluids, cloth, trails, and more. You can simply drag and drop a preset onto your object or scene and adjust the parameters as you like.</p>
- <p>The custom setups are particle effects that you can create from scratch using the various tools and modifiers of X Particles. You can find them in the objects menu of Cinema 4D under the X-Particles menu. There are four main types of objects that you can use to create custom setups: emitters, generators, modifiers, and questions & actions.</p>
- <ul>
- <li>Emitters are objects that emit particles from a source point or area. You can control the number, size, shape, color, speed, direction, and lifespan of the particles using the emitter settings.</li>
- <li>Generators are objects that create geometry from particles. You can use generators to create meshes, splines, trails, or sprites from particles.</li>
- <li>Modifiers are objects that affect particles in various ways. You can use modifiers to add forces, collisions, deformations, dynamics, fields, or shaders to particles.</li>
- <li>Questions & actions are objects that control the behavior of particles based on certain conditions, triggering actions when those conditions are met.</li>
- </ul>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Nancy Drew Games Full Version The History and Legacy of Nancy Drew.md DELETED
@@ -1,132 +0,0 @@
- <h1>Free Download Nancy Drew Games Full Version</h1>
- <p>If you are a fan of mystery, adventure, and puzzle-solving games, you might have heard of Nancy Drew games. These are a series of video games based on the popular books by Carolyn Keene, featuring the teenage detective Nancy Drew. In this article, we will tell you what Nancy Drew games are, why they are popular, and how to download them for free. We will also give you a list of the top 5 Nancy Drew games to play in 2021.</p>
- <h2>Free Download Nancy Drew Games Full Version</h2><br /><p><b><b>DOWNLOAD</b> &harr; <a href="https://byltly.com/2uKx6N">https://byltly.com/2uKx6N</a></b></p><br /><br />
- <h2>Introduction</h2>
- <h3>What are Nancy Drew games?</h3>
- <p>Nancy Drew games are point-and-click adventure games that put you in the shoes of Nancy Drew, a young sleuth who travels around the world and solves various mysteries. The games are developed by Her Interactive and have been released since 1998. There are currently 33 games in the main series, plus some spin-offs and remakes. The games are suitable for players of all ages and genders, as they offer different difficulty levels and modes.</p>
- <h3>Why are they popular?</h3>
- <p>Nancy Drew games are popular because they combine engaging stories, immersive environments, challenging puzzles, and educational elements. The games let you explore different cultures, locations, and historical periods, while learning about topics such as art, science, literature, and more. The games also have a loyal fan base that enjoys the characters, the humor, and the references to the original books. The games have won several awards and have been praised by critics and players alike.</p>
- <h3>How to download them for free?</h3>
- <p>If you want to download Nancy Drew games for free, you have a few options. One is to use a torrent site or a file-sharing platform that hosts the game files. However, this is not recommended, as it is illegal and risky. You might end up downloading viruses or malware that can harm your computer or compromise your personal data. Another option is to use a free trial or a demo version of the game. This way, you can play the game for a limited time or with limited features, without paying anything. However, this is also not ideal, as you might miss out on some content or experience glitches or bugs. The best option is to use a legitimate site that offers free downloads of Nancy Drew games. For example, you can use GameTop.com, which is a safe and reliable site that has a large collection of Nancy Drew games that you can download for free. You can choose from different genres and themes, such as mystery, horror, romance, and more. You can also enjoy high-quality graphics and sound effects, as well as full compatibility with your Windows PC.</p>
- <h2>Top 5 Nancy Drew Games to Play in 2021</h2>
- <p>Now that you know how to download Nancy Drew games for free, you might be wondering which ones to play first. To help you decide, we have compiled a list of the top 5 Nancy Drew games to play in 2021. These are based on our personal preferences and opinions, as well as on user ratings and reviews.</p>
- <h3>The Silent Spy</h3>
- <h4>Plot</h4>
- <p>In this game, you play as Nancy Drew who travels to Scotland to investigate the mysterious death of her mother, who was a spy. You will have to uncover secrets from your mother's past, while avoiding danger from an unknown enemy. You will also have to deal with your father's disapproval and your boyfriend's jealousy.</p>
- <h4>Features</h4>
- <ul>
- <li>A thrilling story that mixes espionage and family drama.</li>
- <li>A beautiful setting that showcases the Scottish culture and landscape.</li>
- <li>A variety of puzzles that test your logic, memory, and creativity.</li>
- <li>A choice-based system that affects the outcome of the game.</li>
- <li>A spy gadget kit that includes a phone, a camera, a lock pick, and more.</li>
- </ul>
- <h3>The Haunting of Castle Malloy</h3>
- <h4>Plot</h4>
- <p>In this game, you play as Nancy Drew who travels to Ireland to attend the wedding of her friend Kyler Mallory. However, things go wrong when the groom disappears on the eve of the wedding. You will have to find out what happened to him, while exploring the haunted castle and its surroundings. You will also have to deal with legends of banshees, fairies, and leprechauns.</p>
- <h4>Features</h4>
- <ul>
- <li>A spooky story that mixes mystery and folklore.</li>
- <li>A stunning setting that showcases the Irish culture and landscape.</li>
- <li>A variety of puzzles that test your observation, deduction, and coordination.</li>
- <li>A jet pack that lets you fly around the castle grounds.</li>
- <li>A sheep-shearing mini-game that is fun and challenging.</li>
- </ul>
- <h3>Ghost of Thornton Hall</h3>
- <h4>Plot</h4>
- <h4>Features</h4>
- <ul>
- <li>A creepy story that mixes horror and family drama.</li>
- <li>A gloomy setting that showcases the Southern Gothic style and atmosphere.</li>
- <li>A variety of puzzles that test your courage, intuition, and skill.</li>
- <li>A ghost-hunting device that lets you communicate with the spirits.</li>
- <li>A phone charm that changes color depending on your mood.</li>
- </ul>
- <h3>The Captive Curse</h3>
- <h4>Plot</h4>
- <p>In this game, you play as Nancy Drew who travels to Germany to investigate a series of attacks at a castle. You will have to find out who or what is behind the attacks, while staying at the castle as a guest. You will also have to deal with legends of a monster, a curse, and a hidden treasure.</p>
- <h4>Features</h4>
- <ul>
- <li>A captivating story that mixes fantasy and history.</li>
- <li>A magnificent setting that showcases the German culture and landscape.</li>
- <li>A variety of puzzles that test your knowledge, logic, and patience.</li>
- <li>A board game that lets you play against other characters.</li>
- <li>A costume trunk that lets you dress up as different characters.</li>
- </ul>
- <h3>Shadow at the Water's Edge</h3>
- <h4>Plot</h4>
- <p>In this game, you play as Nancy Drew who travels to Japan to teach English at a school. You will have to find out why the students are scared of staying at a nearby inn, while staying there yourself. You will also have to deal with the culture shock, the language barrier, and the secrets of your host family.</p>
- <p>How to get Nancy Drew games for free on PC<br />
- Nancy Drew mystery games free download full version<br />
- Download Nancy Drew games for Mac free<br />
- Best site to download Nancy Drew games for free<br />
- Nancy Drew games free online no download<br />
- Free Nancy Drew games download for Windows 10<br />
- Nancy Drew games download free full version torrent<br />
- Nancy Drew games free trial download<br />
- Download Nancy Drew games for Android free<br />
- Nancy Drew games free download full version crack<br />
- Nancy Drew games collection free download<br />
- Nancy Drew games free download full version iso<br />
- Download Nancy Drew games for iPad free<br />
- Nancy Drew games free download full version rar<br />
- Nancy Drew games free download full version mega<br />
- Nancy Drew games free download full version zip<br />
- Download Nancy Drew games for iPhone free<br />
- Nancy Drew games free download full version no survey<br />
- Nancy Drew games free download full version mediafire<br />
- Nancy Drew games free download full version utorrent<br />
- Download all Nancy Drew games for free<br />
- Nancy Drew games free download full version highly compressed<br />
- Nancy Drew games free download full version direct link<br />
- Download Nancy Drew games for Kindle Fire free<br />
- Nancy Drew games free download full version no virus<br />
- Nancy Drew games free download full version with key<br />
- Download old Nancy Drew games for free<br />
- Nancy Drew games free download full version skidrow<br />
- Nancy Drew games free download full version no password<br />
- Download new Nancy Drew games for free<br />
- Nancy Drew hidden object games free download full version<br />
- Nancy Drew games free download full version repack<br />
- Download classic Nancy Drew games for free<br />
- Nancy Drew adventure games free download full version<br />
- Nancy Drew detective games free download full version<br />
- Download latest Nancy Drew games for free<br />
- Nancy Drew puzzle games free download full version<br />
- Nancy Drew interactive games free download full version<br />
- Download original Nancy Drew games for free<br />
- Nancy Drew point and click games free download full version<br />
- Download complete Nancy Drew games for free<br />
- Nancy Drew strategy games free download full version<br />
- Download best Nancy Drew games for free<br />
- Nancy Drew horror games free download full version<br />
- Download rare Nancy Drew games for free<br />
- Nancy Drew mystery stories game books pdf ebook epub mobi kindle azw3 docx txt lit rtf djvu fb2 html xhtml odt prc pdb chm cbr cbz epub3 kf8 azw tcr lrf ibooks</p>
- <h4>Features</h4>
- <ul>
- <li>A chilling story that mixes mystery and culture.</li>
- <li>A colorful setting that showcases the Japanese culture and landscape.</li>
- <li>A variety of puzzles that test your memory, math, and origami skills.</li>
- <li>A digital camera that lets you take pictures and edit them.</li>
- <li>A bento box that lets you make your own lunch.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Nancy Drew games are a great way to enjoy mystery, adventure, and puzzle-solving games. They offer engaging stories, immersive environments, challenging puzzles, and educational elements. They are suitable for players of all ages and genders, as they offer different difficulty levels and modes. You can download them for free from GameTop.com, which is a safe and reliable site that has a large collection of Nancy Drew games. You can also try out some of the top 5 Nancy Drew games to play in 2021, such as The Silent Spy, The Haunting of Castle Malloy, Ghost of Thornton Hall, The Captive Curse, and Shadow at the Water's Edge. We hope you have fun playing these games and solving these mysteries!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Nancy Drew games:</p>
- <ol>
- <li>Q: How long does it take to finish a Nancy Drew game?</li>
- <li>A: It depends on the game, the difficulty level, and your playing style. On average, it takes about 10 hours to finish a Nancy Drew game.</li>
- <li>Q: Can I play Nancy Drew games on my Mac or mobile device?</li>
- <li>A: Some Nancy Drew games are compatible with Mac or mobile devices, but not all of them. You can check the system requirements for each game on the official website or on GameTop.com.</li>
- <li>Q: Can I play Nancy Drew games with my friends or family?</li>
- <li>A: Yes, you can play Nancy Drew games with your friends or family. Some games have a multiplayer mode that lets you cooperate or compete with other players online or offline. You can also share your progress and achievements with other players on social media or on the official forum.</li>
- <li>Q: What is the order of the Nancy Drew games?</li>
- <li>A: The order of the Nancy Drew games is based on their release date. The first game in the main series is Secrets Can Kill (1998), and the latest game is Midnight in Salem (2019). You can find the complete list of the Nancy Drew games on Wikipedia or on GameTop.com.</li>
- <li>Q: What is the best Nancy Drew game?</li>
- <li>A: There is no definitive answer to this question, as different players might have different preferences and opinions. However, some of the most popular and highly rated Nancy Drew games are The Final Scene (2001), Curse of Blackmoor Manor (2004), Last Train to Blue Moon Canyon (2005), Warnings at Waverly Academy (2009), and Sea of Darkness (2015).</li>
- </ol>
spaces/1gistliPinn/ChatGPT4/Examples/Disney Characters 3d Models Free Download Maya.md DELETED
@@ -1,6 +0,0 @@
- <h2>disney characters 3d models free download maya</h2><br /><p><b><b>Download File</b> ->>->>->> <a href="https://imgfil.com/2uxZ25">https://imgfil.com/2uxZ25</a></b></p><br /><br />
- 3D movie character models download, free movie character 3d models and 3d objects for computer graphics ... Li Shang from Disney Mulan 3d preview.
spaces/1gistliPinn/ChatGPT4/Examples/Evermotion - 3D People V.1 - C4D.rar.md DELETED
@@ -1,94 +0,0 @@
-
- <h1>Evermotion - 3D People V.1 - C4D.rar: A Review of the 3D Models Collection for Cinema 4D</h1>
- <p>Are you looking for realistic and high-quality 3D models of people for your Cinema 4D projects? Do you want to create stunning and dynamic scenes with 3D human characters? Do you want to save time and money by using ready-made and shadered models of people? If you answered yes to any of these questions, then you may be interested in Evermotion - 3D People V.1 - C4D.rar. This is a collection of 50 highly detailed and shadered models of people for Cinema 4D. In this article, we will review the features, benefits and drawbacks of Evermotion - 3D People V.1 - C4D.rar.</p>
- <h2>Evermotion - 3D People V.1 - C4D.rar</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734; <a href="https://imgfil.com/2uy0R5">https://imgfil.com/2uy0R5</a></b></p><br /><br />
- <h2>What is Evermotion - 3D People V.1 - C4D.rar?</h2>
- <p>Evermotion - 3D People V.1 - C4D.rar is a collection of 50 highly detailed and shadered models of people for Cinema 4D. It is part of the product range of Evermotion, a well-known company that produces high-quality 3D models and assets for architectural visualizations, animations, games and more. Evermotion - 3D People V.1 - C4D.rar contains 50 models of different ages, genders, ethnicities and poses. Each model is shadered and ready to render in Cinema 4D. The models are compatible with Scanline, V-Ray and Mental Ray render engines. The models are also optimized for low polygon count and fast rendering.</p>
- <h2>What are the features of Evermotion - 3D People V.1 - C4D.rar?</h2>
- <p>Evermotion - 3D People V.1 - C4D.rar has many features that make it a valuable and versatile collection of 3D models of people for Cinema 4D. Some of them are:</p>
- <ul>
- <li>It contains 50 highly detailed and shadered models of people for Cinema 4D.</li>
- <li>It covers different ages, genders, ethnicities and poses.</li>
- <li>It is shadered and ready to render in Cinema 4D.</li>
- <li>It is compatible with Scanline, V-Ray and Mental Ray render engines.</li>
- <li>It is optimized for low polygon count and fast rendering.</li>
- <li>It includes a catalog with previews and information about each model.</li>
- <li>It includes a download link with a .rar file that contains the models in .c4d format.</li>
- </ul>
- <h2>What are the benefits of Evermotion - 3D People V.1 - C4D.rar?</h2>
- <p>Evermotion - 3D People V.1 - C4D.rar has many benefits that make it a worthwhile investment for Cinema 4D users. Some of them are:</p>
- <ul>
- <li>It can save you time by using ready-made and shadered models of people for your Cinema 4D projects.</li>
- <li>It can save you money by using high-quality models of people that are cheaper than hiring or creating your own.</li>
- <li>It can enhance your creativity by giving you a variety of models of people to choose from and combine in your scenes.</li>
- <li>It can improve your realism by using realistic and natural models of people that match your scenes and lighting.</li>
- <li>It can increase your performance by using optimized models of people that do not slow down your rendering or editing.</li>
- </ul>
- <h2>What are the drawbacks of Evermotion - 3D People V.1 - C4D.rar?</h2>
- <p>Evermotion - 3D People V.1 - C4D.rar has some drawbacks that you should be aware of before buying it. Some of them are:</p>
- <ul>
- <li>It requires Cinema 4D software. You cannot use it with other 3D software or applications.</li>
- <li>It may not suit your specific needs or preferences. You may not find the exact model or pose that you want in the collection.</li>
- <li>It may not be updated or supported by Evermotion in the future. You may not get new models or fixes for the existing ones.</li>
- <li>It may not be legal or ethical to use it for some purposes or projects. You may need to check the license terms and conditions before using it.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Evermotion - 3D People V.1 - C4D.rar is a collection of 50 highly detailed and shadered models of people for Cinema 4D. It is part of the product range of Evermotion, a well-known company that produces high-quality 3D models and assets for architectural visualizations, animations, games and more. It has many features, benefits and drawbacks that you should consider before buying it. It can save you time and money and enhance your creativity by using ready-made and realistic models of people for your Cinema 4D projects. However, it may also not suit your specific needs or preferences, not be updated or supported by Evermotion in the future, or not be legal or ethical to use for some purposes or projects. You should weigh them carefully and decide what is best for you and your Cinema 4D projects.</p>
- <h2>How to use Evermotion - 3D People V.1 - C4D.rar?</h2>
- <p>Using Evermotion - 3D People V.1 - C4D.rar is very easy and simple. You just need to follow these steps:</p>
- <ol>
- <li>Download Evermotion - 3D People V.1 - C4D.rar from the link provided in this article or from the official website of Evermotion.</li>
- <li>Extract the .rar file using software like WinRAR or 7-Zip.</li>
- <li>Open Cinema 4D and create a new project or open an existing one.</li>
- <li>Go to File > Merge and browse to the folder where you extracted the .rar file.</li>
- <li>Select the model of your choice from the list and click Open.</li>
- <li>The model will be imported into your scene with all the shaders and textures applied.</li>
- <li>You can adjust the position, rotation, scale and other parameters of the model as you wish.</li>
- <li>You can also add lights, cameras, animations and other elements to your scene.</li>
- <li>When you are satisfied with your scene, you can render it using your preferred render engine.</li>
- </ol>
- <h2>What are the alternatives to Evermotion - 3D People V.1 - C4D.rar?</h2>
- <p>If you are not satisfied with Evermotion - 3D People V.1 - C4D.rar or you want to try other collections of 3D models of people for Cinema 4D, you have some alternatives to choose from. Some of them are:</p>
- <ul>
- <li>Viz-People: This is a company that offers high-quality 3D models of people, cars, furniture and other objects for various 3D software and applications. They have a free non-commercial version of their HDRI collection that contains 10 high-resolution spherical environmental maps.</li>
- <li>Dosch Design: This is a company that provides high-quality 3D models, textures, HDRI and sound effects for various 3D software and applications. They have a collection of 3D people that contains over 100 realistic and fully textured models of people in different poses and clothing styles.</li>
- <li>Renderpeople: This is a company that specializes in creating realistic and lifelike 3D models of people for various 3D software and applications. They have a collection of 3D people that contains over 5000 models of people in different poses, clothing styles, ethnicities and ages.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Evermotion - 3D People V.1 - C4D.rar is a collection of 50 highly detailed and shadered models of people for Cinema 4D. It is part of the product range of Evermotion, a well-known company that produces high-quality 3D models and assets for architectural visualizations, animations, games and more. It has many features, benefits and drawbacks that you should consider before buying it. It can save you time and money and enhance your creativity by using ready-made and realistic models of people for your Cinema 4D projects. However, it may also not suit your specific needs or preferences, not be updated or supported by Evermotion in the future, or not be legal or ethical to use for some purposes or projects. You should weigh them carefully and decide what is best for you and your Cinema 4D projects.</p>
- <h2>How to download Evermotion - 3D People V.1 - C4D.rar?</h2>
- <p>If you want to download Evermotion - 3D People V.1 - C4D.rar, you have two options. You can either buy it from the official website of Evermotion or you can download it from a third-party link provided in this article. Both options have their advantages and disadvantages. Let's see them in detail.</p>
- <p>If you buy Evermotion - 3D People V.1 - C4D.rar from the official website of Evermotion, you will get the following benefits:</p>
- <ul>
- <li>You will get the original and updated version of the collection.</li>
- <li>You will get a secure and easy payment method.</li>
- <li>You will get a download link with a .rar file that contains the models in .c4d format.</li>
- <li>You will get access to the catalog with previews and information about each model.</li>
- <li>You will get support and updates from Evermotion in case of any issues or questions.</li>
- </ul>
- <p>However, buying Evermotion - 3D People V.1 - C4D.rar from the official website of Evermotion also has some drawbacks:</p>
- <ul>
- <li>You will have to pay a certain amount of money to buy the collection.</li>
- <li>You will have to register an account on the Evermotion website and provide your personal information.</li>
- <li>You will have to agree to the license terms and conditions of Evermotion before using the collection.</li>
- </ul>
- <p>If you download Evermotion - 3D People V.1 - C4D.rar from a third-party link provided in this article, you will get the following benefits:</p>
- <ul>
- <li>You will get the collection for free without paying any money.</li>
- <li>You will get the collection instantly without waiting for any delivery.</li>
- <li>You will not have to register an account or provide any personal information on any website.</li>
- </ul>
- <p>However, downloading Evermotion - 3D People V.1 - C4D.rar from a third-party link also has some drawbacks:</p>
- <ul>
- <li>You may not get the original or updated version of the collection.</li>
- <li>You may not get access to the catalog or any support or updates from Evermotion.</li>
- <li>You may get a virus or malware along with the collection that can harm your computer or steal your data.</li>
- <li>You may violate some laws or terms of service by downloading pirated or illegal content.</li>
- </ul>
- <h2>Is Evermotion - 3D People V.1 - C4D.rar worth it?</h2>
- <p>The answer to this question depends on your needs and preferences. If you are looking for realistic and high-quality 3D models of people for your Cinema 4D projects, Evermotion - 3D People V.1 - C4D.rar may be worth it. It has many features, benefits and drawbacks that you should consider before buying it. It can save you time and money by using ready-made and shadered models of people for your Cinema 4D projects. However, it may also not suit your specific needs or preferences, not be updated or supported by Evermotion in the future, or not be legal or ethical to use for some purposes or projects. You should weigh them carefully and decide what is best for you and your Cinema 4D projects.</p>
- <h2>Conclusion</h2>
- <p>Evermotion - 3D People V.1 - C4D.rar is a collection of 50 highly detailed and shadered models of people for Cinema 4D. It is part of the product range of Evermotion, a well-known company that produces high-quality 3D models and assets for architectural visualizations, animations, games and more. It has many features, benefits and drawbacks that you should consider before buying it. It can save you time and money and enhance your creativity by using ready-made and realistic models of people for your Cinema 4D projects. However, it may also not suit your specific needs or preferences, not be updated or supported by Evermotion in the future, or not be legal or ethical to use for some purposes or projects. You can either buy it from the official website of Evermotion or download it from a third-party link provided in this article. Both options have their advantages and disadvantages. You should weigh them carefully and decide what is best for you and your Cinema 4D projects.</p>
spaces/1line/AutoGPT/Dockerfile DELETED
@@ -1,38 +0,0 @@
- # Use an official Python base image from the Docker Hub
- FROM python:3.10-slim
-
- # Install git
- RUN apt-get -y update
- RUN apt-get -y install git chromium-driver
-
- # Install Xvfb and other dependencies for headless browser testing
- RUN apt-get update \
-     && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates
-
- # Install Firefox / Chromium
- RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
-     && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
-     && apt-get update \
-     && apt-get install -y chromium firefox-esr
-
- # Set environment variables
- ENV PIP_NO_CACHE_DIR=yes \
-     PYTHONUNBUFFERED=1 \
-     PYTHONDONTWRITEBYTECODE=1
-
- # Create a non-root user and set permissions
- RUN useradd --create-home appuser
- WORKDIR /home/appuser
- RUN chown appuser:appuser /home/appuser
- USER appuser
-
- # Copy the requirements.txt file and install the requirements
- COPY --chown=appuser:appuser requirements.txt .
- RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
-     pip install --no-cache-dir --user -r requirements.txt
-
- # Copy the application files
- COPY --chown=appuser:appuser autogpt/ ./autogpt
-
- # Set the entrypoint
- ENTRYPOINT ["python", "-m", "autogpt"]
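To try the image, the usual route is `docker build` followed by `docker run`; a hedged sketch driving those two commands from Python (the `autogpt` tag name is an arbitrary assumption, not taken from the repo):

```python
import subprocess

# Build the image from the directory containing this Dockerfile, then run it
# interactively; --rm removes the container on exit.
subprocess.run(["docker", "build", "-t", "autogpt", "."], check=True)
subprocess.run(["docker", "run", "-it", "--rm", "autogpt"], check=True)
```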
spaces/1phancelerku/anime-remove-background/Emsa-Register-Dll-Tool-Crack.md DELETED
@@ -1,84 +0,0 @@
- ## Emsa Register Dll Tool Crack
-
- **Click Here 🌟 [https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2txiC0&sa=D&sntz=1&usg=AOvVaw2tOYkFopTq9fhcDyUqgUmE](https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2txiC0&sa=D&sntz=1&usg=AOvVaw2tOYkFopTq9fhcDyUqgUmE)**
-
- # How to Use EMSA Register DLL Tool to Register and Unregister ActiveX Files
-
- EMSA Register DLL Tool is a free and multipurpose tool for Windows that allows you to register and unregister ActiveX files, such as dll, ocx and exe files. ActiveX files are components that enable various functions and features in Windows applications. Sometimes, you may need to manually register or unregister these files if they are corrupted, missing or causing errors. In this article, we will show you how to use EMSA Register DLL Tool to perform these tasks easily and quickly.
-
- ## Download and Install EMSA Register DLL Tool
-
- The first step is to download and install EMSA Register DLL Tool from the official website of Emsai Industrial[^1^]. The tool is compatible with Windows 98/ME/NT/2000/XP/2003. The installation process is simple and straightforward. Just follow the instructions on the screen and choose the destination folder for the tool.
-
- ## Enable Shell Extensions
-
- One of the features of EMSA Register DLL Tool is that it integrates with the Windows Explorer context menu, which means you can right-click on any ActiveX file and choose to register or unregister it. To enable this feature, you need to enable shell extensions in the tool. To do this, launch the tool and go to the Options & Help tab. Check the box that says "Enable Shell Extensions" and click OK. You may need to restart your computer for the changes to take effect.
-
- ## Register or Unregister ActiveX Files
-
- There are two ways to register or unregister ActiveX files with EMSA Register DLL Tool. One way is to use the tool's GUI, and the other way is to use the Windows Explorer context menu.
-
- To use the tool's GUI, launch the tool and go to the Reg & Tool File Info tab. Click on the Browse button and select the ActiveX file you want to register or unregister. The tool will display detailed information about the file, such as its name, type, version, description, etc. You can also compare two ActiveX files with identical filenames by using the File Comparison tab. To register or unregister the file, click on the appropriate button at the bottom of the window. You will see a confirmation message if the operation is successful.
-
- To use the Windows Explorer context menu, locate the ActiveX file you want to register or unregister in your file system. Right-click on it and select Register or Unregister from the menu. You will see a confirmation message if the operation is successful.
-
- ## Generate an ActiveX Report
-
- Another feature of EMSA Register DLL Tool is that it can generate a report of all ActiveX files in a folder. This can be useful if you want to check the status of multiple files at once. To generate an ActiveX report, right-click on any folder that contains ActiveX files and select ActiveX Report from the menu. The tool will scan the folder for ActiveX files and create a text file (output.txt) containing the report in the same folder. The report will also be opened automatically for viewing. The report will show information such as file name, type, registration status, GUID, etc.
-
- ## Conclusion
-
- EMSA Register DLL Tool is a handy tool for anyone who needs to register or unregister ActiveX files in Windows. It provides extensive information about these files and allows you to perform these tasks easily and quickly. It also integrates with the Windows Explorer context menu for convenience. You can download EMSA Register DLL Tool for free from Emsai Industrial's website[^1^] and try it out yourself.
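For comparison, the same register/unregister operations can be scripted without a GUI: Windows ships the `regsvr32` utility, which is what ActiveX registration ultimately goes through. A small sketch (the file path is a hypothetical example):

```python
import subprocess

def set_activex_registration(path: str, unregister: bool = False) -> None:
    """Register or unregister an ActiveX dll/ocx via the built-in regsvr32."""
    args = ["regsvr32", "/s"]   # /s suppresses the confirmation message box
    if unregister:
        args.append("/u")       # /u unregisters instead of registering
    subprocess.run(args + [path], check=True)

set_activex_registration(r"C:\example\mycontrol.ocx")                    # register
set_activex_registration(r"C:\example\mycontrol.ocx", unregister=True)  # unregister
```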
spaces/801artistry/RVC801/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py DELETED
@@ -1,16 +0,0 @@
- class F0Predictor(object):
-     def compute_f0(self, wav, p_len):
-         """
-         input: wav:[signal_length]
-                p_len:int
-         output: f0:[signal_length//hop_length]
-         """
-         pass
-
-     def compute_f0_uv(self, wav, p_len):
-         """
-         input: wav:[signal_length]
-                p_len:int
-         output: f0:[signal_length//hop_length]
-                 uv:[signal_length//hop_length]
-         """
-         pass
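The docstrings fix the contract: a predictor maps a waveform of `signal_length` samples to `signal_length // hop_length` frames. A toy subclass sketch that satisfies those shapes (the `hop_length` constructor argument is an assumption for illustration, not part of the original interface):

```python
import numpy as np

class ConstantF0Predictor(F0Predictor):
    """Returns a flat contour; illustrates the expected output shapes only."""

    def __init__(self, hop_length=160, f0=100.0):
        self.hop_length = hop_length
        self.f0 = f0

    def compute_f0(self, wav, p_len):
        return np.full(len(wav) // self.hop_length, self.f0)

    def compute_f0_uv(self, wav, p_len):
        f0 = self.compute_f0(wav, p_len)
        return f0, np.ones_like(f0)  # treat every frame as voiced
```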
spaces/AIFILMS/Pix2Pix-Video/style.css DELETED
@@ -1,101 +0,0 @@
- #col-container {max-width: 820px; margin-left: auto; margin-right: auto;}
- #duplicate-container{
-     display: flex;
-     justify-content: space-between;
-     align-items: center;
-     line-height: 1em;
-     flex-direction: row-reverse;
-     font-size:1em;
- }
- a, a:hover, a:visited {
-     text-decoration-line: underline;
-     font-weight: 600;
-     color: #1f2937 !important;
- }
-
- .dark a, .dark a:hover, .dark a:visited {
-     color: #f3f4f6 !important;
- }
-
- .footer {
-     margin-bottom: 45px;
-     margin-top: 10px;
-     text-align: center;
-     border-bottom: 1px solid #e5e5e5;
- }
-
- .footer>p {
-     font-size: .8rem!important;
-     display: inline-block;
-     padding: 0 10px;
-     transform: translateY(26px);
-     background: white;
- }
- .dark .footer {
-     border-color: #303030;
- }
- .dark .footer>p {
-     background: #0b0f19;
- }
-
- div#may-like-container > p {
-     font-size: .8em;
-     margin-bottom: 4px;
- }
-
- .animate-spin {
-     animation: spin 1s linear infinite;
- }
-
- @keyframes spin {
-     from {
-         transform: rotate(0deg);
-     }
-     to {
-         transform: rotate(360deg);
-     }
- }
-
- #share-btn-container {
-     display: flex;
-     padding-left: 0.5rem !important;
-     padding-right: 0.5rem !important;
-     background-color: #000000;
-     justify-content: center;
-     align-items: center;
-     border-radius: 9999px !important;
-     max-width: 13rem;
- }
-
- #share-btn-container:hover {
-     background-color: #060606;
- }
-
- #share-btn {
-     all: initial;
-     color: #ffffff;
-     font-weight: 600;
-     cursor:pointer;
-     font-family: 'IBM Plex Sans', sans-serif;
-     margin-left: 0.5rem !important;
-     padding-top: 0.5rem !important;
-     padding-bottom: 0.5rem !important;
-     right:0;
- }
-
- #share-btn * {
-     all: unset;
- }
-
- #share-btn-container div:nth-child(-n+2){
-     width: auto !important;
-     min-height: 0px !important;
- }
-
- #share-btn-container .wrap {
-     display: none !important;
- }
-
- #share-btn-container.hidden {
-     display: none!important;
- }
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/__init__.py DELETED
@@ -1,24 +0,0 @@
- from .camera import (Camera, PerspectiveCamera, OrthographicCamera,
-                      IntrinsicsCamera)
- from .light import Light, PointLight, DirectionalLight, SpotLight
- from .sampler import Sampler
- from .texture import Texture
- from .material import Material, MetallicRoughnessMaterial
- from .primitive import Primitive
- from .mesh import Mesh
- from .node import Node
- from .scene import Scene
- from .renderer import Renderer
- from .viewer import Viewer
- from .offscreen import OffscreenRenderer
- from .version import __version__
- from .constants import RenderFlags, TextAlign, GLTF
-
- __all__ = [
-     'Camera', 'PerspectiveCamera', 'OrthographicCamera', 'IntrinsicsCamera',
-     'Light', 'PointLight', 'DirectionalLight', 'SpotLight',
-     'Sampler', 'Texture', 'Material', 'MetallicRoughnessMaterial',
-     'Primitive', 'Mesh', 'Node', 'Scene', 'Renderer', 'Viewer',
-     'OffscreenRenderer', '__version__', 'RenderFlags', 'TextAlign',
-     'GLTF'
- ]
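For context, the re-exported names compose roughly as in this minimal offscreen-rendering sketch, based on pyrender's documented API (the trimesh icosphere, camera pose, and viewport size are arbitrary choices):

```python
import numpy as np
import trimesh
import pyrender

# Build a scene with one mesh, a camera pulled back along +z, and a light,
# then render it offscreen to color and depth arrays.
scene = pyrender.Scene()
scene.add(pyrender.Mesh.from_trimesh(trimesh.creation.icosphere()))
camera_pose = np.eye(4)
camera_pose[2, 3] = 2.5
scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)
scene.add(pyrender.DirectionalLight(intensity=3.0))
renderer = pyrender.OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = renderer.render(scene)  # color: uint8 HxWx3, depth: float HxW
renderer.delete()
```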
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/model.py DELETED
@@ -1,295 +0,0 @@
- import functools
- import torch
- import torch.nn as nn
-
-
- class ActNorm(nn.Module):
-     def __init__(self, num_features, logdet=False, affine=True,
-                  allow_reverse_init=False):
-         assert affine
-         super().__init__()
-         self.logdet = logdet
-         self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
-         self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
-         self.allow_reverse_init = allow_reverse_init
-
-         self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
-
-     def initialize(self, input):
-         with torch.no_grad():
-             flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
-             mean = (
-                 flatten.mean(1)
-                 .unsqueeze(1)
-                 .unsqueeze(2)
-                 .unsqueeze(3)
-                 .permute(1, 0, 2, 3)
-             )
-             std = (
-                 flatten.std(1)
-                 .unsqueeze(1)
-                 .unsqueeze(2)
-                 .unsqueeze(3)
-                 .permute(1, 0, 2, 3)
-             )
-
-             self.loc.data.copy_(-mean)
-             self.scale.data.copy_(1 / (std + 1e-6))
-
-     def forward(self, input, reverse=False):
-         if reverse:
-             return self.reverse(input)
-         if len(input.shape) == 2:
-             input = input[:, :, None, None]
-             squeeze = True
-         else:
-             squeeze = False
-
-         _, _, height, width = input.shape
-
-         if self.training and self.initialized.item() == 0:
-             self.initialize(input)
-             self.initialized.fill_(1)
-
-         h = self.scale * (input + self.loc)
-
-         if squeeze:
-             h = h.squeeze(-1).squeeze(-1)
-
-         if self.logdet:
-             log_abs = torch.log(torch.abs(self.scale))
-             logdet = height * width * torch.sum(log_abs)
-             logdet = logdet * torch.ones(input.shape[0]).to(input)
-             return h, logdet
-
-         return h
-
-     def reverse(self, output):
-         if self.training and self.initialized.item() == 0:
-             if not self.allow_reverse_init:
-                 raise RuntimeError(
-                     "Initializing ActNorm in reverse direction is "
-                     "disabled by default. Use allow_reverse_init=True to enable."
-                 )
-             else:
-                 self.initialize(output)
-                 self.initialized.fill_(1)
-
-         if len(output.shape) == 2:
-             output = output[:, :, None, None]
-             squeeze = True
-         else:
-             squeeze = False
-
-         h = output / self.scale - self.loc
-
-         if squeeze:
-             h = h.squeeze(-1).squeeze(-1)
-         return h
-
-
- def weights_init(m):
-     classname = m.__class__.__name__
-     if classname.find('Conv') != -1:
-         nn.init.normal_(m.weight.data, 0.0, 0.02)
-     elif classname.find('BatchNorm') != -1:
-         nn.init.normal_(m.weight.data, 1.0, 0.02)
-         nn.init.constant_(m.bias.data, 0)
-
-
- class NLayerDiscriminator(nn.Module):
-     """Defines a PatchGAN discriminator as in Pix2Pix
-     --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
-     """
-     def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
-         """Construct a PatchGAN discriminator
-         Parameters:
-             input_nc (int)  -- the number of channels in input images
-             ndf (int)       -- the number of filters in the last conv layer
-             n_layers (int)  -- the number of conv layers in the discriminator
-             norm_layer      -- normalization layer
-         """
-         super(NLayerDiscriminator, self).__init__()
-         if not use_actnorm:
-             norm_layer = nn.BatchNorm2d
-         else:
-             norm_layer = ActNorm
-         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
-             use_bias = norm_layer.func != nn.BatchNorm2d
-         else:
-             use_bias = norm_layer != nn.BatchNorm2d
-
-         kw = 4
-         padw = 1
-         sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
-         nf_mult = 1
-         nf_mult_prev = 1
-         for n in range(1, n_layers):  # gradually increase the number of filters
-             nf_mult_prev = nf_mult
-             nf_mult = min(2 ** n, 8)
-             sequence += [
-                 nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
-                 norm_layer(ndf * nf_mult),
-                 nn.LeakyReLU(0.2, True)
-             ]
-
-         nf_mult_prev = nf_mult
-         nf_mult = min(2 ** n_layers, 8)
-         sequence += [
-             nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
-             norm_layer(ndf * nf_mult),
-             nn.LeakyReLU(0.2, True)
-         ]
-         # output 1 channel prediction map
-         sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
-         self.main = nn.Sequential(*sequence)
-
-     def forward(self, input):
-         """Standard forward."""
-         return self.main(input)
-
-
- class NLayerDiscriminator1dFeats(NLayerDiscriminator):
-     """Defines a PatchGAN discriminator as in Pix2Pix
-     --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
-     """
-     def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
-         """Construct a PatchGAN discriminator
-         Parameters:
-             input_nc (int)  -- the number of channels in input feats
-             ndf (int)       -- the number of filters in the last conv layer
-             n_layers (int)  -- the number of conv layers in the discriminator
-             norm_layer      -- normalization layer
-         """
-         super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
-
-         if not use_actnorm:
-             norm_layer = nn.BatchNorm1d
-         else:
-             norm_layer = ActNorm
-         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm has affine parameters
-             use_bias = norm_layer.func != nn.BatchNorm1d
-         else:
-             use_bias = norm_layer != nn.BatchNorm1d
-
-         kw = 4
-         padw = 1
-         sequence = [nn.Conv1d(input_nc, input_nc//2, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
-         nf_mult = input_nc//2
-         nf_mult_prev = 1
-         for n in range(1, n_layers):  # gradually decrease the number of filters
-             nf_mult_prev = nf_mult
-             nf_mult = max(nf_mult_prev // (2 ** n), 8)
-             sequence += [
-                 nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
-                 norm_layer(nf_mult),
-                 nn.LeakyReLU(0.2, True)
-             ]
-
-         nf_mult_prev = nf_mult
-         nf_mult = max(nf_mult_prev // (2 ** n), 8)
-         sequence += [
-             nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
-             norm_layer(nf_mult),
-             nn.LeakyReLU(0.2, True)
-         ]
-         nf_mult_prev = nf_mult
-         nf_mult = max(nf_mult_prev // (2 ** n), 8)
-         sequence += [
-             nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
-             norm_layer(nf_mult),
-             nn.LeakyReLU(0.2, True)
-         ]
-         # output 1 channel prediction map
-         sequence += [nn.Conv1d(nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
-         self.main = nn.Sequential(*sequence)
-
-
- class NLayerDiscriminator1dSpecs(NLayerDiscriminator):
-     """Defines a PatchGAN discriminator as in Pix2Pix
-     --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
-     """
-     def __init__(self, input_nc=80, ndf=64, n_layers=3, use_actnorm=False):
-         """Construct a PatchGAN discriminator
-         Parameters:
-             input_nc (int)  -- the number of channels in input specs
-             ndf (int)       -- the number of filters in the last conv layer
-             n_layers (int)  -- the number of conv layers in the discriminator
-             norm_layer      -- normalization layer
-         """
-         super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
-
-         if not use_actnorm:
-             norm_layer = nn.BatchNorm1d
-         else:
-             norm_layer = ActNorm
-         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm has affine parameters
-             use_bias = norm_layer.func != nn.BatchNorm1d
-         else:
-             use_bias = norm_layer != nn.BatchNorm1d
-
-         kw = 4
-         padw = 1
-         sequence = [nn.Conv1d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
-         nf_mult = 1
-         nf_mult_prev = 1
-         for n in range(1, n_layers):  # gradually decrease the number of filters
-             nf_mult_prev = nf_mult
-             nf_mult = min(2 ** n, 8)
-             sequence += [
-                 nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
-                 norm_layer(ndf * nf_mult),
-                 nn.LeakyReLU(0.2, True)
-             ]
-
-         nf_mult_prev = nf_mult
-         nf_mult = min(2 ** n_layers, 8)
-         sequence += [
-             nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
-             norm_layer(ndf * nf_mult),
-             nn.LeakyReLU(0.2, True)
-         ]
-         # output 1 channel prediction map
-         sequence += [nn.Conv1d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
-         self.main = nn.Sequential(*sequence)
-
-     def forward(self, input):
-         """Standard forward."""
-         # (B, C, L)
-         input = input.squeeze(1)
-         input = self.main(input)
-         return input
-
-
- if __name__ == '__main__':
-     import torch
-
-     ## FEATURES
-     disc_in_channels = 2048
-     disc_num_layers = 2
-     use_actnorm = False
-     disc_ndf = 64
-     discriminator = NLayerDiscriminator1dFeats(input_nc=disc_in_channels, n_layers=disc_num_layers,
-                                                use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
-     inputs = torch.rand((6, 2048, 212))
-     outputs = discriminator(inputs)
-     print(outputs.shape)
-
-     ## AUDIO
-     disc_in_channels = 1
-     disc_num_layers = 3
-     use_actnorm = False
-     disc_ndf = 64
-     discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
-                                         use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
-     inputs = torch.rand((6, 1, 80, 848))
-     outputs = discriminator(inputs)
-     print(outputs.shape)
-
-     ## IMAGE
-     disc_in_channels = 3
-     disc_num_layers = 3
-     use_actnorm = False
-     disc_ndf = 64
-     discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
-                                         use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
-     inputs = torch.rand((6, 3, 256, 256))
294
- outputs = discriminator(inputs)
295
- print(outputs.shape)
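As a sanity check on the demo shapes above, the PatchGAN output size follows from the conv arithmetic alone (kernel 4, padding 1, stride 2 or 1). A minimal sketch, not part of the original file (the helper names are ours):

```python
# Sketch: spatial size of the PatchGAN prediction map for NLayerDiscriminator.
def conv_out_len(length: int, stride: int, kernel: int = 4, pad: int = 1) -> int:
    return (length + 2 * pad - kernel) // stride + 1

def patchgan_out_len(length: int, n_layers: int = 3) -> int:
    for _ in range(n_layers):                 # first conv + the (n_layers - 1) loop convs, all stride 2
        length = conv_out_len(length, stride=2)
    length = conv_out_len(length, stride=1)   # extra stride-1 conv block
    return conv_out_len(length, stride=1)     # final 1-channel prediction conv

print(patchgan_out_len(256))  # 30 -> the IMAGE demo should print torch.Size([6, 1, 30, 30])
```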
spaces/AILab-CVC/SEED-Bench_Leaderboard/constants.py DELETED
@@ -1,87 +0,0 @@
1
- # this is .py for store constants
2
- MODEL_INFO = ["Model Type", "Model", "Language Model"]
3
- TASK_INFO = ["Scene Understanding", "Instance Identity", "Instance Attributes", "Instance Localization", "Instance Counting", "Spatial Relation", "Instance Interaction", "Visual Reasoning", "Text Recognition", "Avg. Img", "Action Recognition", "Action Prediction", "Procedure Understanding", "Avg. Video", "Avg. All"]
4
- TASK_INFO_v2 = ["Avg. All", "Avg. Img", "Avg. Video", "Scene Understanding", "Instance Identity", "Instance Attributes", "Instance Localization", "Instance Counting", "Spatial Relation", "Instance Interaction", "Visual Reasoning", "Text Recognition", "Action Recognition", "Action Prediction", "Procedure Understanding"]
5
-
6
- AVG_INFO = ["Avg. All", "Avg. Img", "Avg. Video"]
7
- DATA_TITILE_TYPE = ["markdown", "markdown", "markdown", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number"]
8
- CSV_DIR = "./file/result.csv"
9
-
10
- # COLUMN_NAMES = MODEL_INFO + TASK_INFO
11
- COLUMN_NAMES = MODEL_INFO + TASK_INFO_v2
12
-
13
- DATA_NUM = [3158, 1831, 4649, 978, 2447, 657, 97, 331, 85, 1740, 2077, 1192]
14
-
15
- UNTUNED_MODEL_RESULTS = '''LLM & Flan-T5 & Flan-T5-XL &23.0 &29.0 &32.8 &31.8 &20.5 &31.8 &33.0 &18.2 &19.4 &23.2 &34.9 &25.4 \\
16
- LLM & Vicuna & Vicuna-7B &23.4 &30.7 &29.7 &30.9 &30.8 &28.6 &29.8 &18.5 &13.4 &27.3 &34.5 &23.8 \\
17
- LLM & LLaMA & LLaMA-7B &26.3 &27.4 &26.2 &28.3 &25.1 &28.8 &19.2 &37.0 & 9.0 &33.0 &23.1 &26.2 \\
18
- ImageLLM & BLIP-2 & Flan-T5-XL &59.1 &53.9 &49.2 &42.3 &43.2 &36.7 &55.7 &45.6 &25.9 &32.6 &47.5 &24.0 \\
19
- ImageLLM & InstructBLIP & Flan-T5-XL &60.3 &58.5 &63.4 &40.6 &58.4 &38.7 &51.6 &45.9 &25.9 &33.1 &49.1 &27.1 \\
20
- ImageLLM & InstructBLIP-Vicuna & Vicuna-7B &60.2 &58.9 &65.6 &43.6 &57.2 &40.3 &52.6 &47.7 &43.5 &34.5 &49.6 &23.1 \\
21
- ImageLLM & LLaVA & LLaMA-7B &42.7 &34.9 &33.5 &28.4 &41.9 &30.8 &27.8 &46.8 &27.7 &29.7 &21.4 &19.1 \\
22
- ImageLLM & MiniGPT-4 & Flan-T5-XL &56.3 &49.2 &45.8 &37.9 &45.3 &32.6 &47.4 &57.1 &11.8 &38.2 &24.5 &27.1 \\
23
- ImageLLM & VPGTrans & LLaMA-7B &51.9 &44.1 &39.9 &36.1 &33.7 &36.4 &32.0 &53.2 &30.6 &39.5 &24.3 &31.9 \\
24
- ImageLLM & MultiModal-GPT & LLaMA-7B &43.6 &37.9 &31.5 &30.8 &27.3 &30.1 &29.9 &51.4 &18.8 &36.9 &25.8 &24.0 \\
25
- ImageLLM & Otter & LLaMA-7B &44.9 &38.6 &32.2 &30.9 &26.3 &31.8 &32.0 &51.4 &31.8 &37.9 &27.2 &24.8 \\
26
- ImageLLM & OpenFlamingo & LLaMA-7B &43.9 &38.1 &31.3 &30.1 &27.3 &30.6 &29.9 &50.2 &20.0 &37.2 &25.4 &24.2 \\
27
- ImageLLM & LLaMA-Adapter V2 & LLaMA-7B &45.2 &38.5 &29.3 &33.0 &29.7 &35.5 &39.2 &52.0 &24.7 &38.6 &18.5 &19.6 \\
28
- ImageLLM & GVT & Vicuna-7B &41.7 &35.5 &31.8 &29.5 &36.2 &32.0 &32.0 &51.1 &27.1 &33.9 &25.4 &23.0 \\
29
- ImageLLM & mPLUG-Owl & LLaMA-7B &49.7 &45.3 &32.5 &36.7 &27.3 &32.7 &44.3 &54.7 &28.8 &26.7 &17.9 &26.5 \\
30
- VideoLLM & VideoChat & Vicuna-7B &47.1 &43.8 &34.9 &40.0 &32.8 &34.6 &42.3 &50.5 &17.7 &34.9 &36.4 &27.3 \\
31
- VideoLLM & Video-ChatGPT & LLaMA-7B &37.2 &31.4 &33.2 &28.4 &35.5 &29.5 &23.7 &42.3 &25.9 &27.6 &21.3 &21.1 \\
32
- VideoLLM & Valley & LLaMA-13B &39.3 &32.9 &31.6 &27.9 &24.2 &30.1 &27.8 &43.8 &11.8 &31.3 &23.2 &20.7 \\'''
33
-
34
-
35
- LEADERBORAD_INTRODUCTION = """# SEED-Bench Leaderboard
36
-
37
- Welcome to the leaderboard of the SEED-Bench! 🏆
38
- SEED-Bench consists of 19K multiple-choice questions with accurate human annotations for evaluating Multimodal LLMs, covering 12 evaluation dimensions including both the spatial and temporal understanding.
39
- Please refer to [our paper](https://arxiv.org/abs/2307.16125) for more details.
40
- """
41
-
42
- SUBMIT_INTRODUCTION = """# Submit Introduction
43
- 1. Obtain JSON file from our [github repository](https://github.com/AILab-CVC/SEED-Bench#leaderboard-submit) after evaluation. For example, you can obtain InstructBLIP's JSON file as results/results.json after running
44
- ```shell
45
- python eval.py --model instruct_blip --anno_path SEED-Bench.json --output-dir results
46
- ```
47
- 2. If you want to update model performance by uploading new results, please ensure 'Model Name Revision' is the same as what's shown in the leaderboard. For example, if you want to modify InstructBLIP's performance, you need to fill in 'InstructBLIP' in 'Revision Model Name'.
48
- 3. Please provide the correct link of your model's repository for each submission.
49
- 4. For the evaluation dimension, you can choose "All/Image/Video", and the results of dimensions that are not evaluated will be set to zero.
50
- 5. After clicking 'Submit Eval', you can click 'Refresh' to obtain the latest result in the leaderboard.
51
-
52
- ## Submit Example
53
- For example, if you want to upload InstructBLIP's result in the leaderboard, you need to:
54
- 1. Fill in 'InstructBLIP' in 'Model Name' if it is your first time to submit your result (You can leave 'Revision Model Name' blank).
55
- 2. Fill in 'InstructBLIP' in 'Revision Model Name' if you want to update your result (You can leave 'Model Name' blank).
56
- 2. Select 'ImageLLM' in 'Model Type'.
57
- 3. Fill in 'https://github.com/salesforce/LAVIS' in 'Model Link'.
58
- 4. Select 'Flan-T5-XL' in 'LLM Type'.
59
- 5. Select 'All' in 'Evaluation Dimension'.
60
- 6. Upload results.json.
61
- 7. Click the 'Submit Eval' button.
62
- 8. Click 'Refresh' to obtain the uploaded leaderboard.
63
- """
64
-
65
- TABLE_INTRODUCTION = """In the table below, we summarize each task performance of all the models.
66
- We use accurancy(%) as the primary evaluation metric for each tasks.
67
- """
68
-
69
- LEADERBORAD_INFO = """
70
- Based on powerful Large Language Models (LLMs), recent generative Multimodal Large Language Models (MLLMs) have gained prominence as a pivotal research area, exhibiting remarkable capability for both comprehension and generation.
71
- In this work, we address the evaluation of generative comprehension in MLLMs as a preliminary step towards a comprehensive assessment of generative models, by introducing a benchmark named SEED-Bench.
72
- SEED-Bench consists of 19K multiple choice questions with accurate human annotations (x6 larger than existing benchmarks), which spans 12 evaluation dimensions including the comprehension of both the image and video modality.
73
- We develop an advanced pipeline for generating multiple-choice questions that target specific evaluation dimensions, integrating both automatic filtering and manual verification processes.
74
- Multiple-choice questions with groundtruth options derived from human annotation enables an objective and efficient assessment of model performance, eliminating the need for human or GPT intervention during evaluation.
75
- We further evaluate the performance of 18 models across all 12 dimensions, covering both the spatial and temporal understanding.
76
- By revealing the limitations of existing MLLMs through evaluation results, we aim for SEED-Bench to provide insights for motivating future research.
77
- """
78
-
79
-
80
-
81
- CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
82
- CITATION_BUTTON_TEXT = r"""@article{li2023seed,
83
- title={SEED-Bench: Benchmarking Multimodal LLMs with Generative Comprehension},
84
- author={Li, Bohao and Wang, Rui and Wang, Guangzhi and Ge, Yuying and Ge, Yixiao and Shan, Ying},
85
- journal={arXiv preprint arXiv:2307.16125},
86
- year={2023}
87
- }"""
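The `UNTUNED_MODEL_RESULTS` block stores one model per LaTeX-style `&`-separated row (3 info cells, then 12 per-dimension scores). A small parsing sketch, ours rather than part of the Space:

```python
def parse_results(block: str):
    """Turn the &-separated LaTeX-style rows into records."""
    rows = []
    for line in block.strip().splitlines():
        cells = [c.strip() for c in line.rstrip('\\').split('&')]
        model_type, model, llm = cells[:3]
        scores = [float(c) for c in cells[3:]]  # 12 scores, aligned with DATA_NUM
        rows.append({"type": model_type, "model": model, "llm": llm, "scores": scores})
    return rows

rows = parse_results(UNTUNED_MODEL_RESULTS)
print(len(rows), len(rows[0]["scores"]))  # 18 models, 12 dimensions each
```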
spaces/AIQuest/lungCancerVgg19/app.py DELETED
@@ -1,34 +0,0 @@
1
- # this is the custome function to return pre-process the image to size (150 150 3)
2
-
3
- import numpy as np
4
- from tensorflow.keras.preprocessing import image
5
- from PIL import Image
6
- import gradio as gr
7
- from keras.models import load_model
8
-
9
- def custom_Image_preprocessing(image_data, target_size=(150, 150)):
10
- img = image.array_to_img(image_data, data_format='channels_last')
11
- img = img.resize(target_size) # Resize the image if needed
12
- img_arr = image.img_to_array(img)
13
- img_arr = img_arr * 1./255
14
- img_arr = np.expand_dims(img_arr, axis=0)
15
- return img_arr
16
-
17
- # function to predict the custome image
18
-
19
- def image_predict(image_path):
20
- model = load_model("Second_model.h5")
21
- image_preprocess = custom_Image_preprocessing(image_path)
22
- result = model.predict(image_preprocess)
23
- if ( result <= 0.5 ):
24
- return 'Negative',round(result[0][0]*100,2),'%'
25
- else:
26
- return 'Positive',round(result[0][0]*100,2),'%'
27
-
28
-
29
- # Define Gradio interface
30
- input_component = gr.components.Image(label = "Upload the X-Ray")
31
- output_component = gr.components.Textbox(label = "Result")
32
- interface = gr.Interface(fn=image_predict, inputs=input_component, outputs=output_component,title = "Lung Cancer x-Ray Classification",description = "This web app provides predictions based on X-Ray images and predict either the X-ray contains sympotms of lung cancer or not ")
33
- interface.launch()
34
-
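A quick check of the preprocessing contract (a sketch of ours, not in the Space): the Gradio `Image` component delivers an HxWx3 uint8 array, and the preprocessor should emit a `(1, 150, 150, 3)` float batch scaled to [0, 1].

```python
import numpy as np

dummy = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)  # stand-in upload
batch = custom_Image_preprocessing(dummy)
print(batch.shape, float(batch.min()) >= 0.0, float(batch.max()) <= 1.0)
# expected: (1, 150, 150, 3) True True
```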
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_sling_256x192/__init__.py DELETED
File without changes
spaces/AlekseyKorshuk/gai-project/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Gai Project
3
- emoji: 📈
4
- colorFrom: blue
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.50.2
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/korean.py DELETED
@@ -1,210 +0,0 @@
1
- import re
2
- from jamo import h2j, j2hcj
3
- import ko_pron
4
-
5
-
6
- # This is a list of Korean classifiers preceded by pure Korean numerals.
7
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
8
-
9
- # List of (hangul, hangul divided) pairs:
10
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
11
- ('ㄳ', 'ㄱㅅ'),
12
- ('ㄵ', 'ㄴㅈ'),
13
- ('ㄶ', 'ㄴㅎ'),
14
- ('ㄺ', 'ㄹㄱ'),
15
- ('ㄻ', 'ㄹㅁ'),
16
- ('ㄼ', 'ㄹㅂ'),
17
- ('ㄽ', 'ㄹㅅ'),
18
- ('ㄾ', 'ㄹㅌ'),
19
- ('ㄿ', 'ㄹㅍ'),
20
- ('ㅀ', 'ㄹㅎ'),
21
- ('ㅄ', 'ㅂㅅ'),
22
- ('ㅘ', 'ㅗㅏ'),
23
- ('ㅙ', 'ㅗㅐ'),
24
- ('ㅚ', 'ㅗㅣ'),
25
- ('ㅝ', 'ㅜㅓ'),
26
- ('ㅞ', 'ㅜㅔ'),
27
- ('ㅟ', 'ㅜㅣ'),
28
- ('ㅢ', 'ㅡㅣ'),
29
- ('ㅑ', 'ㅣㅏ'),
30
- ('ㅒ', 'ㅣㅐ'),
31
- ('ㅕ', 'ㅣㅓ'),
32
- ('ㅖ', 'ㅣㅔ'),
33
- ('ㅛ', 'ㅣㅗ'),
34
- ('ㅠ', 'ㅣㅜ')
35
- ]]
36
-
37
- # List of (Latin alphabet, hangul) pairs:
38
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
39
- ('a', '에이'),
40
- ('b', '비'),
41
- ('c', '시'),
42
- ('d', '디'),
43
- ('e', '이'),
44
- ('f', '에프'),
45
- ('g', '지'),
46
- ('h', '에이치'),
47
- ('i', '아이'),
48
- ('j', '제이'),
49
- ('k', '케이'),
50
- ('l', '엘'),
51
- ('m', '엠'),
52
- ('n', '엔'),
53
- ('o', '오'),
54
- ('p', '피'),
55
- ('q', '큐'),
56
- ('r', '아르'),
57
- ('s', '에스'),
58
- ('t', '티'),
59
- ('u', '유'),
60
- ('v', '브이'),
61
- ('w', '더블유'),
62
- ('x', '엑스'),
63
- ('y', '와이'),
64
- ('z', '제트')
65
- ]]
66
-
67
- # List of (ipa, lazy ipa) pairs:
68
- _ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
69
- ('t͡ɕ','ʧ'),
70
- ('d͡ʑ','ʥ'),
71
- ('ɲ','n^'),
72
- ('ɕ','ʃ'),
73
- ('ʷ','w'),
74
- ('ɭ','l`'),
75
- ('ʎ','ɾ'),
76
- ('ɣ','ŋ'),
77
- ('ɰ','ɯ'),
78
- ('ʝ','j'),
79
- ('ʌ','ə'),
80
- ('ɡ','g'),
81
- ('\u031a','#'),
82
- ('\u0348','='),
83
- ('\u031e',''),
84
- ('\u0320',''),
85
- ('\u0339','')
86
- ]]
87
-
88
-
89
- def latin_to_hangul(text):
90
- for regex, replacement in _latin_to_hangul:
91
- text = re.sub(regex, replacement, text)
92
- return text
93
-
94
-
95
- def divide_hangul(text):
96
- text = j2hcj(h2j(text))
97
- for regex, replacement in _hangul_divided:
98
- text = re.sub(regex, replacement, text)
99
- return text
100
-
101
-
102
- def hangul_number(num, sino=True):
103
- '''Reference https://github.com/Kyubyong/g2pK'''
104
- num = re.sub(',', '', num)
105
-
106
- if num == '0':
107
- return '영'
108
- if not sino and num == '20':
109
- return '스무'
110
-
111
- digits = '123456789'
112
- names = '일이삼사오육칠팔구'
113
- digit2name = {d: n for d, n in zip(digits, names)}
114
-
115
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
116
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
117
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
118
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
119
-
120
- spelledout = []
121
- for i, digit in enumerate(num):
122
- i = len(num) - i - 1
123
- if sino:
124
- if i == 0:
125
- name = digit2name.get(digit, '')
126
- elif i == 1:
127
- name = digit2name.get(digit, '') + '십'
128
- name = name.replace('일십', '십')
129
- else:
130
- if i == 0:
131
- name = digit2mod.get(digit, '')
132
- elif i == 1:
133
- name = digit2dec.get(digit, '')
134
- if digit == '0':
135
- if i % 4 == 0:
136
- last_three = spelledout[-min(3, len(spelledout)):]
137
- if ''.join(last_three) == '':
138
- spelledout.append('')
139
- continue
140
- else:
141
- spelledout.append('')
142
- continue
143
- if i == 2:
144
- name = digit2name.get(digit, '') + '백'
145
- name = name.replace('일백', '백')
146
- elif i == 3:
147
- name = digit2name.get(digit, '') + '천'
148
- name = name.replace('일천', '천')
149
- elif i == 4:
150
- name = digit2name.get(digit, '') + '만'
151
- name = name.replace('일만', '만')
152
- elif i == 5:
153
- name = digit2name.get(digit, '') + '십'
154
- name = name.replace('일십', '십')
155
- elif i == 6:
156
- name = digit2name.get(digit, '') + '백'
157
- name = name.replace('일백', '백')
158
- elif i == 7:
159
- name = digit2name.get(digit, '') + '천'
160
- name = name.replace('일천', '천')
161
- elif i == 8:
162
- name = digit2name.get(digit, '') + '억'
163
- elif i == 9:
164
- name = digit2name.get(digit, '') + '십'
165
- elif i == 10:
166
- name = digit2name.get(digit, '') + '백'
167
- elif i == 11:
168
- name = digit2name.get(digit, '') + '천'
169
- elif i == 12:
170
- name = digit2name.get(digit, '') + '조'
171
- elif i == 13:
172
- name = digit2name.get(digit, '') + '십'
173
- elif i == 14:
174
- name = digit2name.get(digit, '') + '백'
175
- elif i == 15:
176
- name = digit2name.get(digit, '') + '천'
177
- spelledout.append(name)
178
- return ''.join(elem for elem in spelledout)
179
-
180
-
181
- def number_to_hangul(text):
182
- '''Reference https://github.com/Kyubyong/g2pK'''
183
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
184
- for token in tokens:
185
- num, classifier = token
186
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
187
- spelledout = hangul_number(num, sino=False)
188
- else:
189
- spelledout = hangul_number(num, sino=True)
190
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
191
- # digit by digit for remaining digits
192
- digits = '0123456789'
193
- names = '영일이삼사오육칠팔구'
194
- for d, n in zip(digits, names):
195
- text = text.replace(d, n)
196
- return text
197
-
198
-
199
- def korean_to_lazy_ipa(text):
200
- text = latin_to_hangul(text)
201
- text = number_to_hangul(text)
202
- text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text)
203
- for regex, replacement in _ipa_to_lazy_ipa:
204
- text = re.sub(regex, replacement, text)
205
- return text
206
-
207
-
208
- def korean_to_ipa(text):
209
- text = korean_to_lazy_ipa(text)
210
- return text.replace('ʧ','tʃ').replace('ʥ','dʑ')
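A quick sketch of how these helpers compose (ours; requires the `jamo` and `ko_pron` packages, and the exact outputs below are our expectation from the tables above):

```python
print(latin_to_hangul('OK'))    # -> '오케이' (letter-by-letter spell-out)
print(number_to_hangul('3개'))   # '개' is in _korean_classifiers -> native reading '세개'
print(divide_hangul('안녕'))     # compound jamo split apart -> 'ㅇㅏㄴㄴㅣㅓㅇ'
```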
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
4
- ]
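The config is pure composition: model, dataset, runtime, and schedule each come from a `_base_` file. A sketch of inspecting the merged result with mmcv, assuming a checkout where the relative `_base_` paths resolve (the config path below is hypothetical):

```python
from mmcv import Config

cfg = Config.fromfile('configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py')
print(cfg.model.backbone.type)  # e.g. 'ResNetV1c', from the nonlocal_r50-d8 model base
print(cfg.runner.max_iters)     # 80000, from schedule_80k.py
```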
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/sync_bn.py DELETED
@@ -1,279 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import torch
3
- import torch.distributed as dist
4
- import torch.nn.functional as F
5
- from torch.autograd import Function
6
- from torch.autograd.function import once_differentiable
7
- from torch.nn.modules.module import Module
8
- from torch.nn.parameter import Parameter
9
-
10
- from annotator.uniformer.mmcv.cnn import NORM_LAYERS
11
- from ..utils import ext_loader
12
-
13
- ext_module = ext_loader.load_ext('_ext', [
14
- 'sync_bn_forward_mean', 'sync_bn_forward_var', 'sync_bn_forward_output',
15
- 'sync_bn_backward_param', 'sync_bn_backward_data'
16
- ])
17
-
18
-
19
- class SyncBatchNormFunction(Function):
20
-
21
- @staticmethod
22
- def symbolic(g, input, running_mean, running_var, weight, bias, momentum,
23
- eps, group, group_size, stats_mode):
24
- return g.op(
25
- 'mmcv::MMCVSyncBatchNorm',
26
- input,
27
- running_mean,
28
- running_var,
29
- weight,
30
- bias,
31
- momentum_f=momentum,
32
- eps_f=eps,
33
- group_i=group,
34
- group_size_i=group_size,
35
- stats_mode=stats_mode)
36
-
37
- @staticmethod
38
- def forward(self, input, running_mean, running_var, weight, bias, momentum,
39
- eps, group, group_size, stats_mode):
40
- self.momentum = momentum
41
- self.eps = eps
42
- self.group = group
43
- self.group_size = group_size
44
- self.stats_mode = stats_mode
45
-
46
- assert isinstance(
47
- input, (torch.HalfTensor, torch.FloatTensor,
48
- torch.cuda.HalfTensor, torch.cuda.FloatTensor)), \
49
- f'only support Half or Float Tensor, but {input.type()}'
50
- output = torch.zeros_like(input)
51
- input3d = input.flatten(start_dim=2)
52
- output3d = output.view_as(input3d)
53
- num_channels = input3d.size(1)
54
-
55
- # ensure mean/var/norm/std are initialized as zeros
56
- # ``torch.empty()`` does not guarantee that
57
- mean = torch.zeros(
58
- num_channels, dtype=torch.float, device=input3d.device)
59
- var = torch.zeros(
60
- num_channels, dtype=torch.float, device=input3d.device)
61
- norm = torch.zeros_like(
62
- input3d, dtype=torch.float, device=input3d.device)
63
- std = torch.zeros(
64
- num_channels, dtype=torch.float, device=input3d.device)
65
-
66
- batch_size = input3d.size(0)
67
- if batch_size > 0:
68
- ext_module.sync_bn_forward_mean(input3d, mean)
69
- batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype)
70
- else:
71
- # skip updating mean and leave it as zeros when the input is empty
72
- batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype)
73
-
74
- # synchronize mean and the batch flag
75
- vec = torch.cat([mean, batch_flag])
76
- if self.stats_mode == 'N':
77
- vec *= batch_size
78
- if self.group_size > 1:
79
- dist.all_reduce(vec, group=self.group)
80
- total_batch = vec[-1].detach()
81
- mean = vec[:num_channels]
82
-
83
- if self.stats_mode == 'default':
84
- mean = mean / self.group_size
85
- elif self.stats_mode == 'N':
86
- mean = mean / total_batch.clamp(min=1)
87
- else:
88
- raise NotImplementedError
89
-
90
- # leave var as zeros when the input is empty
91
- if batch_size > 0:
92
- ext_module.sync_bn_forward_var(input3d, mean, var)
93
-
94
- if self.stats_mode == 'N':
95
- var *= batch_size
96
- if self.group_size > 1:
97
- dist.all_reduce(var, group=self.group)
98
-
99
- if self.stats_mode == 'default':
100
- var /= self.group_size
101
- elif self.stats_mode == 'N':
102
- var /= total_batch.clamp(min=1)
103
- else:
104
- raise NotImplementedError
105
-
106
- # if the total batch size over all the ranks is zero,
107
- # we should not update the statistics in the current batch
108
- update_flag = total_batch.clamp(max=1)
109
- momentum = update_flag * self.momentum
110
- ext_module.sync_bn_forward_output(
111
- input3d,
112
- mean,
113
- var,
114
- weight,
115
- bias,
116
- running_mean,
117
- running_var,
118
- norm,
119
- std,
120
- output3d,
121
- eps=self.eps,
122
- momentum=momentum,
123
- group_size=self.group_size)
124
- self.save_for_backward(norm, std, weight)
125
- return output
126
-
127
- @staticmethod
128
- @once_differentiable
129
- def backward(self, grad_output):
130
- norm, std, weight = self.saved_tensors
131
- grad_weight = torch.zeros_like(weight)
132
- grad_bias = torch.zeros_like(weight)
133
- grad_input = torch.zeros_like(grad_output)
134
- grad_output3d = grad_output.flatten(start_dim=2)
135
- grad_input3d = grad_input.view_as(grad_output3d)
136
-
137
- batch_size = grad_input3d.size(0)
138
- if batch_size > 0:
139
- ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight,
140
- grad_bias)
141
-
142
- # all reduce
143
- if self.group_size > 1:
144
- dist.all_reduce(grad_weight, group=self.group)
145
- dist.all_reduce(grad_bias, group=self.group)
146
- grad_weight /= self.group_size
147
- grad_bias /= self.group_size
148
-
149
- if batch_size > 0:
150
- ext_module.sync_bn_backward_data(grad_output3d, weight,
151
- grad_weight, grad_bias, norm, std,
152
- grad_input3d)
153
-
154
- return grad_input, None, None, grad_weight, grad_bias, \
155
- None, None, None, None, None
156
-
157
-
158
- @NORM_LAYERS.register_module(name='MMSyncBN')
159
- class SyncBatchNorm(Module):
160
- """Synchronized Batch Normalization.
161
-
162
- Args:
163
- num_features (int): number of features/chennels in input tensor
164
- eps (float, optional): a value added to the denominator for numerical
165
- stability. Defaults to 1e-5.
166
- momentum (float, optional): the value used for the running_mean and
167
- running_var computation. Defaults to 0.1.
168
- affine (bool, optional): whether to use learnable affine parameters.
169
- Defaults to True.
170
- track_running_stats (bool, optional): whether to track the running
171
- mean and variance during training. When set to False, this
172
- module does not track such statistics, and initializes statistics
173
- buffers ``running_mean`` and ``running_var`` as ``None``. When
174
- these buffers are ``None``, this module always uses batch
175
- statistics in both training and eval modes. Defaults to True.
176
- group (int, optional): synchronization of stats happen within
177
- each process group individually. By default it is synchronization
178
- across the whole world. Defaults to None.
179
- stats_mode (str, optional): The statistical mode. Available options
180
- includes ``'default'`` and ``'N'``. Defaults to 'default'.
181
- When ``stats_mode=='default'``, it computes the overall statistics
182
- using those from each worker with equal weight, i.e., the
183
- statistics are synchronized and simply divied by ``group``. This
184
- mode will produce inaccurate statistics when empty tensors occur.
185
- When ``stats_mode=='N'``, it compute the overall statistics using
186
- the total number of batches in each worker ignoring the number of
187
- group, i.e., the statistics are synchronized and then divied by
188
- the total batch ``N``. This mode is beneficial when empty tensors
189
- occur during training, as it average the total mean by the real
190
- number of batch.
191
- """
192
-
193
- def __init__(self,
194
- num_features,
195
- eps=1e-5,
196
- momentum=0.1,
197
- affine=True,
198
- track_running_stats=True,
199
- group=None,
200
- stats_mode='default'):
201
- super(SyncBatchNorm, self).__init__()
202
- self.num_features = num_features
203
- self.eps = eps
204
- self.momentum = momentum
205
- self.affine = affine
206
- self.track_running_stats = track_running_stats
207
- group = dist.group.WORLD if group is None else group
208
- self.group = group
209
- self.group_size = dist.get_world_size(group)
210
- assert stats_mode in ['default', 'N'], \
211
- f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"'
212
- self.stats_mode = stats_mode
213
- if self.affine:
214
- self.weight = Parameter(torch.Tensor(num_features))
215
- self.bias = Parameter(torch.Tensor(num_features))
216
- else:
217
- self.register_parameter('weight', None)
218
- self.register_parameter('bias', None)
219
- if self.track_running_stats:
220
- self.register_buffer('running_mean', torch.zeros(num_features))
221
- self.register_buffer('running_var', torch.ones(num_features))
222
- self.register_buffer('num_batches_tracked',
223
- torch.tensor(0, dtype=torch.long))
224
- else:
225
- self.register_buffer('running_mean', None)
226
- self.register_buffer('running_var', None)
227
- self.register_buffer('num_batches_tracked', None)
228
- self.reset_parameters()
229
-
230
- def reset_running_stats(self):
231
- if self.track_running_stats:
232
- self.running_mean.zero_()
233
- self.running_var.fill_(1)
234
- self.num_batches_tracked.zero_()
235
-
236
- def reset_parameters(self):
237
- self.reset_running_stats()
238
- if self.affine:
239
- self.weight.data.uniform_() # pytorch use ones_()
240
- self.bias.data.zero_()
241
-
242
- def forward(self, input):
243
- if input.dim() < 2:
244
- raise ValueError(
245
- f'expected at least 2D input, got {input.dim()}D input')
246
- if self.momentum is None:
247
- exponential_average_factor = 0.0
248
- else:
249
- exponential_average_factor = self.momentum
250
-
251
- if self.training and self.track_running_stats:
252
- if self.num_batches_tracked is not None:
253
- self.num_batches_tracked += 1
254
- if self.momentum is None: # use cumulative moving average
255
- exponential_average_factor = 1.0 / float(
256
- self.num_batches_tracked)
257
- else: # use exponential moving average
258
- exponential_average_factor = self.momentum
259
-
260
- if self.training or not self.track_running_stats:
261
- return SyncBatchNormFunction.apply(
262
- input, self.running_mean, self.running_var, self.weight,
263
- self.bias, exponential_average_factor, self.eps, self.group,
264
- self.group_size, self.stats_mode)
265
- else:
266
- return F.batch_norm(input, self.running_mean, self.running_var,
267
- self.weight, self.bias, False,
268
- exponential_average_factor, self.eps)
269
-
270
- def __repr__(self):
271
- s = self.__class__.__name__
272
- s += f'({self.num_features}, '
273
- s += f'eps={self.eps}, '
274
- s += f'momentum={self.momentum}, '
275
- s += f'affine={self.affine}, '
276
- s += f'track_running_stats={self.track_running_stats}, '
277
- s += f'group_size={self.group_size},'
278
- s += f'stats_mode={self.stats_mode})'
279
- return s
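In practice this layer is usually reached through mmcv's norm-layer registry rather than constructed directly. A minimal sketch (ours), noting that the constructor calls `dist.get_world_size()`, so a process group must already exist:

```python
import torch.distributed as dist
from annotator.uniformer.mmcv.cnn import build_norm_layer

# Assumes dist.init_process_group(...) has already run, e.g. under a
# distributed launcher; otherwise get_world_size() in __init__ will fail.
name, sync_bn = build_norm_layer(dict(type='MMSyncBN'), 64)
print(name, sync_bn)  # 'bn' plus the repr produced by __repr__ above
```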
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/parallel/_functions.py DELETED
@@ -1,79 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import torch
3
- from torch.nn.parallel._functions import _get_stream
4
-
5
-
6
- def scatter(input, devices, streams=None):
7
- """Scatters tensor across multiple GPUs."""
8
- if streams is None:
9
- streams = [None] * len(devices)
10
-
11
- if isinstance(input, list):
12
- chunk_size = (len(input) - 1) // len(devices) + 1
13
- outputs = [
14
- scatter(input[i], [devices[i // chunk_size]],
15
- [streams[i // chunk_size]]) for i in range(len(input))
16
- ]
17
- return outputs
18
- elif isinstance(input, torch.Tensor):
19
- output = input.contiguous()
20
- # TODO: copy to a pinned buffer first (if copying from CPU)
21
- stream = streams[0] if output.numel() > 0 else None
22
- if devices != [-1]:
23
- with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
24
- output = output.cuda(devices[0], non_blocking=True)
25
- else:
26
- # unsqueeze the first dimension thus the tensor's shape is the
27
- # same as those scattered with GPU.
28
- output = output.unsqueeze(0)
29
- return output
30
- else:
31
- raise Exception(f'Unknown type {type(input)}.')
32
-
33
-
34
- def synchronize_stream(output, devices, streams):
35
- if isinstance(output, list):
36
- chunk_size = len(output) // len(devices)
37
- for i in range(len(devices)):
38
- for j in range(chunk_size):
39
- synchronize_stream(output[i * chunk_size + j], [devices[i]],
40
- [streams[i]])
41
- elif isinstance(output, torch.Tensor):
42
- if output.numel() != 0:
43
- with torch.cuda.device(devices[0]):
44
- main_stream = torch.cuda.current_stream()
45
- main_stream.wait_stream(streams[0])
46
- output.record_stream(main_stream)
47
- else:
48
- raise Exception(f'Unknown type {type(output)}.')
49
-
50
-
51
- def get_input_device(input):
52
- if isinstance(input, list):
53
- for item in input:
54
- input_device = get_input_device(item)
55
- if input_device != -1:
56
- return input_device
57
- return -1
58
- elif isinstance(input, torch.Tensor):
59
- return input.get_device() if input.is_cuda else -1
60
- else:
61
- raise Exception(f'Unknown type {type(input)}.')
62
-
63
-
64
- class Scatter:
65
-
66
- @staticmethod
67
- def forward(target_gpus, input):
68
- input_device = get_input_device(input)
69
- streams = None
70
- if input_device == -1 and target_gpus != [-1]:
71
- # Perform CPU to GPU copies in a background stream
72
- streams = [_get_stream(device) for device in target_gpus]
73
-
74
- outputs = scatter(input, target_gpus, streams)
75
- # Synchronize with the copy stream
76
- if streams is not None:
77
- synchronize_stream(outputs, target_gpus, streams)
78
-
79
- return tuple(outputs)
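With `target_gpus=[-1]` nothing touches CUDA: each tensor in the input list just gains a leading dimension to mirror the GPU-scatter layout. A CPU-only check (ours):

```python
import torch

inputs = [torch.arange(6.).reshape(2, 3)]      # list input, as MMDataParallel passes args
outputs = Scatter.forward([-1], inputs)
print(outputs[0].shape)                        # torch.Size([1, 2, 3]) -- unsqueezed, no copy
```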
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/visualize.py DELETED
@@ -1,247 +0,0 @@
1
- import cv2
2
- import gc
3
- import requests
4
- from io import BytesIO
5
- import base64
6
- from scipy import misc
7
- from PIL import Image
8
- from matplotlib.axes import Axes
9
- from matplotlib.figure import Figure
10
- from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
11
- from typing import Tuple
12
-
13
- import torch
14
- from fastai.core import *
15
- from fastai.vision import *
16
-
17
- from .filters import IFilter, MasterFilter, ColorizerFilter
18
- from .generators import gen_inference_deep, gen_inference_wide
19
-
20
-
21
-
22
- # class LoadedModel
23
- class ModelImageVisualizer:
24
- def __init__(self, filter: IFilter, results_dir: str = None):
25
- self.filter = filter
26
- self.results_dir = None if results_dir is None else Path(results_dir)
27
- self.results_dir.mkdir(parents=True, exist_ok=True)
28
-
29
- def _clean_mem(self):
30
- torch.cuda.empty_cache()
31
- # gc.collect()
32
-
33
- def _open_pil_image(self, path: Path) -> Image:
34
- return Image.open(path).convert('RGB')
35
-
36
- def _get_image_from_url(self, url: str) -> Image:
37
- response = requests.get(url, timeout=30, headers={'Accept': '*/*;q=0.8'})
38
- img = Image.open(BytesIO(response.content)).convert('RGB')
39
- return img
40
-
41
- def plot_transformed_image_from_url(
42
- self,
43
- url: str,
44
- path: str = 'test_images/image.png',
45
- results_dir:Path = None,
46
- figsize: Tuple[int, int] = (20, 20),
47
- render_factor: int = None,
48
-
49
- display_render_factor: bool = False,
50
- compare: bool = False,
51
- post_process: bool = True,
52
- watermarked: bool = True,
53
- ) -> Path:
54
- img = self._get_image_from_url(url)
55
- img.save(path)
56
- return self.plot_transformed_image(
57
- path=path,
58
- results_dir=results_dir,
59
- figsize=figsize,
60
- render_factor=render_factor,
61
- display_render_factor=display_render_factor,
62
- compare=compare,
63
- post_process = post_process,
64
- watermarked=watermarked,
65
- )
66
-
67
- def plot_transformed_image(
68
- self,
69
- path: str,
70
- results_dir:Path = None,
71
- figsize: Tuple[int, int] = (20, 20),
72
- render_factor: int = None,
73
- display_render_factor: bool = False,
74
- compare: bool = False,
75
- post_process: bool = True,
76
- watermarked: bool = True,
77
- ) -> Path:
78
- path = Path(path)
79
- if results_dir is None:
80
- results_dir = Path(self.results_dir)
81
- result = self.get_transformed_image(
82
- path, render_factor, post_process=post_process,watermarked=watermarked
83
- )
84
- orig = self._open_pil_image(path)
85
- if compare:
86
- self._plot_comparison(
87
- figsize, render_factor, display_render_factor, orig, result
88
- )
89
- else:
90
- self._plot_solo(figsize, render_factor, display_render_factor, result)
91
-
92
- orig.close()
93
- result_path = self._save_result_image(path, result, results_dir=results_dir)
94
- result.close()
95
- return result_path
96
-
97
- def plot_transformed_pil_image(
98
- self,
99
- input_image: Image,
100
- figsize: Tuple[int, int] = (20, 20),
101
- render_factor: int = None,
102
- display_render_factor: bool = False,
103
- compare: bool = False,
104
- post_process: bool = True,
105
- ) -> Image:
106
-
107
- result = self.get_transformed_pil_image(
108
- input_image, render_factor, post_process=post_process
109
- )
110
-
111
- if compare:
112
- self._plot_comparison(
113
- figsize, render_factor, display_render_factor, input_image, result
114
- )
115
- else:
116
- self._plot_solo(figsize, render_factor, display_render_factor, result)
117
-
118
- return result
119
-
120
- def _plot_comparison(
121
- self,
122
- figsize: Tuple[int, int],
123
- render_factor: int,
124
- display_render_factor: bool,
125
- orig: Image,
126
- result: Image,
127
- ):
128
- fig, axes = plt.subplots(1, 2, figsize=figsize)
129
- self._plot_image(
130
- orig,
131
- axes=axes[0],
132
- figsize=figsize,
133
- render_factor=render_factor,
134
- display_render_factor=False,
135
- )
136
- self._plot_image(
137
- result,
138
- axes=axes[1],
139
- figsize=figsize,
140
- render_factor=render_factor,
141
- display_render_factor=display_render_factor,
142
- )
143
-
144
- def _plot_solo(
145
- self,
146
- figsize: Tuple[int, int],
147
- render_factor: int,
148
- display_render_factor: bool,
149
- result: Image,
150
- ):
151
- fig, axes = plt.subplots(1, 1, figsize=figsize)
152
- self._plot_image(
153
- result,
154
- axes=axes,
155
- figsize=figsize,
156
- render_factor=render_factor,
157
- display_render_factor=display_render_factor,
158
- )
159
-
160
- def _save_result_image(self, source_path: Path, image: Image, results_dir = None) -> Path:
161
- if results_dir is None:
162
- results_dir = Path(self.results_dir)
163
- result_path = results_dir / source_path.name
164
- image.save(result_path)
165
- return result_path
166
-
167
- def get_transformed_image(
168
- self, path: Path, render_factor: int = None, post_process: bool = True,
169
- watermarked: bool = True,
170
- ) -> Image:
171
- self._clean_mem()
172
- orig_image = self._open_pil_image(path)
173
- filtered_image = self.filter.filter(
174
- orig_image, orig_image, render_factor=render_factor,post_process=post_process
175
- )
176
-
177
- return filtered_image
178
-
179
- def get_transformed_pil_image(
180
- self, input_image: Image, render_factor: int = None, post_process: bool = True,
181
- ) -> Image:
182
- self._clean_mem()
183
- filtered_image = self.filter.filter(
184
- input_image, input_image, render_factor=render_factor,post_process=post_process
185
- )
186
-
187
- return filtered_image
188
-
189
- def _plot_image(
190
- self,
191
- image: Image,
192
- render_factor: int,
193
- axes: Axes = None,
194
- figsize=(20, 20),
195
- display_render_factor = False,
196
- ):
197
- if axes is None:
198
- _, axes = plt.subplots(figsize=figsize)
199
- axes.imshow(np.asarray(image) / 255)
200
- axes.axis('off')
201
- if render_factor is not None and display_render_factor:
202
- plt.text(
203
- 10,
204
- 10,
205
- 'render_factor: ' + str(render_factor),
206
- color='white',
207
- backgroundcolor='black',
208
- )
209
-
210
- def _get_num_rows_columns(self, num_images: int, max_columns: int) -> Tuple[int, int]:
211
- columns = min(num_images, max_columns)
212
- rows = num_images // columns
213
- rows = rows if rows * columns == num_images else rows + 1
214
- return rows, columns
215
-
216
-
217
- def get_image_colorizer(
218
- root_folder: Path = Path('./'), render_factor: int = 35, artistic: bool = True
219
- ) -> ModelImageVisualizer:
220
- if artistic:
221
- return get_artistic_image_colorizer(root_folder=root_folder, render_factor=render_factor)
222
- else:
223
- return get_stable_image_colorizer(root_folder=root_folder, render_factor=render_factor)
224
-
225
-
226
- def get_stable_image_colorizer(
227
- root_folder: Path = Path('./'),
228
- weights_name: str = 'ColorizeStable_gen',
229
- results_dir='output',
230
- render_factor: int = 35
231
- ) -> ModelImageVisualizer:
232
- learn = gen_inference_wide(root_folder=root_folder, weights_name=weights_name)
233
- filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
234
- vis = ModelImageVisualizer(filtr, results_dir=results_dir)
235
- return vis
236
-
237
-
238
- def get_artistic_image_colorizer(
239
- root_folder: Path = Path('./'),
240
- weights_name: str = 'ColorizeArtistic_gen',
241
- results_dir='output',
242
- render_factor: int = 35
243
- ) -> ModelImageVisualizer:
244
- learn = gen_inference_deep(root_folder=root_folder, weights_name=weights_name)
245
- filtr = MasterFilter([ColorizerFilter(learn=learn)], render_factor=render_factor)
246
- vis = ModelImageVisualizer(filtr, results_dir=results_dir)
247
- return vis
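Typical use, as wired up by the helpers above. A sketch of ours: it assumes the pretrained generator weights (e.g. `ColorizeArtistic_gen`) are available where `gen_inference_deep` expects them, and the input path is hypothetical.

```python
colorizer = get_image_colorizer(artistic=True)
result_path = colorizer.plot_transformed_image(
    'test_images/old_photo.jpg', render_factor=35, compare=True)
print(result_path)  # colorized copy saved under the 'output' results dir
```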
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/__init__.py DELETED
@@ -1,21 +0,0 @@
1
- from pip._internal.distributions.base import AbstractDistribution
2
- from pip._internal.distributions.sdist import SourceDistribution
3
- from pip._internal.distributions.wheel import WheelDistribution
4
- from pip._internal.req.req_install import InstallRequirement
5
-
6
-
7
- def make_distribution_for_install_requirement(
8
- install_req: InstallRequirement,
9
- ) -> AbstractDistribution:
10
- """Returns a Distribution for the given InstallRequirement"""
11
- # Editable requirements will always be source distributions. They use the
12
- # legacy logic until we create a modern standard for them.
13
- if install_req.editable:
14
- return SourceDistribution(install_req)
15
-
16
- # If it's a wheel, it's a WheelDistribution
17
- if install_req.is_wheel:
18
- return WheelDistribution(install_req)
19
-
20
- # Otherwise, a SourceDistribution
21
- return SourceDistribution(install_req)
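A sketch of the dispatch in action (ours; these are pip-internal APIs, so module paths can shift between pip releases):

```python
from pip._internal.req.constructors import install_req_from_line

req = install_req_from_line('requests==2.31.0')   # hypothetical requirement line
dist = make_distribution_for_install_requirement(req)
print(type(dist).__name__)  # SourceDistribution: no wheel link has been resolved yet
```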
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyproject_hooks/_in_process/__init__.py DELETED
@@ -1,18 +0,0 @@
1
- """This is a subpackage because the directory is on sys.path for _in_process.py
2
-
3
- The subpackage should stay as empty as possible to avoid shadowing modules that
4
- the backend might import.
5
- """
6
-
7
- import importlib.resources as resources
8
-
9
- try:
10
- resources.files
11
- except AttributeError:
12
- # Python 3.8 compatibility
13
- def _in_proc_script_path():
14
- return resources.path(__package__, '_in_process.py')
15
- else:
16
- def _in_proc_script_path():
17
- return resources.as_file(
18
- resources.files(__package__).joinpath('_in_process.py'))
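Either branch yields a context manager over a concrete filesystem path to the helper script, suitable for launching as a subprocess. A quick sketch (ours; imports the private vendored module directly):

```python
from pip._vendor.pyproject_hooks._in_process import _in_proc_script_path

with _in_proc_script_path() as script:
    print(script)  # .../pyproject_hooks/_in_process/_in_process.py
```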
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/status.py DELETED
@@ -1,132 +0,0 @@
1
- from types import TracebackType
2
- from typing import Optional, Type
3
-
4
- from .console import Console, RenderableType
5
- from .jupyter import JupyterMixin
6
- from .live import Live
7
- from .spinner import Spinner
8
- from .style import StyleType
9
-
10
-
11
- class Status(JupyterMixin):
12
- """Displays a status indicator with a 'spinner' animation.
13
-
14
- Args:
15
- status (RenderableType): A status renderable (str or Text typically).
16
- console (Console, optional): Console instance to use, or None for global console. Defaults to None.
17
- spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
18
- spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
19
- speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
20
- refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
21
- """
22
-
23
- def __init__(
24
- self,
25
- status: RenderableType,
26
- *,
27
- console: Optional[Console] = None,
28
- spinner: str = "dots",
29
- spinner_style: StyleType = "status.spinner",
30
- speed: float = 1.0,
31
- refresh_per_second: float = 12.5,
32
- ):
33
- self.status = status
34
- self.spinner_style = spinner_style
35
- self.speed = speed
36
- self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed)
37
- self._live = Live(
38
- self.renderable,
39
- console=console,
40
- refresh_per_second=refresh_per_second,
41
- transient=True,
42
- )
43
-
44
- @property
45
- def renderable(self) -> Spinner:
46
- return self._spinner
47
-
48
- @property
49
- def console(self) -> "Console":
50
- """Get the Console used by the Status objects."""
51
- return self._live.console
52
-
53
- def update(
54
- self,
55
- status: Optional[RenderableType] = None,
56
- *,
57
- spinner: Optional[str] = None,
58
- spinner_style: Optional[StyleType] = None,
59
- speed: Optional[float] = None,
60
- ) -> None:
61
- """Update status.
62
-
63
- Args:
64
- status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None.
65
- spinner (Optional[str], optional): New spinner or None for no change. Defaults to None.
66
- spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None.
67
- speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None.
68
- """
69
- if status is not None:
70
- self.status = status
71
- if spinner_style is not None:
72
- self.spinner_style = spinner_style
73
- if speed is not None:
74
- self.speed = speed
75
- if spinner is not None:
76
- self._spinner = Spinner(
77
- spinner, text=self.status, style=self.spinner_style, speed=self.speed
78
- )
79
- self._live.update(self.renderable, refresh=True)
80
- else:
81
- self._spinner.update(
82
- text=self.status, style=self.spinner_style, speed=self.speed
83
- )
84
-
85
- def start(self) -> None:
86
- """Start the status animation."""
87
- self._live.start()
88
-
89
- def stop(self) -> None:
90
- """Stop the spinner animation."""
91
- self._live.stop()
92
-
93
- def __rich__(self) -> RenderableType:
94
- return self.renderable
95
-
96
- def __enter__(self) -> "Status":
97
- self.start()
98
- return self
99
-
100
- def __exit__(
101
- self,
102
- exc_type: Optional[Type[BaseException]],
103
- exc_val: Optional[BaseException],
104
- exc_tb: Optional[TracebackType],
105
- ) -> None:
106
- self.stop()
107
-
108
-
109
- if __name__ == "__main__": # pragma: no cover
110
-
111
- from time import sleep
112
-
113
- from .console import Console
114
-
115
- console = Console()
116
- with console.status("[magenta]Covid detector booting up") as status:
117
- sleep(3)
118
- console.log("Importing advanced AI")
119
- sleep(3)
120
- console.log("Advanced Covid AI Ready")
121
- sleep(3)
122
- status.update(status="[bold blue] Scanning for Covid", spinner="earth")
123
- sleep(3)
124
- console.log("Found 10,000,000,000 copies of Covid32.exe")
125
- sleep(3)
126
- status.update(
127
- status="[bold red]Moving Covid32.exe to Trash",
128
- spinner="bouncingBall",
129
- spinner_style="yellow",
130
- )
131
- sleep(5)
132
- console.print("[bold green]Covid deleted successfully")
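`Console.status(...)` in the demo is a convenience wrapper; constructing `Status` directly works the same way. A minimal sketch using the public `rich` package (this vendored copy behaves identically):

```python
from time import sleep

from rich.status import Status

# Status is itself a context manager: start() on enter, stop() on exit.
with Status("Crunching numbers") as status:
    sleep(1)
    status.update("Almost done", spinner="line")
    sleep(1)
```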
spaces/Audio-AGI/AudioSep/models/CLAP/training/params.py DELETED
@@ -1,563 +0,0 @@
1
- import argparse
2
-
3
-
4
- def get_default_params(model_name):
5
- # Params from paper (https://arxiv.org/pdf/2103.00020.pdf)
6
- model_name = model_name.lower()
7
- if "vit" in model_name:
8
- return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}
9
- else:
10
- return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}
11
-
12
-
13
- def parse_args():
14
- parser = argparse.ArgumentParser()
15
- parser.add_argument(
16
- "--train-data",
17
- type=str,
18
- default=None,
19
- help="Path to h5 filewith training data",
20
- )
21
- parser.add_argument(
22
- "--val-data",
23
- type=str,
24
- default=None,
25
- help="Path to h5 file with validation data",
26
- )
27
- parser.add_argument(
28
- "--freeze-text",
29
- default=False,
30
- action="store_true",
31
- help="if you need to freeze the text encoder, make this True",
32
- )
33
- parser.add_argument(
34
- "--freeze-text-after",
35
- type=int,
36
- default=-1,
37
- help="if you need to freeze the text encoder after (include) epoch x, set this param to x. Set -1 to disable it",
38
- )
39
- parser.add_argument(
40
- "--train-ipc",
41
- type=str,
42
- default=None,
43
- help="Path to npy file of the number of instance per class in training data",
44
- )
45
- parser.add_argument(
46
- "--val-ipc",
47
- type=str,
48
- default=None,
49
- help="Path to npy file of the number of instance per class in validation data",
50
- )
51
- parser.add_argument(
52
- "--train-num-samples",
53
- type=int,
54
- default=None,
55
- help="Number of samples in dataset. Required for webdataset if not available in info file.",
56
- )
57
- parser.add_argument(
58
- "--val-num-samples",
59
- type=int,
60
- default=None,
61
- help="Number of samples in dataset. Useful for webdataset if not available in info file.",
62
- )
63
- parser.add_argument(
64
- "--dataset-type",
65
- choices=["webdataset", "csv", "auto", "toy"],
66
- default="auto",
67
- help="Which type of dataset to process.",
68
- )
69
- parser.add_argument(
70
- "--csv-separator",
71
- type=str,
72
- default="\t",
73
- help="For csv-like datasets, which separator to use.",
74
- )
75
- parser.add_argument(
76
- "--csv-img-key",
77
- type=str,
78
- default="filepath",
79
- help="For csv-like datasets, the name of the key for the image paths.",
80
- )
81
- parser.add_argument(
82
- "--csv-caption-key",
83
- type=str,
84
- default="title",
85
- help="For csv-like datasets, the name of the key for the captions.",
86
- )
87
- parser.add_argument(
88
- "--imagenet-val",
89
- type=str,
90
- default=None,
91
- help="Path to imagenet val set for conducting zero shot evaluation.",
92
- )
93
- parser.add_argument(
94
- "--imagenet-v2",
95
- type=str,
96
- default=None,
97
- help="Path to imagenet v2 for conducting zero shot evaluation.",
98
- )
99
- parser.add_argument(
100
- "--datasetnames",
101
- nargs="+",
102
- default=None,
103
- help="If loading webdataset, spedify the dataset names to load. Can be some of these: Clotho, audioset, audiocaps, BBCSoundEffects",
104
- )
105
- parser.add_argument(
106
- "--full-train-dataset",
107
- nargs="+",
108
- default=None,
109
- help="Which dataset will be trained with all the subsets. (train+test)",
110
- )
111
- parser.add_argument(
112
- "--exclude-eval-dataset",
113
- nargs="+",
114
- default=None,
115
- help="Which dataset will be excluded with evaluation",
116
- )
117
- parser.add_argument(
118
- "--datasetinfos",
119
- nargs="+",
120
- default=None,
121
- help="If loading webdataset, spedify the dataset types to load. Can be some of these: train, test, valid, unbalanced_train, balanced_train, eval",
122
- )
123
- parser.add_argument(
124
- "--dataset-proportion",
125
- type=float,
126
- default=1.0,
127
- help="How much proportion of dataset we want to train.",
128
- )
129
- parser.add_argument(
130
- "--remotedata",
131
- default=False,
132
- action="store_true",
133
- help="if the dataset is remote, set this flag",
134
- )
135
- parser.add_argument(
136
- "--class-label-path",
137
- type=str,
138
- default=None,
139
- help="The path of the class label pickle or csv.",
140
- )
141
- parser.add_argument(
142
- "--datasetpath",
143
- type=str,
144
- default="/mnt/audio_clip/webdataset_tar",
145
- help="The path to the dataset",
146
- )
147
- parser.add_argument(
148
- "--logs",
149
- type=str,
150
- default="./logs/",
151
- help="Where to store tensorboard logs. Use None to avoid storing logs.",
152
- )
153
- parser.add_argument(
154
- "--log-local",
155
- action="store_true",
156
- default=False,
157
- help="log files on local master, otherwise global master only.",
158
- )
159
- parser.add_argument(
160
- "--name",
161
- type=str,
162
- default=None,
163
- help="Optional identifier for the experiment when storing logs. Otherwise use current time.",
164
- )
165
- parser.add_argument(
166
- "--workers", type=int, default=1, help="Number of workers per GPU."
167
- )
168
- parser.add_argument(
169
- "--batch-size", type=int, default=64, help="Batch size per GPU."
170
- )
171
- parser.add_argument(
172
- "--epochs", type=int, default=32, help="Number of epochs to train for."
173
- )
174
- parser.add_argument("--lr", type=float, default=None, help="Learning rate.")
175
- parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.")
176
- parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.")
177
- parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.")
178
- parser.add_argument("--momentum", type=float, default=None, help="SGD epsilon.")
179
- parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.")
180
-
181
- parser.add_argument(
182
- "--split-opt",
183
- action="store_true",
184
- default=False,
185
- help="Use this flag to skip the learning rate decay.",
186
- )
187
- parser.add_argument(
188
- "--lr-pretrained", type=float, default=None, help="Learning rate for text."
189
- )
190
- parser.add_argument(
191
- "--beta1-pretrained", type=float, default=None, help="Adam beta 1 for text."
192
- )
193
- parser.add_argument(
194
- "--beta2-pretrained", type=float, default=None, help="Adam beta 2 for text."
195
- )
196
- parser.add_argument(
197
- "--eps-pretrained", type=float, default=None, help="Adam epsilon for text."
198
- )
199
- parser.add_argument(
200
- "--wd-pretrained", type=float, default=0.2, help="Weight decay for text."
201
- )
202
- parser.add_argument(
203
- "--momentum-pretrained", type=float, default=0.9, help="Momentum for text."
204
- )
205
- parser.add_argument(
206
- "--lr-new", type=float, default=None, help="Learning rate for audio."
207
- )
208
- parser.add_argument(
209
- "--beta1-new", type=float, default=None, help="Adam beta 1 for audio."
210
- )
211
- parser.add_argument(
212
- "--beta2-new", type=float, default=None, help="Adam beta 2 for audio."
213
- )
214
- parser.add_argument(
215
- "--eps-new", type=float, default=None, help="Adam epsilon for audio."
216
- )
217
- parser.add_argument(
218
- "--wd-new", type=float, default=0.2, help="Weight decay for audio."
219
- )
220
- parser.add_argument(
221
- "--momentum-new", type=float, default=0.9, help="Momentum for audio."
222
- )
223
- parser.add_argument(
224
- "--warmup", type=int, default=10000, help="Number of steps to warmup for."
225
- )
226
- parser.add_argument(
227
- "--use-bn-sync",
228
- default=False,
229
- action="store_true",
230
- help="Whether to use batch norm sync.",
231
- )
232
- parser.add_argument(
233
- "--skip-scheduler",
234
- action="store_true",
235
- default=False,
236
- help="Use this flag to skip the learning rate decay.",
237
- )
238
- parser.add_argument(
239
- "--save-frequency", type=int, default=1, help="How often to save checkpoints."
240
- )
241
- parser.add_argument(
242
- "--save-top-performance",
243
- type=int,
244
- default=0,
245
- help="Save the top x performance weights if the value >0",
246
- )
247
- parser.add_argument(
248
- "--save-most-recent",
249
- action="store_true",
250
- default=False,
251
- help="Always save the most recent model trained to epoch_latest.pt.",
252
- )
253
- parser.add_argument(
254
- "--zeroshot-frequency", type=int, default=2, help="How often to run zero shot."
255
- )
256
- parser.add_argument(
257
- "--val-frequency",
258
- type=int,
259
- default=1,
260
- help="How often to run evaluation with val data.",
261
- )
262
- parser.add_argument(
263
- "--resume",
264
- default=None,
265
- type=str,
266
- help="path to latest checkpoint (default: none)",
267
- )
268
- parser.add_argument(
269
- "--precision",
270
- choices=["amp", "fp16", "fp32"],
271
- default="amp",
272
- help="Floating point precision.",
273
- )
274
- parser.add_argument(
275
- "--amodel",
276
- type=str,
277
- default="RN50",
278
- help="Name of the audio backbone to use.",
279
- )
280
- parser.add_argument(
281
- "--tmodel",
282
- type=str,
283
- default="transformer",
284
- help="Name of the text backbone to use. Can be [transformer, bert, roberta, bart]",
285
- )
286
- parser.add_argument(
287
- "--pretrained-audio",
288
- default="",
289
- type=str,
290
- help="Use a pretrained audio model weights for the audio encoder of CLAP",
291
- )
292
- parser.add_argument(
293
- "--pretrained-text",
294
- default="",
295
- type=str,
296
- help="Use a pretrained text model weights for the text encoder of CLAP",
297
- )
298
- parser.add_argument(
299
- "--pretrained",
300
- default="",
301
- type=str,
302
- help="Use a pretrained CLIP model weights with the specified tag or file path.",
303
- )
304
- parser.add_argument(
305
- "--pretrained-image",
306
- default=False,
307
- action="store_true",
308
- help="Load imagenet pretrained weights for image tower backbone if available.",
309
- )
310
- parser.add_argument(
311
- "--lock-image",
312
- default=False,
313
- action="store_true",
314
- help="Lock full image tower by disabling gradients.",
315
- )
316
- parser.add_argument(
317
- "--lock-image-unlocked-groups",
318
- type=int,
319
- default=0,
320
- help="Leave last n image tower layer groups unlocked.",
321
- )
322
- parser.add_argument(
323
- "--lock-image-freeze-bn-stats",
324
- default=False,
325
- action="store_true",
326
- help="Freeze BatchNorm running stats in image tower for any locked layers.",
327
- )
328
- parser.add_argument(
329
- "--local-loss",
330
- default=False,
331
- action="store_true",
332
- help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)",
333
- )
334
- parser.add_argument(
335
- "--gather-with-grad",
336
- default=False,
337
- action="store_true",
338
- help="enable full distributed gradient for feature gather",
339
- )
340
- parser.add_argument(
341
- "--force-quick-gelu",
342
- default=False,
343
- action="store_true",
344
- help="Force use of QuickGELU activation for non-OpenAI transformer models.",
345
- )
346
- parser.add_argument(
347
- "--torchscript",
348
- default=False,
349
- action="store_true",
350
- help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'",
351
- )
352
- parser.add_argument(
353
- "--trace",
354
- default=False,
355
- action="store_true",
356
- help="torch.jit.trace the model for inference / eval only",
357
- )
358
- # arguments for distributed training
359
- parser.add_argument(
360
- "--dist-url",
361
- default="env://",
362
- type=str,
363
- help="url used to set up distributed training",
364
- )
365
- parser.add_argument(
366
- "--dist-backend", default="nccl", type=str, help="distributed backend"
367
- )
368
- parser.add_argument(
369
- "--report-to",
370
- default="",
371
- type=str,
372
- help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']",
373
- )
374
- parser.add_argument(
375
- "--wandb-notes", default="", type=str, help="Notes if logging with wandb"
376
- )
377
- parser.add_argument(
378
- "--C", type=float, default=3.16, help="inverse regularizer for logistic reg."
379
- )
380
- parser.add_argument(
381
- "--debug",
382
- default=False,
383
- action="store_true",
384
- help="If true, more information is logged.",
385
- )
386
- parser.add_argument(
387
- "--copy-codebase",
388
- default=False,
389
- action="store_true",
390
- help="If true, we copy the entire base on the log diretory, and execute from there.",
391
- )
392
- parser.add_argument(
393
- "--horovod",
394
- default=False,
395
- action="store_true",
396
- help="Use horovod for distributed training.",
397
- )
398
- parser.add_argument(
399
- "--ddp-static-graph",
400
- default=False,
401
- action="store_true",
402
- help="Enable static graph optimization for DDP in PyTorch >= 1.11.",
403
- )
404
- parser.add_argument(
405
- "--no-set-device-rank",
406
- default=False,
407
- action="store_true",
408
- help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).",
409
- )
410
- parser.add_argument("--seed", type=int, default=4242, help="Default random seed.")
411
-
412
- parser.add_argument(
413
- "--top-k-checkpoint-select-dataset",
414
- type=str,
415
- default="all",
416
- help="The dataset of selecting top-k checkpoint.",
417
- )
418
-
419
- # @R10, @R@5, @R1, mAP@10
420
- parser.add_argument(
421
- "--top-k-checkpoint-select-metric",
422
- type=str,
423
- default="_R@10",
424
- help="The metric for selecting top-k checkpoint.",
425
- )
426
- parser.add_argument(
427
- "--openai-model-cache-dir",
428
- type=str,
429
- default="~/.cache/clip",
430
- help="Directory to download OpenAI models.",
431
- )
432
- parser.add_argument(
433
- "--optimizer",
434
- type=str,
435
- default="adamw",
436
- help="can be AdamW or SGD",
437
- )
438
- parser.add_argument(
439
- "--parallel-eval",
440
- default=False,
441
- action="store_true",
442
- help="Eval in parallel (multi-GPU, multi-node).",
443
- )
444
-
445
- parser.add_argument(
446
- "--no-eval",
447
- default=False,
448
- action="store_true",
449
- help="Training without evaluation.",
450
- )
451
-
452
- parser.add_argument(
453
- "--lp-mlp",
454
- default=False,
455
- action="store_true",
456
- help="Linear Probe using MLP layer or not.",
457
- )
458
-
459
- parser.add_argument(
460
- "--lp-freeze",
461
- default=False,
462
- action="store_true",
463
- help="Linear Probe using Freeze CLAP or not",
464
- )
465
-
466
- parser.add_argument(
467
- "--lp-act",
468
- default="None",
469
- type=str,
470
- help="Options are ['relu','elu','prelu','softmax','sigmoid']",
471
- )
472
-
473
- parser.add_argument(
474
- "--lp-loss", type=str, default="bce", help="Loss func of Linear Probe."
475
- )
476
-
477
- parser.add_argument(
478
- "--lp-metrics",
479
- type=str,
480
- default="map,mauc,acc",
481
- help="Metrics of Linear Probe.",
482
- )
483
-
484
- parser.add_argument(
485
- "--lp-lr", type=float, default=1e-4, help="learning rate of linear probe"
486
- )
487
- parser.add_argument(
488
- "--kappa",
489
- type=float,
490
- default=0,
491
- help="the kappa in the weighted contrastive loss, default is to turn off the weighted contrastive loss",
492
- )
493
-
494
- parser.add_argument(
495
- "--data-filling",
496
- type=str,
497
- default="pad",
498
- help="type of data filling when the audio length is shorter than the max length."
499
- "Can be one of the following: repeat, repeatpad, pad",
500
- )
501
- parser.add_argument(
502
- "--data-truncating",
503
- type=str,
504
- default="rand_trunc",
505
- help="type of data truncation when the audio length is longer than the max length."
506
- "Can be one of the following: rand_trunc, fusion",
507
- )
508
-
509
- parser.add_argument(
510
- "--clap-mlploss",
511
- default=False,
512
- action="store_true",
513
- help="Using MLP loss for CLAP model or not",
514
- )
515
-
516
- parser.add_argument(
517
- "--wandb-id",
518
- type=str,
519
- default=None,
520
- help="the id of wandb experiment to restore.",
521
- )
522
-
523
- parser.add_argument(
524
- "--sleep", type=float, default=0, help="sleep n seconds before start training"
525
- )
526
-
527
- # variable length processing
528
- parser.add_argument(
529
- "--enable-fusion",
530
- default=False,
531
- action="store_true",
532
- help="Enable feature funsion for variable-length data",
533
- )
534
-
535
- parser.add_argument(
536
- "--fusion-type",
537
- type=str,
538
- default="None",
539
- help="Type is among ['channel_map', 'daf_1d','aff_1d','iaff_1d','daf_2d','aff_2d','iaff_2d']",
540
- )
541
-
542
- parser.add_argument(
543
- "--mixup",
544
- default=False,
545
- action="store_true",
546
- help="Enable mixup in finetuning training.",
547
- )
548
- parser.add_argument(
549
- "--text-augment-selection",
550
- type=str,
551
- default=None,
552
- help="For selecting levels of augmented text. Type is among ['all', 'augment_only', 'none']",
553
- )
554
-
555
- args = parser.parse_args()
556
-
557
- # If some params are not passed, we use the default values based on model name.
558
- default_params = get_default_params(args.amodel)
559
- for name, val in default_params.items():
560
- if getattr(args, name) is None:
561
- setattr(args, name, val)
562
-
563
- return args
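
The tail of this file shows a common open_clip-style pattern: optimizer flags default to None so that per-model defaults can be backfilled after parsing, while explicit CLI values always win. A minimal self-contained sketch of that pattern (the get_default_params table below is illustrative, not the file's own):

import argparse

def get_default_params(model_name: str) -> dict:
    # Hypothetical per-model defaults; the original file defines its own table.
    if model_name in ("RN50", "RN101"):
        return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}
    return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}

parser = argparse.ArgumentParser()
parser.add_argument("--amodel", type=str, default="RN50")
parser.add_argument("--lr", type=float, default=None)
parser.add_argument("--beta1", type=float, default=None)
parser.add_argument("--beta2", type=float, default=None)
parser.add_argument("--eps", type=float, default=None)
args = parser.parse_args([])  # empty list: no CLI overrides

# Backfill any hyperparameter the user did not pass explicitly.
for name, val in get_default_params(args.amodel).items():
    if getattr(args, name) is None:
        setattr(args, name, val)

print(args.lr)  # 0.0005

Because only None values are overwritten, a user-supplied --lr survives the backfill loop untouched.
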
spaces/AutoGeneralAI/ChatGPT/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: ChatGPT
- emoji: 🐢
- colorFrom: yellow
- colorTo: blue
- sdk: gradio
- sdk_version: 3.27.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_roi_pooler.py DELETED
@@ -1,165 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import logging
- import unittest
- import torch
-
- from detectron2.modeling.poolers import ROIPooler
- from detectron2.structures import Boxes, RotatedBoxes
- from detectron2.utils.testing import random_boxes
-
- logger = logging.getLogger(__name__)
-
-
- class TestROIPooler(unittest.TestCase):
-     def _test_roialignv2_roialignrotated_match(self, device):
-         pooler_resolution = 14
-         canonical_level = 4
-         canonical_scale_factor = 2 ** canonical_level
-         pooler_scales = (1.0 / canonical_scale_factor,)
-         sampling_ratio = 0
-
-         N, C, H, W = 2, 4, 10, 8
-         N_rois = 10
-         std = 11
-         mean = 0
-         feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
-
-         features = [feature.to(device)]
-
-         rois = []
-         rois_rotated = []
-         for _ in range(N):
-             boxes = random_boxes(N_rois, W * canonical_scale_factor)
-             rotated_boxes = torch.zeros(N_rois, 5)
-             rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
-             rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
-             rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
-             rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
-             rois.append(Boxes(boxes).to(device))
-             rois_rotated.append(RotatedBoxes(rotated_boxes).to(device))
-
-         roialignv2_pooler = ROIPooler(
-             output_size=pooler_resolution,
-             scales=pooler_scales,
-             sampling_ratio=sampling_ratio,
-             pooler_type="ROIAlignV2",
-         )
-
-         roialignv2_out = roialignv2_pooler(features, rois)
-
-         roialignrotated_pooler = ROIPooler(
-             output_size=pooler_resolution,
-             scales=pooler_scales,
-             sampling_ratio=sampling_ratio,
-             pooler_type="ROIAlignRotated",
-         )
-
-         roialignrotated_out = roialignrotated_pooler(features, rois_rotated)
-
-         self.assertTrue(torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4))
-
-     def test_roialignv2_roialignrotated_match_cpu(self):
-         self._test_roialignv2_roialignrotated_match(device="cpu")
-
-     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
-     def test_roialignv2_roialignrotated_match_cuda(self):
-         self._test_roialignv2_roialignrotated_match(device="cuda")
-
-     def _test_scriptability(self, device):
-         pooler_resolution = 14
-         canonical_level = 4
-         canonical_scale_factor = 2 ** canonical_level
-         pooler_scales = (1.0 / canonical_scale_factor,)
-         sampling_ratio = 0
-
-         N, C, H, W = 2, 4, 10, 8
-         N_rois = 10
-         std = 11
-         mean = 0
-         feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
-
-         features = [feature.to(device)]
-
-         rois = []
-         for _ in range(N):
-             boxes = random_boxes(N_rois, W * canonical_scale_factor)
-
-             rois.append(Boxes(boxes).to(device))
-
-         roialignv2_pooler = ROIPooler(
-             output_size=pooler_resolution,
-             scales=pooler_scales,
-             sampling_ratio=sampling_ratio,
-             pooler_type="ROIAlignV2",
-         )
-
-         roialignv2_out = roialignv2_pooler(features, rois)
-         scripted_roialignv2_out = torch.jit.script(roialignv2_pooler)(features, rois)
-         self.assertTrue(torch.equal(roialignv2_out, scripted_roialignv2_out))
-
-     def test_scriptability_cpu(self):
-         self._test_scriptability(device="cpu")
-
-     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
-     def test_scriptability_gpu(self):
-         self._test_scriptability(device="cuda")
-
-     def test_no_images(self):
-         N, C, H, W = 0, 32, 32, 32
-         feature = torch.rand(N, C, H, W) - 0.5
-         features = [feature]
-         pooler = ROIPooler(
-             output_size=14, scales=(1.0,), sampling_ratio=0.0, pooler_type="ROIAlignV2"
-         )
-         output = pooler.forward(features, [])
-         self.assertEqual(output.shape, (0, C, 14, 14))
-
-     def test_roi_pooler_tracing(self):
-         class Model(torch.nn.Module):
-             def __init__(self, roi):
-                 super(Model, self).__init__()
-                 self.roi = roi
-
-             def forward(self, x, boxes):
-                 return self.roi(x, [Boxes(boxes)])
-
-         pooler_resolution = 14
-         canonical_level = 4
-         canonical_scale_factor = 2 ** canonical_level
-         pooler_scales = (1.0 / canonical_scale_factor, 0.5 / canonical_scale_factor)
-         sampling_ratio = 0
-
-         N, C, H, W = 1, 4, 10, 8
-         N_rois = 10
-         std = 11
-         mean = 0
-         feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean
-         feature = [feature, feature]
-
-         rois = random_boxes(N_rois, W * canonical_scale_factor)
-         # Add one larger box so that this level has only one box.
-         # This may trigger the bug https://github.com/pytorch/pytorch/issues/49852
-         # that we shall work around.
-         rois = torch.cat([rois, torch.tensor([[0, 0, 448, 448]])])
-
-         model = Model(
-             ROIPooler(
-                 output_size=pooler_resolution,
-                 scales=pooler_scales,
-                 sampling_ratio=sampling_ratio,
-                 pooler_type="ROIAlign",
-             )
-         )
-
-         with torch.no_grad():
-             func = torch.jit.trace(model, (feature, rois))
-             o = func(feature, rois)
-             self.assertEqual(o.shape, (11, 4, 14, 14))
-             o = func(feature, rois[:5])
-             self.assertEqual(o.shape, (5, 4, 14, 14))
-             o = func(feature, random_boxes(20, W * canonical_scale_factor))
-             self.assertEqual(o.shape, (20, 4, 14, 14))
-
-
- if __name__ == "__main__":
-     unittest.main()
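
The first test works by converting axis-aligned (x0, y0, x1, y1) boxes into rotated-box format (cx, cy, w, h, angle) with the angle fixed at 0, so ROIAlignV2 and ROIAlignRotated should produce matching outputs. The conversion on its own, runnable with just torch:

import torch

def xyxy_to_rotated(boxes: torch.Tensor) -> torch.Tensor:
    """Convert (x0, y0, x1, y1) boxes to (cx, cy, w, h, angle) with angle = 0,
    mirroring the conversion used in the deleted test."""
    rotated = torch.zeros(boxes.shape[0], 5)
    rotated[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0  # center x
    rotated[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0  # center y
    rotated[:, 2] = boxes[:, 2] - boxes[:, 0]          # width
    rotated[:, 3] = boxes[:, 3] - boxes[:, 1]          # height
    return rotated                                     # angle column stays 0

boxes = torch.tensor([[0.0, 0.0, 4.0, 2.0]])
print(xyxy_to_rotated(boxes))  # tensor([[2., 1., 4., 2., 0.]])
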
spaces/Ayush113/cricket_matchups/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Cricket Matchups
- emoji: 🏃
- colorFrom: purple
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.46.1
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bart92/RVC_HF/Fixes/local_fixes.py DELETED
@@ -1,136 +0,0 @@
- import os
- import sys
- import time
- import shutil
- import requests
- import zipfile
-
- def insert_new_line(file_name, line_to_find, text_to_insert):
-     lines = []
-     with open(file_name, 'r', encoding='utf-8') as read_obj:
-         lines = read_obj.readlines()
-     already_exists = False
-     with open(file_name + '.tmp', 'w', encoding='utf-8') as write_obj:
-         for i in range(len(lines)):
-             write_obj.write(lines[i])
-             if lines[i].strip() == line_to_find:
-                 # If the next line exists and already starts with sys.path.append, skip it
-                 if i + 1 < len(lines) and lines[i + 1].strip().startswith("sys.path.append"):
-                     print('It was already fixed! Skipping adding a line...')
-                     already_exists = True
-                     break
-                 else:
-                     write_obj.write(text_to_insert + '\n')
-     # If no existing sys.path.append line was found, replace the original file
-     if not already_exists:
-         os.replace(file_name + '.tmp', file_name)
-         return True
-     else:
-         # If an existing line was found, delete the temporary file
-         os.remove(file_name + '.tmp')
-         return False
-
- def replace_in_file(file_name, old_text, new_text):
-     with open(file_name, 'r', encoding='utf-8') as file:
-         file_contents = file.read()
-
-     if old_text in file_contents:
-         file_contents = file_contents.replace(old_text, new_text)
-         with open(file_name, 'w', encoding='utf-8') as file:
-             file.write(file_contents)
-         return True
-
-     return False
-
- if __name__ == "__main__":
-     current_path = os.getcwd()
-     file_name = os.path.join(current_path, "infer", "modules", "train", "extract", "extract_f0_print.py")
-     line_to_find = 'import numpy as np, logging'
-     text_to_insert = "sys.path.append(r'" + current_path + "')"
-
-
-     success_1 = insert_new_line(file_name, line_to_find, text_to_insert)
-     if success_1:
-         print('The first operation was successful!')
-     else:
-         print('The first operation was skipped because it was already fixed!')
-
-     file_name = 'infer-web.py'
-     old_text = 'with gr.Blocks(theme=gr.themes.Soft()) as app:'
-     new_text = 'with gr.Blocks() as app:'
-
-     success_2 = replace_in_file(file_name, old_text, new_text)
-     if success_2:
-         print('The second operation was successful!')
-     else:
-         print('The second operation was skipped because it was already fixed!')
-
-     print('Local corrections successful! You should now be able to infer and train locally in Applio RVC Fork.')
-
-     time.sleep(5)
-
-     def find_torchcrepe_directory(directory):
-         """
-         Recursively searches for the topmost folder named 'torchcrepe' within a directory.
-         Returns the path of the directory found or None if none is found.
-         """
-         for root, dirs, files in os.walk(directory):
-             if 'torchcrepe' in dirs:
-                 return os.path.join(root, 'torchcrepe')
-         return None
-
-     def download_and_extract_torchcrepe():
-         url = 'https://github.com/maxrmorrison/torchcrepe/archive/refs/heads/master.zip'
-         temp_dir = 'temp_torchcrepe'
-         destination_dir = os.getcwd()
-
-         try:
-             torchcrepe_dir_path = os.path.join(destination_dir, 'torchcrepe')
-
-             if os.path.exists(torchcrepe_dir_path):
-                 print("Skipping the torchcrepe download. The folder already exists.")
-                 return
-
-             # Download the file
-             print("Starting torchcrepe download...")
-             response = requests.get(url)
-
-             # Raise an error if the GET request was unsuccessful
-             response.raise_for_status()
-             print("Download completed.")
-
-             # Save the downloaded file
-             zip_file_path = os.path.join(temp_dir, 'master.zip')
-             os.makedirs(temp_dir, exist_ok=True)
-             with open(zip_file_path, 'wb') as file:
-                 file.write(response.content)
-             print(f"Zip file saved to {zip_file_path}")
-
-             # Extract the zip file
-             print("Extracting content...")
-             with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
-                 zip_file.extractall(temp_dir)
-             print("Extraction completed.")
-
-             # Locate the torchcrepe folder and move it to the destination directory
-             torchcrepe_dir = find_torchcrepe_directory(temp_dir)
-             if torchcrepe_dir:
-                 shutil.move(torchcrepe_dir, destination_dir)
-                 print(f"Moved the torchcrepe directory to {destination_dir}!")
-             else:
-                 print("The torchcrepe directory could not be located.")
-
-         except Exception as e:
-             print("Torchcrepe was not successfully downloaded:", e)
-
-         # Clean up the temporary directory
-         if os.path.exists(temp_dir):
-             shutil.rmtree(temp_dir)
-
-     # Run the function
-     download_and_extract_torchcrepe()
-
-     temp_dir = 'temp_torchcrepe'
-
-     if os.path.exists(temp_dir):
-         shutil.rmtree(temp_dir)
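
Both helpers in this script implement the same idea: patch a source file in place, but skip the edit when it has already been applied. A compact sketch of that idempotent-patch pattern using pathlib (patch_once is an illustrative name, not part of the repo):

from pathlib import Path

def patch_once(path: Path, old: str, new: str) -> bool:
    """Replace old with new exactly once; return False if already patched.
    Mirrors the replace_in_file pattern from the deleted script."""
    text = path.read_text(encoding="utf-8")
    if new in text:  # idempotent: already patched
        return False
    if old not in text:
        raise ValueError(f"marker not found in {path}")
    path.write_text(text.replace(old, new, 1), encoding="utf-8")
    return True

# Example usage against a throwaway file:
p = Path("demo.txt")
p.write_text("with gr.Blocks(theme=gr.themes.Soft()) as app:\n", encoding="utf-8")
print(patch_once(p, "gr.Blocks(theme=gr.themes.Soft())", "gr.Blocks()"))  # True
print(patch_once(p, "gr.Blocks(theme=gr.themes.Soft())", "gr.Blocks()"))  # False

Checking for the replacement string before searching for the marker is what makes a second run a safe no-op, the same property the original script gets from its startswith("sys.path.append") probe.
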
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/base.py DELETED
@@ -1,688 +0,0 @@
- import csv
- import email.message
- import functools
- import json
- import logging
- import pathlib
- import re
- import zipfile
- from typing import (
-     IO,
-     TYPE_CHECKING,
-     Any,
-     Collection,
-     Container,
-     Dict,
-     Iterable,
-     Iterator,
-     List,
-     NamedTuple,
-     Optional,
-     Tuple,
-     Union,
- )
-
- from pip._vendor.packaging.requirements import Requirement
- from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet
- from pip._vendor.packaging.utils import NormalizedName
- from pip._vendor.packaging.version import LegacyVersion, Version
-
- from pip._internal.exceptions import NoneMetadataError
- from pip._internal.locations import site_packages, user_site
- from pip._internal.models.direct_url import (
-     DIRECT_URL_METADATA_NAME,
-     DirectUrl,
-     DirectUrlValidationError,
- )
- from pip._internal.utils.compat import stdlib_pkgs  # TODO: Move definition here.
- from pip._internal.utils.egg_link import egg_link_path_from_sys_path
- from pip._internal.utils.misc import is_local, normalize_path
- from pip._internal.utils.packaging import safe_extra
- from pip._internal.utils.urls import url_to_path
-
- from ._json import msg_to_json
-
- if TYPE_CHECKING:
-     from typing import Protocol
- else:
-     Protocol = object
-
- DistributionVersion = Union[LegacyVersion, Version]
-
- InfoPath = Union[str, pathlib.PurePath]
-
- logger = logging.getLogger(__name__)
-
-
- class BaseEntryPoint(Protocol):
-     @property
-     def name(self) -> str:
-         raise NotImplementedError()
-
-     @property
-     def value(self) -> str:
-         raise NotImplementedError()
-
-     @property
-     def group(self) -> str:
-         raise NotImplementedError()
-
-
- def _convert_installed_files_path(
-     entry: Tuple[str, ...],
-     info: Tuple[str, ...],
- ) -> str:
-     """Convert a legacy installed-files.txt path into modern RECORD path.
-
-     The legacy format stores paths relative to the info directory, while the
-     modern format stores paths relative to the package root, e.g. the
-     site-packages directory.
-
-     :param entry: Path parts of the installed-files.txt entry.
-     :param info: Path parts of the egg-info directory relative to package root.
-     :returns: The converted entry.
-
-     For best compatibility with symlinks, this does not use ``abspath()`` or
-     ``Path.resolve()``, but tries to work with path parts:
-
-     1. While ``entry`` starts with ``..``, remove the equal amounts of parts
-        from ``info``; if ``info`` is empty, start appending ``..`` instead.
-     2. Join the two directly.
-     """
-     while entry and entry[0] == "..":
-         if not info or info[-1] == "..":
-             info += ("..",)
-         else:
-             info = info[:-1]
-         entry = entry[1:]
-     return str(pathlib.Path(*info, *entry))
-
-
- class RequiresEntry(NamedTuple):
-     requirement: str
-     extra: str
-     marker: str
-
-
- class BaseDistribution(Protocol):
-     @classmethod
-     def from_directory(cls, directory: str) -> "BaseDistribution":
-         """Load the distribution from a metadata directory.
-
-         :param directory: Path to a metadata directory, e.g. ``.dist-info``.
-         """
-         raise NotImplementedError()
-
-     @classmethod
-     def from_metadata_file_contents(
-         cls,
-         metadata_contents: bytes,
-         filename: str,
-         project_name: str,
-     ) -> "BaseDistribution":
-         """Load the distribution from the contents of a METADATA file.
-
-         This is used to implement PEP 658 by generating a "shallow" dist object that can
-         be used for resolution without downloading or building the actual dist yet.
-
-         :param metadata_contents: The contents of a METADATA file.
-         :param filename: File name for the dist with this metadata.
-         :param project_name: Name of the project this dist represents.
-         """
-         raise NotImplementedError()
-
-     @classmethod
-     def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution":
-         """Load the distribution from a given wheel.
-
-         :param wheel: A concrete wheel definition.
-         :param name: File name of the wheel.
-
-         :raises InvalidWheel: Whenever loading of the wheel causes a
-             :py:exc:`zipfile.BadZipFile` exception to be thrown.
-         :raises UnsupportedWheel: If the wheel is a valid zip, but malformed
-             internally.
-         """
-         raise NotImplementedError()
-
-     def __repr__(self) -> str:
-         return f"{self.raw_name} {self.version} ({self.location})"
-
-     def __str__(self) -> str:
-         return f"{self.raw_name} {self.version}"
-
-     @property
-     def location(self) -> Optional[str]:
-         """Where the distribution is loaded from.
-
-         A string value is not necessarily a filesystem path, since distributions
-         can be loaded from other sources, e.g. arbitrary zip archives. ``None``
-         means the distribution is created in-memory.
-
-         Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
-         this is a symbolic link, we want to preserve the relative path between
-         it and files in the distribution.
-         """
-         raise NotImplementedError()
-
-     @property
-     def editable_project_location(self) -> Optional[str]:
-         """The project location for editable distributions.
-
-         This is the directory where pyproject.toml or setup.py is located.
-         None if the distribution is not installed in editable mode.
-         """
-         # TODO: this property is relatively costly to compute, memoize it?
-         direct_url = self.direct_url
-         if direct_url:
-             if direct_url.is_local_editable():
-                 return url_to_path(direct_url.url)
-         else:
-             # Search for an .egg-link file by walking sys.path, as it was
-             # done before by dist_is_editable().
-             egg_link_path = egg_link_path_from_sys_path(self.raw_name)
-             if egg_link_path:
-                 # TODO: get project location from second line of egg_link file
-                 #       (https://github.com/pypa/pip/issues/10243)
-                 return self.location
-         return None
-
-     @property
-     def installed_location(self) -> Optional[str]:
-         """The distribution's "installed" location.
-
-         This should generally be a ``site-packages`` directory. This is
-         usually ``dist.location``, except for legacy develop-installed packages,
-         where ``dist.location`` is the source code location, and this is where
-         the ``.egg-link`` file is.
-
-         The returned location is normalized (in particular, with symlinks removed).
-         """
-         raise NotImplementedError()
-
-     @property
-     def info_location(self) -> Optional[str]:
-         """Location of the .[egg|dist]-info directory or file.
-
-         Similarly to ``location``, a string value is not necessarily a
-         filesystem path. ``None`` means the distribution is created in-memory.
-
-         For a modern .dist-info installation on disk, this should be something
-         like ``{location}/{raw_name}-{version}.dist-info``.
-
-         Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
-         this is a symbolic link, we want to preserve the relative path between
-         it and other files in the distribution.
-         """
-         raise NotImplementedError()
-
-     @property
-     def installed_by_distutils(self) -> bool:
-         """Whether this distribution is installed with legacy distutils format.
-
-         A distribution installed with "raw" distutils not patched by setuptools
-         uses one single file at ``info_location`` to store metadata. We need to
-         treat this specially on uninstallation.
-         """
-         info_location = self.info_location
-         if not info_location:
-             return False
-         return pathlib.Path(info_location).is_file()
-
-     @property
-     def installed_as_egg(self) -> bool:
-         """Whether this distribution is installed as an egg.
-
-         This usually indicates the distribution was installed by (older versions
-         of) easy_install.
-         """
-         location = self.location
-         if not location:
-             return False
-         return location.endswith(".egg")
-
-     @property
-     def installed_with_setuptools_egg_info(self) -> bool:
-         """Whether this distribution is installed with the ``.egg-info`` format.
-
-         This usually indicates the distribution was installed with setuptools
-         with an old pip version or with ``single-version-externally-managed``.
-
-         Note that this ensures the metadata store is a directory. distutils can
-         also install an ``.egg-info``, but as a file, not a directory. This
-         property is *False* for that case. Also see ``installed_by_distutils``.
-         """
-         info_location = self.info_location
-         if not info_location:
-             return False
-         if not info_location.endswith(".egg-info"):
-             return False
-         return pathlib.Path(info_location).is_dir()
-
-     @property
-     def installed_with_dist_info(self) -> bool:
-         """Whether this distribution is installed with the "modern format".
-
-         This indicates a "modern" installation, e.g. storing metadata in the
-         ``.dist-info`` directory. This applies to installations made by
-         setuptools (but through pip, not directly), or anything using the
-         standardized build backend interface (PEP 517).
-         """
-         info_location = self.info_location
-         if not info_location:
-             return False
-         if not info_location.endswith(".dist-info"):
-             return False
-         return pathlib.Path(info_location).is_dir()
-
-     @property
-     def canonical_name(self) -> NormalizedName:
-         raise NotImplementedError()
-
-     @property
-     def version(self) -> DistributionVersion:
-         raise NotImplementedError()
-
-     @property
-     def setuptools_filename(self) -> str:
-         """Convert a project name to its setuptools-compatible filename.
-
-         This is a copy of ``pkg_resources.to_filename()`` for compatibility.
-         """
-         return self.raw_name.replace("-", "_")
-
-     @property
-     def direct_url(self) -> Optional[DirectUrl]:
-         """Obtain a DirectUrl from this distribution.
-
-         Returns None if the distribution has no `direct_url.json` metadata,
-         or if `direct_url.json` is invalid.
-         """
-         try:
-             content = self.read_text(DIRECT_URL_METADATA_NAME)
-         except FileNotFoundError:
-             return None
-         try:
-             return DirectUrl.from_json(content)
-         except (
-             UnicodeDecodeError,
-             json.JSONDecodeError,
-             DirectUrlValidationError,
-         ) as e:
-             logger.warning(
-                 "Error parsing %s for %s: %s",
-                 DIRECT_URL_METADATA_NAME,
-                 self.canonical_name,
-                 e,
-             )
-             return None
-
-     @property
-     def installer(self) -> str:
-         try:
-             installer_text = self.read_text("INSTALLER")
-         except (OSError, ValueError, NoneMetadataError):
-             return ""  # Fail silently if the installer file cannot be read.
-         for line in installer_text.splitlines():
-             cleaned_line = line.strip()
-             if cleaned_line:
-                 return cleaned_line
-         return ""
-
-     @property
-     def requested(self) -> bool:
-         return self.is_file("REQUESTED")
-
-     @property
-     def editable(self) -> bool:
-         return bool(self.editable_project_location)
-
-     @property
-     def local(self) -> bool:
-         """If distribution is installed in the current virtual environment.
-
-         Always True if we're not in a virtualenv.
-         """
-         if self.installed_location is None:
-             return False
-         return is_local(self.installed_location)
-
-     @property
-     def in_usersite(self) -> bool:
-         if self.installed_location is None or user_site is None:
-             return False
-         return self.installed_location.startswith(normalize_path(user_site))
-
-     @property
-     def in_site_packages(self) -> bool:
-         if self.installed_location is None or site_packages is None:
-             return False
-         return self.installed_location.startswith(normalize_path(site_packages))
-
-     def is_file(self, path: InfoPath) -> bool:
-         """Check whether an entry in the info directory is a file."""
-         raise NotImplementedError()
-
-     def iter_distutils_script_names(self) -> Iterator[str]:
-         """Find distutils 'scripts' entries metadata.
-
-         If 'scripts' is supplied in ``setup.py``, distutils records those in the
-         installed distribution's ``scripts`` directory, a file for each script.
-         """
-         raise NotImplementedError()
-
-     def read_text(self, path: InfoPath) -> str:
-         """Read a file in the info directory.
-
-         :raise FileNotFoundError: If ``path`` does not exist in the directory.
-         :raise NoneMetadataError: If ``path`` exists in the info directory, but
-             cannot be read.
-         """
-         raise NotImplementedError()
-
-     def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
-         raise NotImplementedError()
-
-     def _metadata_impl(self) -> email.message.Message:
-         raise NotImplementedError()
-
-     @functools.lru_cache(maxsize=1)
-     def _metadata_cached(self) -> email.message.Message:
-         # When we drop python 3.7 support, move this to the metadata property and use
-         # functools.cached_property instead of lru_cache.
-         metadata = self._metadata_impl()
-         self._add_egg_info_requires(metadata)
-         return metadata
-
-     @property
-     def metadata(self) -> email.message.Message:
-         """Metadata of distribution parsed from e.g. METADATA or PKG-INFO.
-
-         This should return an empty message if the metadata file is unavailable.
-
-         :raises NoneMetadataError: If the metadata file is available, but does
-             not contain valid metadata.
-         """
-         return self._metadata_cached()
-
-     @property
-     def metadata_dict(self) -> Dict[str, Any]:
-         """PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO.
-
-         This should return an empty dict if the metadata file is unavailable.
-
-         :raises NoneMetadataError: If the metadata file is available, but does
-             not contain valid metadata.
-         """
-         return msg_to_json(self.metadata)
-
-     @property
-     def metadata_version(self) -> Optional[str]:
-         """Value of "Metadata-Version:" in distribution metadata, if available."""
-         return self.metadata.get("Metadata-Version")
-
-     @property
-     def raw_name(self) -> str:
-         """Value of "Name:" in distribution metadata."""
-         # The metadata should NEVER be missing the Name: key, but if it somehow
-         # does, fall back to the known canonical name.
-         return self.metadata.get("Name", self.canonical_name)
-
-     @property
-     def requires_python(self) -> SpecifierSet:
-         """Value of "Requires-Python:" in distribution metadata.
-
-         If the key does not exist or contains an invalid value, an empty
-         SpecifierSet should be returned.
-         """
-         value = self.metadata.get("Requires-Python")
-         if value is None:
-             return SpecifierSet()
-         try:
-             # Convert to str to satisfy the type checker; this can be a Header object.
-             spec = SpecifierSet(str(value))
-         except InvalidSpecifier as e:
-             message = "Package %r has an invalid Requires-Python: %s"
-             logger.warning(message, self.raw_name, e)
-             return SpecifierSet()
-         return spec
-
-     def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
-         """Dependencies of this distribution.
-
-         For modern .dist-info distributions, this is the collection of
-         "Requires-Dist:" entries in distribution metadata.
-         """
-         raise NotImplementedError()
-
-     def iter_provided_extras(self) -> Iterable[str]:
-         """Extras provided by this distribution.
-
-         For modern .dist-info distributions, this is the collection of
-         "Provides-Extra:" entries in distribution metadata.
-         """
-         raise NotImplementedError()
-
-     def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]:
-         try:
-             text = self.read_text("RECORD")
-         except FileNotFoundError:
-             return None
-         # This extra Path-str cast normalizes entries.
-         return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines()))
-
-     def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]:
-         try:
-             text = self.read_text("installed-files.txt")
-         except FileNotFoundError:
-             return None
-         paths = (p for p in text.splitlines(keepends=False) if p)
-         root = self.location
-         info = self.info_location
-         if root is None or info is None:
-             return paths
-         try:
-             info_rel = pathlib.Path(info).relative_to(root)
-         except ValueError:  # info is not relative to root.
-             return paths
-         if not info_rel.parts:  # info *is* root.
-             return paths
-         return (
-             _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts)
-             for p in paths
-         )
-
-     def iter_declared_entries(self) -> Optional[Iterator[str]]:
-         """Iterate through file entries declared in this distribution.
-
-         For modern .dist-info distributions, this is the files listed in the
-         ``RECORD`` metadata file. For legacy setuptools distributions, this
-         comes from ``installed-files.txt``, with entries normalized to be
-         compatible with the format used by ``RECORD``.
-
-         :return: An iterator for listed entries, or None if the distribution
-             contains neither ``RECORD`` nor ``installed-files.txt``.
-         """
-         return (
-             self._iter_declared_entries_from_record()
-             or self._iter_declared_entries_from_legacy()
-         )
-
-     def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]:
-         """Parse a ``requires.txt`` in an egg-info directory.
-
-         This is an INI-ish format where an egg-info stores dependencies. A
-         section name describes an extra or other environment markers, while
-         each entry is an arbitrary string (not a key-value pair) representing
-         a dependency as a requirement string (no markers).
-
-         There is a construct in ``importlib.metadata`` called ``Sectioned`` that
-         does mostly the same, but the format is currently considered private.
-         """
-         try:
-             content = self.read_text("requires.txt")
-         except FileNotFoundError:
-             return
-         extra = marker = ""  # Section-less entries don't have markers.
-         for line in content.splitlines():
-             line = line.strip()
-             if not line or line.startswith("#"):  # Comment; ignored.
-                 continue
-             if line.startswith("[") and line.endswith("]"):  # A section header.
-                 extra, _, marker = line.strip("[]").partition(":")
-                 continue
-             yield RequiresEntry(requirement=line, extra=extra, marker=marker)
-
-     def _iter_egg_info_extras(self) -> Iterable[str]:
-         """Get extras from the egg-info directory."""
-         known_extras = {""}
-         for entry in self._iter_requires_txt_entries():
-             if entry.extra in known_extras:
-                 continue
-             known_extras.add(entry.extra)
-             yield entry.extra
-
-     def _iter_egg_info_dependencies(self) -> Iterable[str]:
-         """Get distribution dependencies from the egg-info directory.
-
-         To ease parsing, this converts a legacy dependency entry into a PEP 508
-         requirement string. Like ``_iter_requires_txt_entries()``, there is code
-         in ``importlib.metadata`` that does mostly the same, but does not do
-         exactly what we need.
-
-         Namely, ``importlib.metadata`` does not normalize the extra name before
-         putting it into the requirement string, which causes marker comparison
-         to fail because the dist-info format does normalize. This is consistent
-         in all currently available PEP 517 backends, although not standardized.
-         """
-         for entry in self._iter_requires_txt_entries():
-             if entry.extra and entry.marker:
-                 marker = f'({entry.marker}) and extra == "{safe_extra(entry.extra)}"'
-             elif entry.extra:
-                 marker = f'extra == "{safe_extra(entry.extra)}"'
-             elif entry.marker:
-                 marker = entry.marker
-             else:
-                 marker = ""
-             if marker:
-                 yield f"{entry.requirement} ; {marker}"
-             else:
-                 yield entry.requirement
-
-     def _add_egg_info_requires(self, metadata: email.message.Message) -> None:
-         """Add egg-info requires.txt information to the metadata."""
-         if not metadata.get_all("Requires-Dist"):
-             for dep in self._iter_egg_info_dependencies():
-                 metadata["Requires-Dist"] = dep
-         if not metadata.get_all("Provides-Extra"):
-             for extra in self._iter_egg_info_extras():
-                 metadata["Provides-Extra"] = extra
-
-
- class BaseEnvironment:
-     """An environment containing distributions to introspect."""
-
-     @classmethod
-     def default(cls) -> "BaseEnvironment":
-         raise NotImplementedError()
-
-     @classmethod
-     def from_paths(cls, paths: Optional[List[str]]) -> "BaseEnvironment":
-         raise NotImplementedError()
-
-     def get_distribution(self, name: str) -> Optional["BaseDistribution"]:
-         """Given a requirement name, return the installed distribution.
-
-         The name may not be normalized. The implementation must canonicalize
-         it for lookup.
-         """
-         raise NotImplementedError()
-
-     def _iter_distributions(self) -> Iterator["BaseDistribution"]:
-         """Iterate through installed distributions.
-
-         This function should be implemented by subclasses, but never called
-         directly. Use the public ``iter_all_distributions()`` instead, which
-         implements additional logic to make sure the distributions are valid.
-         """
-         raise NotImplementedError()
-
-     def iter_all_distributions(self) -> Iterator[BaseDistribution]:
-         """Iterate through all installed distributions without any filtering."""
-         for dist in self._iter_distributions():
-             # Make sure the distribution actually comes from a valid Python
-             # packaging distribution. Pip's AdjacentTempDirectory leaves folders
-             # e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The
-             # valid project name pattern is taken from PEP 508.
-             project_name_valid = re.match(
-                 r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$",
-                 dist.canonical_name,
-                 flags=re.IGNORECASE,
-             )
-             if not project_name_valid:
-                 logger.warning(
-                     "Ignoring invalid distribution %s (%s)",
-                     dist.canonical_name,
-                     dist.location,
-                 )
-                 continue
-             yield dist
-
-     def iter_installed_distributions(
-         self,
-         local_only: bool = True,
-         skip: Container[str] = stdlib_pkgs,
-         include_editables: bool = True,
-         editables_only: bool = False,
-         user_only: bool = False,
-     ) -> Iterator[BaseDistribution]:
-         """Return a list of installed distributions.
-
-         This is based on ``iter_all_distributions()`` with additional filtering
-         options. Note that ``iter_installed_distributions()`` without arguments
-         is *not* equal to ``iter_all_distributions()``, since some of the
-         configurations exclude packages by default.
-
-         :param local_only: If True (default), only return installations
-             local to the current virtualenv, if in a virtualenv.
-         :param skip: An iterable of canonicalized project names to ignore;
-             defaults to ``stdlib_pkgs``.
-         :param include_editables: If False, don't report editables.
-         :param editables_only: If True, only report editables.
-         :param user_only: If True, only report installations in the user
-             site directory.
-         """
-         it = self.iter_all_distributions()
-         if local_only:
-             it = (d for d in it if d.local)
-         if not include_editables:
-             it = (d for d in it if not d.editable)
-         if editables_only:
-             it = (d for d in it if d.editable)
-         if user_only:
-             it = (d for d in it if d.in_usersite)
-         return (d for d in it if d.canonical_name not in skip)
-
-
- class Wheel(Protocol):
-     location: str
-
-     def as_zipfile(self) -> zipfile.ZipFile:
-         raise NotImplementedError()
-
-
- class FilesystemWheel(Wheel):
-     def __init__(self, location: str) -> None:
-         self.location = location
-
-     def as_zipfile(self) -> zipfile.ZipFile:
-         return zipfile.ZipFile(self.location, allowZip64=True)
-
-
- class MemoryWheel(Wheel):
-     def __init__(self, location: str, stream: IO[bytes]) -> None:
-         self.location = location
-         self.stream = stream
-
-     def as_zipfile(self) -> zipfile.ZipFile:
-         return zipfile.ZipFile(self.stream, allowZip64=True)
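
The path arithmetic in _convert_installed_files_path is easiest to follow with concrete inputs; here is the same loop as a standalone function so it runs without pip (outputs shown for POSIX paths):

import pathlib

def convert(entry, info):
    # Same logic as pip's _convert_installed_files_path: cancel leading ".."
    # parts of the entry against the egg-info directory's path parts.
    while entry and entry[0] == "..":
        if not info or info[-1] == "..":
            info += ("..",)
        else:
            info = info[:-1]
        entry = entry[1:]
    return str(pathlib.Path(*info, *entry))

# installed-files.txt entry "../foo/bar.py" relative to "pkg-1.0.egg-info"
print(convert(("..", "foo", "bar.py"), ("pkg-1.0.egg-info",)))  # foo/bar.py
# more ".." parts than info parts -> the path escapes the package root
print(convert(("..", "..", "x.py"), ("pkg-1.0.egg-info",)))     # ../x.py
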
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/utf1632prober.py DELETED
@@ -1,225 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- #
3
- # Contributor(s):
4
- # Jason Zavaglia
5
- #
6
- # This library is free software; you can redistribute it and/or
7
- # modify it under the terms of the GNU Lesser General Public
8
- # License as published by the Free Software Foundation; either
9
- # version 2.1 of the License, or (at your option) any later version.
10
- #
11
- # This library is distributed in the hope that it will be useful,
12
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
13
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14
- # Lesser General Public License for more details.
15
- #
16
- # You should have received a copy of the GNU Lesser General Public
17
- # License along with this library; if not, write to the Free Software
18
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19
- # 02110-1301 USA
20
- ######################### END LICENSE BLOCK #########################
21
- from typing import List, Union
22
-
23
- from .charsetprober import CharSetProber
24
- from .enums import ProbingState
25
-
26
-
27
- class UTF1632Prober(CharSetProber):
28
- """
29
- This class simply looks for occurrences of zero bytes, and infers
30
- whether the file is UTF16 or UTF32 (low-endian or big-endian)
31
- For instance, files looking like ( \0 \0 \0 [nonzero] )+
32
- have a good probability to be UTF32BE. Files looking like ( \0 [nonzero] )+
33
- may be guessed to be UTF16BE, and inversely for little-endian varieties.
34
- """
35
-
36
- # how many logical characters to scan before feeling confident of prediction
37
- MIN_CHARS_FOR_DETECTION = 20
38
- # a fixed constant ratio of expected zeros or non-zeros in modulo-position.
39
- EXPECTED_RATIO = 0.94
40
-
41
- def __init__(self) -> None:
42
- super().__init__()
43
- self.position = 0
44
- self.zeros_at_mod = [0] * 4
45
- self.nonzeros_at_mod = [0] * 4
46
- self._state = ProbingState.DETECTING
47
- self.quad = [0, 0, 0, 0]
48
- self.invalid_utf16be = False
49
- self.invalid_utf16le = False
50
- self.invalid_utf32be = False
51
- self.invalid_utf32le = False
52
- self.first_half_surrogate_pair_detected_16be = False
53
- self.first_half_surrogate_pair_detected_16le = False
54
- self.reset()
55
-
56
- def reset(self) -> None:
57
- super().reset()
58
- self.position = 0
59
- self.zeros_at_mod = [0] * 4
60
- self.nonzeros_at_mod = [0] * 4
61
- self._state = ProbingState.DETECTING
62
- self.invalid_utf16be = False
63
- self.invalid_utf16le = False
64
- self.invalid_utf32be = False
65
- self.invalid_utf32le = False
66
- self.first_half_surrogate_pair_detected_16be = False
67
- self.first_half_surrogate_pair_detected_16le = False
68
- self.quad = [0, 0, 0, 0]
69
-
70
- @property
71
- def charset_name(self) -> str:
72
- if self.is_likely_utf32be():
73
- return "utf-32be"
74
- if self.is_likely_utf32le():
75
- return "utf-32le"
76
- if self.is_likely_utf16be():
77
- return "utf-16be"
78
- if self.is_likely_utf16le():
79
- return "utf-16le"
80
- # default to something valid
81
- return "utf-16"
82
-
83
- @property
84
- def language(self) -> str:
85
- return ""
86
-
87
- def approx_32bit_chars(self) -> float:
88
- return max(1.0, self.position / 4.0)
89
-
90
- def approx_16bit_chars(self) -> float:
91
- return max(1.0, self.position / 2.0)
92
-
93
- def is_likely_utf32be(self) -> bool:
94
- approx_chars = self.approx_32bit_chars()
95
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
96
- self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
97
- and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
98
- and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
99
- and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
100
- and not self.invalid_utf32be
101
- )
102
-
103
- def is_likely_utf32le(self) -> bool:
104
- approx_chars = self.approx_32bit_chars()
105
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
106
- self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
107
- and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
108
- and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
109
- and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
110
- and not self.invalid_utf32le
111
- )
112
-
113
- def is_likely_utf16be(self) -> bool:
114
- approx_chars = self.approx_16bit_chars()
115
- return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
116
-             (self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
-             > self.EXPECTED_RATIO
-             and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
-             > self.EXPECTED_RATIO
-             and not self.invalid_utf16be
-         )
- 
-     def is_likely_utf16le(self) -> bool:
-         approx_chars = self.approx_16bit_chars()
-         return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
-             (self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
-             > self.EXPECTED_RATIO
-             and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
-             > self.EXPECTED_RATIO
-             and not self.invalid_utf16le
-         )
- 
-     def validate_utf32_characters(self, quad: List[int]) -> None:
-         """
-         Validate if the quad of bytes is valid UTF-32.
- 
-         UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
-         excluding 0x0000D800 - 0x0000DFFF
- 
-         https://en.wikipedia.org/wiki/UTF-32
-         """
-         if (
-             quad[0] != 0
-             or quad[1] > 0x10
-             or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
-         ):
-             self.invalid_utf32be = True
-         if (
-             quad[3] != 0
-             or quad[2] > 0x10
-             or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
-         ):
-             self.invalid_utf32le = True
- 
-     def validate_utf16_characters(self, pair: List[int]) -> None:
-         """
-         Validate if the pair of bytes is valid UTF-16.
- 
-         UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xDFFF
-         with an exception for surrogate pairs, which must be in the range
-         0xD800-0xDBFF followed by 0xDC00-0xDFFF
- 
-         https://en.wikipedia.org/wiki/UTF-16
-         """
-         if not self.first_half_surrogate_pair_detected_16be:
-             if 0xD8 <= pair[0] <= 0xDB:
-                 self.first_half_surrogate_pair_detected_16be = True
-             elif 0xDC <= pair[0] <= 0xDF:
-                 self.invalid_utf16be = True
-         else:
-             if 0xDC <= pair[0] <= 0xDF:
-                 self.first_half_surrogate_pair_detected_16be = False
-             else:
-                 self.invalid_utf16be = True
- 
-         if not self.first_half_surrogate_pair_detected_16le:
-             if 0xD8 <= pair[1] <= 0xDB:
-                 self.first_half_surrogate_pair_detected_16le = True
-             elif 0xDC <= pair[1] <= 0xDF:
-                 self.invalid_utf16le = True
-         else:
-             if 0xDC <= pair[1] <= 0xDF:
-                 self.first_half_surrogate_pair_detected_16le = False
-             else:
-                 self.invalid_utf16le = True
- 
-     def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
-         for c in byte_str:
-             mod4 = self.position % 4
-             self.quad[mod4] = c
-             if mod4 == 3:
-                 self.validate_utf32_characters(self.quad)
-                 self.validate_utf16_characters(self.quad[0:2])
-                 self.validate_utf16_characters(self.quad[2:4])
-             if c == 0:
-                 self.zeros_at_mod[mod4] += 1
-             else:
-                 self.nonzeros_at_mod[mod4] += 1
-             self.position += 1
-         return self.state
- 
-     @property
-     def state(self) -> ProbingState:
-         if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
-             # terminal, decided states
-             return self._state
-         if self.get_confidence() > 0.80:
-             self._state = ProbingState.FOUND_IT
-         elif self.position > 4 * 1024:
-             # if we get to 4kb into the file, and we can't conclude it's UTF,
-             # let's give up
-             self._state = ProbingState.NOT_ME
-         return self._state
- 
-     def get_confidence(self) -> float:
-         return (
-             0.85
-             if (
-                 self.is_likely_utf16le()
-                 or self.is_likely_utf16be()
-                 or self.is_likely_utf32le()
-                 or self.is_likely_utf32be()
-             )
-             else 0.00
-         )
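
The deleted prober's core signal is byte-position statistics: in UTF-16LE text that is mostly ASCII, the high byte of every 16-bit code unit is zero, so zero bytes cluster at odd offsets (and the analogous pattern holds per offset mod 4 for UTF-32). A minimal standalone sketch of that idea, independent of chardet's classes:

    data = "plain ascii text".encode("utf-16le")
    zeros = [0, 0]  # zero-byte counts at even/odd offsets
    for i, b in enumerate(data):
        if b == 0:
            zeros[i % 2] += 1
    # Every odd offset is a zero byte for ASCII-range UTF-16LE input.
    assert zeros[1] == len(data) // 2 and zeros[0] == 0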
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/other.py DELETED
@@ -1,161 +0,0 @@
- """
-     pygments.formatters.other
-     ~~~~~~~~~~~~~~~~~~~~~~~~~
- 
-     Other formatters: NullFormatter, RawTokenFormatter.
- 
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
- 
- from pip._vendor.pygments.formatter import Formatter
- from pip._vendor.pygments.util import get_choice_opt
- from pip._vendor.pygments.token import Token
- from pip._vendor.pygments.console import colorize
- 
- __all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
- 
- 
- class NullFormatter(Formatter):
-     """
-     Output the text unchanged without any formatting.
-     """
-     name = 'Text only'
-     aliases = ['text', 'null']
-     filenames = ['*.txt']
- 
-     def format(self, tokensource, outfile):
-         enc = self.encoding
-         for ttype, value in tokensource:
-             if enc:
-                 outfile.write(value.encode(enc))
-             else:
-                 outfile.write(value)
- 
- 
- class RawTokenFormatter(Formatter):
-     r"""
-     Format tokens as a raw representation for storing token streams.
- 
-     The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
-     be converted to a token stream with the `RawTokenLexer`, described in the
-     :doc:`lexer list <lexers>`.
- 
-     Only two options are accepted:
- 
-     `compress`
-         If set to ``'gz'`` or ``'bz2'``, compress the output with the given
-         compression algorithm after encoding (default: ``''``).
-     `error_color`
-         If set to a color name, highlight error tokens using that color.  If
-         set but with no value, defaults to ``'red'``.
- 
-         .. versionadded:: 0.11
- 
-     """
-     name = 'Raw tokens'
-     aliases = ['raw', 'tokens']
-     filenames = ['*.raw']
- 
-     unicodeoutput = False
- 
-     def __init__(self, **options):
-         Formatter.__init__(self, **options)
-         # We ignore self.encoding if it is set, since it gets set for lexer
-         # and formatter if given with -Oencoding on the command line.
-         # The RawTokenFormatter outputs only ASCII. Override here.
-         self.encoding = 'ascii'  # let pygments.format() do the right thing
-         self.compress = get_choice_opt(options, 'compress',
-                                        ['', 'none', 'gz', 'bz2'], '')
-         self.error_color = options.get('error_color', None)
-         if self.error_color is True:
-             self.error_color = 'red'
-         if self.error_color is not None:
-             try:
-                 colorize(self.error_color, '')
-             except KeyError:
-                 raise ValueError("Invalid color %r specified" %
-                                  self.error_color)
- 
-     def format(self, tokensource, outfile):
-         try:
-             outfile.write(b'')
-         except TypeError:
-             raise TypeError('The raw tokens formatter needs a binary '
-                             'output file')
-         if self.compress == 'gz':
-             import gzip
-             outfile = gzip.GzipFile('', 'wb', 9, outfile)
- 
-             write = outfile.write
-             flush = outfile.close
-         elif self.compress == 'bz2':
-             import bz2
-             compressor = bz2.BZ2Compressor(9)
- 
-             def write(text):
-                 outfile.write(compressor.compress(text))
- 
-             def flush():
-                 outfile.write(compressor.flush())
-                 outfile.flush()
-         else:
-             write = outfile.write
-             flush = outfile.flush
- 
-         if self.error_color:
-             for ttype, value in tokensource:
-                 line = b"%r\t%r\n" % (ttype, value)
-                 if ttype is Token.Error:
-                     write(colorize(self.error_color, line))
-                 else:
-                     write(line)
-         else:
-             for ttype, value in tokensource:
-                 write(b"%r\t%r\n" % (ttype, value))
-         flush()
- 
- 
- TESTCASE_BEFORE = '''\
-     def testNeedsName(lexer):
-         fragment = %r
-         tokens = [
- '''
- TESTCASE_AFTER = '''\
-         ]
-         assert list(lexer.get_tokens(fragment)) == tokens
- '''
- 
- 
- class TestcaseFormatter(Formatter):
-     """
-     Format tokens as appropriate for a new testcase.
- 
-     .. versionadded:: 2.0
-     """
-     name = 'Testcase'
-     aliases = ['testcase']
- 
-     def __init__(self, **options):
-         Formatter.__init__(self, **options)
-         if self.encoding is not None and self.encoding != 'utf-8':
-             raise ValueError("Only None and utf-8 are allowed encodings.")
- 
-     def format(self, tokensource, outfile):
-         indentation = ' ' * 12
-         rawbuf = []
-         outbuf = []
-         for ttype, value in tokensource:
-             rawbuf.append(value)
-             outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
- 
-         before = TESTCASE_BEFORE % (''.join(rawbuf),)
-         during = ''.join(outbuf)
-         after = TESTCASE_AFTER
-         if self.encoding is None:
-             outfile.write(before + during + after)
-         else:
-             outfile.write(before.encode('utf-8'))
-             outfile.write(during.encode('utf-8'))
-             outfile.write(after.encode('utf-8'))
-         outfile.flush()
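
As a usage sketch (run against the regular pygments distribution rather than this pip-vendored copy, which is not meant to be imported directly):

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import RawTokenFormatter

    # RawTokenFormatter is a binary formatter: highlight() returns bytes,
    # one "tokentype<TAB>repr(value)" line per token.
    raw = highlight("x = 1\n", PythonLexer(), RawTokenFormatter())
    for line in raw.decode("ascii").splitlines()[:3]:
        print(line)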
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/sphinxext.py DELETED
@@ -1,217 +0,0 @@
- """
-     pygments.sphinxext
-     ~~~~~~~~~~~~~~~~~~
- 
-     Sphinx extension to generate automatic documentation of lexers,
-     formatters and filters.
- 
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
- 
- import sys
- 
- from docutils import nodes
- from docutils.statemachine import ViewList
- from docutils.parsers.rst import Directive
- from sphinx.util.nodes import nested_parse_with_titles
- 
- 
- MODULEDOC = '''
- .. module:: %s
- 
- %s
- %s
- '''
- 
- LEXERDOC = '''
- .. class:: %s
- 
-     :Short names: %s
-     :Filenames: %s
-     :MIME types: %s
- 
-     %s
- 
- '''
- 
- FMTERDOC = '''
- .. class:: %s
- 
-     :Short names: %s
-     :Filenames: %s
- 
-     %s
- 
- '''
- 
- FILTERDOC = '''
- .. class:: %s
- 
-     :Name: %s
- 
-     %s
- 
- '''
- 
- 
- class PygmentsDoc(Directive):
-     """
-     A directive to collect all lexers/formatters/filters and generate
-     autoclass directives for them.
-     """
-     has_content = False
-     required_arguments = 1
-     optional_arguments = 0
-     final_argument_whitespace = False
-     option_spec = {}
- 
-     def run(self):
-         self.filenames = set()
-         if self.arguments[0] == 'lexers':
-             out = self.document_lexers()
-         elif self.arguments[0] == 'formatters':
-             out = self.document_formatters()
-         elif self.arguments[0] == 'filters':
-             out = self.document_filters()
-         elif self.arguments[0] == 'lexers_overview':
-             out = self.document_lexers_overview()
-         else:
-             raise Exception('invalid argument for "pygmentsdoc" directive')
-         node = nodes.compound()
-         vl = ViewList(out.split('\n'), source='')
-         nested_parse_with_titles(self.state, vl, node)
-         for fn in self.filenames:
-             self.state.document.settings.record_dependencies.add(fn)
-         return node.children
- 
-     def document_lexers_overview(self):
-         """Generate a tabular overview of all lexers.
- 
-         The columns are the lexer name, the extensions handled by this lexer
-         (or "None"), the aliases and a link to the lexer class."""
-         from pip._vendor.pygments.lexers._mapping import LEXERS
-         from pip._vendor.pygments.lexers import find_lexer_class
-         out = []
- 
-         table = []
- 
-         def format_link(name, url):
-             if url:
-                 return f'`{name} <{url}>`_'
-             return name
- 
-         for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
-             lexer_cls = find_lexer_class(data[1])
-             extensions = lexer_cls.filenames + lexer_cls.alias_filenames
- 
-             table.append({
-                 'name': format_link(data[1], lexer_cls.url),
-                 'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\') or 'None',
-                 'aliases': ', '.join(data[2]),
-                 'class': f'{data[0]}.{classname}'
-             })
- 
-         column_names = ['name', 'extensions', 'aliases', 'class']
-         column_lengths = [max([len(row[column]) for row in table if row[column]])
-                           for column in column_names]
- 
-         def write_row(*columns):
-             """Format a table row"""
-             out = []
-             for l, c in zip(column_lengths, columns):
-                 if c:
-                     out.append(c.ljust(l))
-                 else:
-                     out.append(' '*l)
- 
-             return ' '.join(out)
- 
-         def write_seperator():
-             """Write a table separator row"""
-             sep = ['='*c for c in column_lengths]
-             return write_row(*sep)
- 
-         out.append(write_seperator())
-         out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
-         out.append(write_seperator())
-         for row in table:
-             out.append(write_row(
-                 row['name'],
-                 row['extensions'],
-                 row['aliases'],
-                 f':class:`~{row["class"]}`'))
-         out.append(write_seperator())
- 
-         return '\n'.join(out)
- 
-     def document_lexers(self):
-         from pip._vendor.pygments.lexers._mapping import LEXERS
-         out = []
-         modules = {}
-         moduledocstrings = {}
-         for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
-             module = data[0]
-             mod = __import__(module, None, None, [classname])
-             self.filenames.add(mod.__file__)
-             cls = getattr(mod, classname)
-             if not cls.__doc__:
-                 print("Warning: %s does not have a docstring." % classname)
-             docstring = cls.__doc__
-             if isinstance(docstring, bytes):
-                 docstring = docstring.decode('utf8')
-             modules.setdefault(module, []).append((
-                 classname,
-                 ', '.join(data[2]) or 'None',
-                 ', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
-                 ', '.join(data[4]) or 'None',
-                 docstring))
-             if module not in moduledocstrings:
-                 moddoc = mod.__doc__
-                 if isinstance(moddoc, bytes):
-                     moddoc = moddoc.decode('utf8')
-                 moduledocstrings[module] = moddoc
- 
-         for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
-             if moduledocstrings[module] is None:
-                 raise Exception("Missing docstring for %s" % (module,))
-             heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
-             out.append(MODULEDOC % (module, heading, '-'*len(heading)))
-             for data in lexers:
-                 out.append(LEXERDOC % data)
- 
-         return ''.join(out)
- 
-     def document_formatters(self):
-         from pip._vendor.pygments.formatters import FORMATTERS
- 
-         out = []
-         for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
-             module = data[0]
-             mod = __import__(module, None, None, [classname])
-             self.filenames.add(mod.__file__)
-             cls = getattr(mod, classname)
-             docstring = cls.__doc__
-             if isinstance(docstring, bytes):
-                 docstring = docstring.decode('utf8')
-             heading = cls.__name__
-             out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
-                                    ', '.join(data[3]).replace('*', '\\*') or 'None',
-                                    docstring))
-         return ''.join(out)
- 
-     def document_filters(self):
-         from pip._vendor.pygments.filters import FILTERS
- 
-         out = []
-         for name, cls in FILTERS.items():
-             self.filenames.add(sys.modules[cls.__module__].__file__)
-             docstring = cls.__doc__
-             if isinstance(docstring, bytes):
-                 docstring = docstring.decode('utf8')
-             out.append(FILTERDOC % (cls.__name__, name, docstring))
-         return ''.join(out)
- 
- 
- def setup(app):
-     app.add_directive('pygmentsdoc', PygmentsDoc)
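
For reference, wiring this extension into a Sphinx build is a single conf.py entry (shown against the upstream pygments package; the pip-vendored copy above is not intended to be loaded as an extension):

    # conf.py (sketch)
    extensions = ["pygments.sphinxext"]

A document can then emit the generated tables and autoclass blocks with the directive this module registers, e.g. ".. pygmentsdoc:: lexers_overview".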
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/testing.py DELETED
@@ -1,331 +0,0 @@
- # testing.py
- 
- from contextlib import contextmanager
- import typing
- 
- from .core import (
-     ParserElement,
-     ParseException,
-     Keyword,
-     __diag__,
-     __compat__,
- )
- 
- 
- class pyparsing_test:
-     """
-     namespace class for classes useful in writing unit tests
-     """
- 
-     class reset_pyparsing_context:
-         """
-         Context manager to be used when writing unit tests that modify pyparsing config values:
-         - packrat parsing
-         - bounded recursion parsing
-         - default whitespace characters.
-         - default keyword characters
-         - literal string auto-conversion class
-         - __diag__ settings
- 
-         Example::
- 
-             with reset_pyparsing_context():
-                 # test that literals used to construct a grammar are automatically suppressed
-                 ParserElement.inlineLiteralsUsing(Suppress)
- 
-                 term = Word(alphas) | Word(nums)
-                 group = Group('(' + term[...] + ')')
- 
-                 # assert that the '()' characters are not included in the parsed tokens
-                 self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
- 
-             # after exiting context manager, literals are converted to Literal expressions again
-         """
- 
-         def __init__(self):
-             self._save_context = {}
- 
-         def save(self):
-             self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
-             self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
- 
-             self._save_context[
-                 "literal_string_class"
-             ] = ParserElement._literalStringClass
- 
-             self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
- 
-             self._save_context["packrat_enabled"] = ParserElement._packratEnabled
-             if ParserElement._packratEnabled:
-                 self._save_context[
-                     "packrat_cache_size"
-                 ] = ParserElement.packrat_cache.size
-             else:
-                 self._save_context["packrat_cache_size"] = None
-             self._save_context["packrat_parse"] = ParserElement._parse
-             self._save_context[
-                 "recursion_enabled"
-             ] = ParserElement._left_recursion_enabled
- 
-             self._save_context["__diag__"] = {
-                 name: getattr(__diag__, name) for name in __diag__._all_names
-             }
- 
-             self._save_context["__compat__"] = {
-                 "collect_all_And_tokens": __compat__.collect_all_And_tokens
-             }
- 
-             return self
- 
-         def restore(self):
-             # reset pyparsing global state
-             if (
-                 ParserElement.DEFAULT_WHITE_CHARS
-                 != self._save_context["default_whitespace"]
-             ):
-                 ParserElement.set_default_whitespace_chars(
-                     self._save_context["default_whitespace"]
-                 )
- 
-             ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
- 
-             Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
-             ParserElement.inlineLiteralsUsing(
-                 self._save_context["literal_string_class"]
-             )
- 
-             for name, value in self._save_context["__diag__"].items():
-                 (__diag__.enable if value else __diag__.disable)(name)
- 
-             ParserElement._packratEnabled = False
-             if self._save_context["packrat_enabled"]:
-                 ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
-             else:
-                 ParserElement._parse = self._save_context["packrat_parse"]
-             ParserElement._left_recursion_enabled = self._save_context[
-                 "recursion_enabled"
-             ]
- 
-             __compat__.collect_all_And_tokens = self._save_context["__compat__"]
- 
-             return self
- 
-         def copy(self):
-             ret = type(self)()
-             ret._save_context.update(self._save_context)
-             return ret
- 
-         def __enter__(self):
-             return self.save()
- 
-         def __exit__(self, *args):
-             self.restore()
- 
-     class TestParseResultsAsserts:
-         """
-         A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
-         """
- 
-         def assertParseResultsEquals(
-             self, result, expected_list=None, expected_dict=None, msg=None
-         ):
-             """
-             Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
-             and compare any defined results names with an optional ``expected_dict``.
-             """
-             if expected_list is not None:
-                 self.assertEqual(expected_list, result.as_list(), msg=msg)
-             if expected_dict is not None:
-                 self.assertEqual(expected_dict, result.as_dict(), msg=msg)
- 
-         def assertParseAndCheckList(
-             self, expr, test_string, expected_list, msg=None, verbose=True
-         ):
-             """
-             Convenience wrapper assert to test a parser element and input string, and assert that
-             the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
-             """
-             result = expr.parse_string(test_string, parse_all=True)
-             if verbose:
-                 print(result.dump())
-             else:
-                 print(result.as_list())
-             self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
- 
-         def assertParseAndCheckDict(
-             self, expr, test_string, expected_dict, msg=None, verbose=True
-         ):
-             """
-             Convenience wrapper assert to test a parser element and input string, and assert that
-             the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
-             """
-             result = expr.parse_string(test_string, parseAll=True)
-             if verbose:
-                 print(result.dump())
-             else:
-                 print(result.as_list())
-             self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
- 
-         def assertRunTestResults(
-             self, run_tests_report, expected_parse_results=None, msg=None
-         ):
-             """
-             Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
-             list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
-             with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
-             Finally, asserts that the overall ``runTests()`` success value is ``True``.
- 
-             :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
-             :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
-             """
-             run_test_success, run_test_results = run_tests_report
- 
-             if expected_parse_results is not None:
-                 merged = [
-                     (*rpt, expected)
-                     for rpt, expected in zip(run_test_results, expected_parse_results)
-                 ]
-                 for test_string, result, expected in merged:
-                     # expected should be a tuple containing a list and/or a dict or an exception,
-                     # and optional failure message string
-                     # an empty tuple will skip any result validation
-                     fail_msg = next(
-                         (exp for exp in expected if isinstance(exp, str)), None
-                     )
-                     expected_exception = next(
-                         (
-                             exp
-                             for exp in expected
-                             if isinstance(exp, type) and issubclass(exp, Exception)
-                         ),
-                         None,
-                     )
-                     if expected_exception is not None:
-                         with self.assertRaises(
-                             expected_exception=expected_exception, msg=fail_msg or msg
-                         ):
-                             if isinstance(result, Exception):
-                                 raise result
-                     else:
-                         expected_list = next(
-                             (exp for exp in expected if isinstance(exp, list)), None
-                         )
-                         expected_dict = next(
-                             (exp for exp in expected if isinstance(exp, dict)), None
-                         )
-                         if (expected_list, expected_dict) != (None, None):
-                             self.assertParseResultsEquals(
-                                 result,
-                                 expected_list=expected_list,
-                                 expected_dict=expected_dict,
-                                 msg=fail_msg or msg,
-                             )
-                         else:
-                             # warning here maybe?
-                             print("no validation for {!r}".format(test_string))
- 
-             # do this last, in case some specific test results can be reported instead
-             self.assertTrue(
-                 run_test_success, msg=msg if msg is not None else "failed runTests"
-             )
- 
-         @contextmanager
-         def assertRaisesParseException(self, exc_type=ParseException, msg=None):
-             with self.assertRaises(exc_type, msg=msg):
-                 yield
- 
-     @staticmethod
-     def with_line_numbers(
-         s: str,
-         start_line: typing.Optional[int] = None,
-         end_line: typing.Optional[int] = None,
-         expand_tabs: bool = True,
-         eol_mark: str = "|",
-         mark_spaces: typing.Optional[str] = None,
-         mark_control: typing.Optional[str] = None,
-     ) -> str:
-         """
-         Helpful method for debugging a parser - prints a string with line and column numbers.
-         (Line and column numbers are 1-based.)
- 
-         :param s: str - string to be printed with line and column numbers
-         :param start_line: int - (optional) starting line number in s to print (default=1)
-         :param end_line: int - (optional) ending line number in s to print (default=len(s))
-         :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
-         :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
-         :param mark_spaces: str - (optional) special character to display in place of spaces
-         :param mark_control: str - (optional) convert non-printing control characters to a placeholding
-                                  character; valid values:
-                                  - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
-                                  - any single character string - replace control characters with given string
-                                  - None (default) - string is displayed as-is
- 
-         :return: str - input string with leading line numbers and column number headers
-         """
-         if expand_tabs:
-             s = s.expandtabs()
-         if mark_control is not None:
-             if mark_control == "unicode":
-                 tbl = str.maketrans(
-                     {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
-                     | {127: 0x2421}
-                 )
-                 eol_mark = ""
-             else:
-                 tbl = str.maketrans(
-                     {c: mark_control for c in list(range(0, 32)) + [127]}
-                 )
-             s = s.translate(tbl)
-         if mark_spaces is not None and mark_spaces != " ":
-             if mark_spaces == "unicode":
-                 tbl = str.maketrans({9: 0x2409, 32: 0x2423})
-                 s = s.translate(tbl)
-             else:
-                 s = s.replace(" ", mark_spaces)
-         if start_line is None:
-             start_line = 1
-         if end_line is None:
-             end_line = len(s)
-         end_line = min(end_line, len(s))
-         start_line = min(max(1, start_line), end_line)
- 
-         if mark_control != "unicode":
-             s_lines = s.splitlines()[start_line - 1 : end_line]
-         else:
-             s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
-         if not s_lines:
-             return ""
- 
-         lineno_width = len(str(end_line))
-         max_line_len = max(len(line) for line in s_lines)
-         lead = " " * (lineno_width + 1)
-         if max_line_len >= 99:
-             header0 = (
-                 lead
-                 + "".join(
-                     "{}{}".format(" " * 99, (i + 1) % 100)
-                     for i in range(max(max_line_len // 100, 1))
-                 )
-                 + "\n"
-             )
-         else:
-             header0 = ""
-         header1 = (
-             header0
-             + lead
-             + "".join(
-                 "         {}".format((i + 1) % 10)
-                 for i in range(-(-max_line_len // 10))
-             )
-             + "\n"
-         )
-         header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
-         return (
-             header1
-             + header2
-             + "\n".join(
-                 "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
-                 for i, line in enumerate(s_lines, start=start_line)
-             )
-             + "\n"
-         )
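
A quick illustration of the debugging helper defined above (run against the standalone pyparsing package, which ships the same class):

    from pyparsing.testing import pyparsing_test as ppt

    # Prints 1-based column rulers above the string, with "|" end-of-line
    # marks that make trailing whitespace visible.
    print(ppt.with_line_numbers("abc\ndef \n"))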
 
spaces/Blessin/drama-director/app.py DELETED
@@ -1,56 +0,0 @@
- import gradio as gr
- import openai
- from gtts import gTTS
- import tempfile
- 
- def generate_stage_directions(location, situation, api_key):
-     prompt = (
-         f"Write detailed stage directions for a scene with 5 characters, set in a {location}. "
-         "You are reading these directions out loud to an audience, so keep the stage directions conversational. "
-         "Do not break it down into different sections. Each character enters one by one, two of them enter as a couple. "
-         "As they enter, tell us their name and describe their physical characteristics, their emotional state and their actions, "
-         "gestures and movements in the scene. Write detailed stage direction on how they interact with the location they are in "
-         "and with each other, with detailed description on their movements, actions and gestures in the scene. Make the overall "
-         "scene highly dramatic, full of twists and turns, with lots of movement by the characters that keep changing positions "
-         f"and moving around. At some point, a {situation} happens in the scene. Show the characters interacting with elements of "
-         "the location. Describe in vivid detail their emotion, facial expressions and emotions. You will also write dialogues for "
-         "each character. Keep the dialogues short. Keep the scene mostly non-verbal, with only a few dialogues. Make the scene "
-         "very dramatic, emotional, thrilling. Keep your response limited to 750 words."
-     )
- 
-     openai.api_key = api_key  # Set the API key from the user input
- 
-     try:
-         response = openai.Completion.create(
-             engine="text-davinci-003",
-             prompt=prompt,
-             max_tokens=750,
-             temperature=0.7,
-         )
-         stage_directions = response.choices[0].text.strip()
-         response_audio_path = text_to_audio(stage_directions)
-         return response_audio_path
-     except Exception as e:
-         return str(e)
- 
- def text_to_audio(text):
-     tts = gTTS(text, lang='en')
-     temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
-     tts.save(temp_file.name)
-     return temp_file.name
- 
- # Create Gradio UI
- iface = gr.Interface(
-     fn=generate_stage_directions,
-     inputs=[
-         gr.Textbox(label="Location"),
-         gr.Textbox(label="Situation"),
-         gr.Textbox(label="API Key")
-     ],
-     outputs=gr.Audio(type='filepath', label="Stage Directions"),
-     live=True,
-     title="DramaDirector",
-     description="Input a location, situation, and your OpenAI API key to generate stage directions.",
- )
- 
- iface.launch()
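
Calling the handler directly (outside the Gradio UI) is a cheap way to smoke-test it; note that this issues a real OpenAI request when given a valid key, and the key string below is a placeholder:

    # Returns a path to an .mp3 file on success, or an error string.
    audio_path = generate_stage_directions("abandoned theatre", "sudden power cut", "sk-PLACEHOLDER")
    print(audio_path)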
 
spaces/Boadiwaa/Recipes/openai/error.py DELETED
@@ -1,164 +0,0 @@
- import openai
- 
- 
- class OpenAIError(Exception):
-     def __init__(
-         self,
-         message=None,
-         http_body=None,
-         http_status=None,
-         json_body=None,
-         headers=None,
-         code=None,
-     ):
-         super(OpenAIError, self).__init__(message)
- 
-         if http_body and hasattr(http_body, "decode"):
-             try:
-                 http_body = http_body.decode("utf-8")
-             except BaseException:
-                 http_body = (
-                     "<Could not decode body as utf-8. "
-                     "Please report to [email protected]>"
-                 )
- 
-         self._message = message
-         self.http_body = http_body
-         self.http_status = http_status
-         self.json_body = json_body
-         self.headers = headers or {}
-         self.code = code
-         self.request_id = self.headers.get("request-id", None)
-         self.error = self.construct_error_object()
-         self.organization = self.headers.get("openai-organization", None)
- 
-     def __str__(self):
-         msg = self._message or "<empty message>"
-         if self.request_id is not None:
-             return "Request {0}: {1}".format(self.request_id, msg)
-         else:
-             return msg
- 
-     # Returns the underlying `Exception` (base class) message, which is usually
-     # the raw message returned by OpenAI's API. This was previously available
-     # in python2 via `error.message`. Unlike `str(error)`, it omits "Request
-     # req_..." from the beginning of the string.
-     @property
-     def user_message(self):
-         return self._message
- 
-     def __repr__(self):
-         return "%s(message=%r, http_status=%r, request_id=%r)" % (
-             self.__class__.__name__,
-             self._message,
-             self.http_status,
-             self.request_id,
-         )
- 
-     def construct_error_object(self):
-         if (
-             self.json_body is None
-             or "error" not in self.json_body
-             or not isinstance(self.json_body["error"], dict)
-         ):
-             return None
- 
-         return openai.api_resources.error_object.ErrorObject.construct_from(
-             self.json_body["error"]
-         )
- 
- 
- class APIError(OpenAIError):
-     pass
- 
- 
- class TryAgain(OpenAIError):
-     pass
- 
- 
- class APIConnectionError(OpenAIError):
-     def __init__(
-         self,
-         message,
-         http_body=None,
-         http_status=None,
-         json_body=None,
-         headers=None,
-         code=None,
-         should_retry=False,
-     ):
-         super(APIConnectionError, self).__init__(
-             message, http_body, http_status, json_body, headers, code
-         )
-         self.should_retry = should_retry
- 
- 
- class InvalidRequestError(OpenAIError):
-     def __init__(
-         self,
-         message,
-         param,
-         code=None,
-         http_body=None,
-         http_status=None,
-         json_body=None,
-         headers=None,
-     ):
-         super(InvalidRequestError, self).__init__(
-             message, http_body, http_status, json_body, headers, code
-         )
-         self.param = param
- 
-     def __repr__(self):
-         return "%s(message=%r, param=%r, code=%r, http_status=%r, " "request_id=%r)" % (
-             self.__class__.__name__,
-             self._message,
-             self.param,
-             self.code,
-             self.http_status,
-             self.request_id,
-         )
- 
-     def __reduce__(self):
-         return type(self), (
-             self._message,
-             self.param,
-             self.code,
-             self.http_body,
-             self.http_status,
-             self.json_body,
-             self.headers,
-         )
- 
- 
- class AuthenticationError(OpenAIError):
-     pass
- 
- 
- class PermissionError(OpenAIError):
-     pass
- 
- 
- class RateLimitError(OpenAIError):
-     pass
- 
- 
- class ServiceUnavailableError(OpenAIError):
-     pass
- 
- 
- class InvalidAPIType(OpenAIError):
-     pass
- 
- 
- class SignatureVerificationError(OpenAIError):
-     def __init__(self, message, sig_header, http_body=None):
-         super(SignatureVerificationError, self).__init__(message, http_body)
-         self.sig_header = sig_header
- 
-     def __reduce__(self):
-         return type(self), (
-             self._message,
-             self.sig_header,
-             self.http_body,
-         )
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/__init__.py DELETED
@@ -1,18 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- from . import transforms  # isort:skip
- 
- from .build import (
-     build_detection_test_loader,
-     build_detection_train_loader,
-     get_detection_dataset_dicts,
-     load_proposals_into_dataset,
-     print_instances_class_histogram,
- )
- from .catalog import DatasetCatalog, MetadataCatalog
- from .common import DatasetFromList, MapDataset
- from .dataset_mapper import DatasetMapper
- 
- # ensure the builtin datasets are registered
- from . import datasets, samplers  # isort:skip
- 
- __all__ = [k for k in globals().keys() if not k.startswith("_")]
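
The re-exported catalog API is the usual entry point for custom datasets; a minimal sketch (dataset name and contents are invented for illustration):

    from detectron2.data import DatasetCatalog, MetadataCatalog

    def toy_dataset():
        # Each dict follows detectron2's standard dataset format.
        return [{"file_name": "img0.jpg", "image_id": 0, "annotations": []}]

    DatasetCatalog.register("toy_train", toy_dataset)
    MetadataCatalog.get("toy_train").thing_classes = ["widget"]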
 
spaces/CVPR/LIVE/pybind11/tests/test_kwargs_and_defaults.py DELETED
@@ -1,192 +0,0 @@
- # -*- coding: utf-8 -*-
- import pytest
- 
- import env  # noqa: F401
- 
- from pybind11_tests import kwargs_and_defaults as m
- 
- 
- def test_function_signatures(doc):
-     assert doc(m.kw_func0) == "kw_func0(arg0: int, arg1: int) -> str"
-     assert doc(m.kw_func1) == "kw_func1(x: int, y: int) -> str"
-     assert doc(m.kw_func2) == "kw_func2(x: int = 100, y: int = 200) -> str"
-     assert doc(m.kw_func3) == "kw_func3(data: str = 'Hello world!') -> None"
-     assert doc(m.kw_func4) == "kw_func4(myList: List[int] = [13, 17]) -> str"
-     assert doc(m.kw_func_udl) == "kw_func_udl(x: int, y: int = 300) -> str"
-     assert doc(m.kw_func_udl_z) == "kw_func_udl_z(x: int, y: int = 0) -> str"
-     assert doc(m.args_function) == "args_function(*args) -> tuple"
-     assert doc(m.args_kwargs_function) == "args_kwargs_function(*args, **kwargs) -> tuple"
-     assert doc(m.KWClass.foo0) == \
-         "foo0(self: m.kwargs_and_defaults.KWClass, arg0: int, arg1: float) -> None"
-     assert doc(m.KWClass.foo1) == \
-         "foo1(self: m.kwargs_and_defaults.KWClass, x: int, y: float) -> None"
- 
- 
- def test_named_arguments(msg):
-     assert m.kw_func0(5, 10) == "x=5, y=10"
- 
-     assert m.kw_func1(5, 10) == "x=5, y=10"
-     assert m.kw_func1(5, y=10) == "x=5, y=10"
-     assert m.kw_func1(y=10, x=5) == "x=5, y=10"
- 
-     assert m.kw_func2() == "x=100, y=200"
-     assert m.kw_func2(5) == "x=5, y=200"
-     assert m.kw_func2(x=5) == "x=5, y=200"
-     assert m.kw_func2(y=10) == "x=100, y=10"
-     assert m.kw_func2(5, 10) == "x=5, y=10"
-     assert m.kw_func2(x=5, y=10) == "x=5, y=10"
- 
-     with pytest.raises(TypeError) as excinfo:
-         # noinspection PyArgumentList
-         m.kw_func2(x=5, y=10, z=12)
-     assert excinfo.match(
-         r'(?s)^kw_func2\(\): incompatible.*Invoked with: kwargs: ((x=5|y=10|z=12)(, |$))' + '{3}$')
- 
-     assert m.kw_func4() == "{13 17}"
-     assert m.kw_func4(myList=[1, 2, 3]) == "{1 2 3}"
- 
-     assert m.kw_func_udl(x=5, y=10) == "x=5, y=10"
-     assert m.kw_func_udl_z(x=5) == "x=5, y=0"
- 
- 
- def test_arg_and_kwargs():
-     args = 'arg1_value', 'arg2_value', 3
-     assert m.args_function(*args) == args
- 
-     args = 'a1', 'a2'
-     kwargs = dict(arg3='a3', arg4=4)
-     assert m.args_kwargs_function(*args, **kwargs) == (args, kwargs)
- 
- 
- def test_mixed_args_and_kwargs(msg):
-     mpa = m.mixed_plus_args
-     mpk = m.mixed_plus_kwargs
-     mpak = m.mixed_plus_args_kwargs
-     mpakd = m.mixed_plus_args_kwargs_defaults
- 
-     assert mpa(1, 2.5, 4, 99.5, None) == (1, 2.5, (4, 99.5, None))
-     assert mpa(1, 2.5) == (1, 2.5, ())
-     with pytest.raises(TypeError) as excinfo:
-         assert mpa(1)
-     assert msg(excinfo.value) == """
-         mixed_plus_args(): incompatible function arguments. The following argument types are supported:
-             1. (arg0: int, arg1: float, *args) -> tuple
- 
-         Invoked with: 1
-     """  # noqa: E501 line too long
-     with pytest.raises(TypeError) as excinfo:
-         assert mpa()
-     assert msg(excinfo.value) == """
-         mixed_plus_args(): incompatible function arguments. The following argument types are supported:
-             1. (arg0: int, arg1: float, *args) -> tuple
- 
-         Invoked with:
-     """  # noqa: E501 line too long
- 
-     assert mpk(-2, 3.5, pi=3.14159, e=2.71828) == (-2, 3.5, {'e': 2.71828, 'pi': 3.14159})
-     assert mpak(7, 7.7, 7.77, 7.777, 7.7777, minusseven=-7) == (
-         7, 7.7, (7.77, 7.777, 7.7777), {'minusseven': -7})
-     assert mpakd() == (1, 3.14159, (), {})
-     assert mpakd(3) == (3, 3.14159, (), {})
-     assert mpakd(j=2.71828) == (1, 2.71828, (), {})
-     assert mpakd(k=42) == (1, 3.14159, (), {'k': 42})
-     assert mpakd(1, 1, 2, 3, 5, 8, then=13, followedby=21) == (
-         1, 1, (2, 3, 5, 8), {'then': 13, 'followedby': 21})
-     # Arguments specified both positionally and via kwargs should fail:
-     with pytest.raises(TypeError) as excinfo:
-         assert mpakd(1, i=1)
-     assert msg(excinfo.value) == """
-         mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
-             1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
- 
-         Invoked with: 1; kwargs: i=1
-     """  # noqa: E501 line too long
-     with pytest.raises(TypeError) as excinfo:
-         assert mpakd(1, 2, j=1)
-     assert msg(excinfo.value) == """
-         mixed_plus_args_kwargs_defaults(): incompatible function arguments. The following argument types are supported:
-             1. (i: int = 1, j: float = 3.14159, *args, **kwargs) -> tuple
- 
-         Invoked with: 1, 2; kwargs: j=1
-     """  # noqa: E501 line too long
- 
- 
- def test_keyword_only_args(msg):
-     assert m.kwonly_all(i=1, j=2) == (1, 2)
-     assert m.kwonly_all(j=1, i=2) == (2, 1)
- 
-     with pytest.raises(TypeError) as excinfo:
-         assert m.kwonly_all(i=1) == (1,)
-     assert "incompatible function arguments" in str(excinfo.value)
- 
-     with pytest.raises(TypeError) as excinfo:
-         assert m.kwonly_all(1, 2) == (1, 2)
-     assert "incompatible function arguments" in str(excinfo.value)
- 
-     assert m.kwonly_some(1, k=3, j=2) == (1, 2, 3)
- 
-     assert m.kwonly_with_defaults(z=8) == (3, 4, 5, 8)
-     assert m.kwonly_with_defaults(2, z=8) == (2, 4, 5, 8)
-     assert m.kwonly_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9)
-     assert m.kwonly_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9)
- 
-     assert m.kwonly_mixed(1, j=2) == (1, 2)
-     assert m.kwonly_mixed(j=2, i=3) == (3, 2)
-     assert m.kwonly_mixed(i=2, j=3) == (2, 3)
- 
-     assert m.kwonly_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {'extra': 7})
-     assert m.kwonly_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {'extra': 6})
-     assert m.kwonly_plus_more(2, k=3, extra=4) == (2, -1, 3, {'extra': 4})
- 
-     with pytest.raises(TypeError) as excinfo:
-         assert m.kwonly_mixed(i=1) == (1,)
-     assert "incompatible function arguments" in str(excinfo.value)
- 
-     with pytest.raises(RuntimeError) as excinfo:
-         m.register_invalid_kwonly(m)
-     assert msg(excinfo.value) == """
-         arg(): cannot specify an unnamed argument after an kwonly() annotation
-     """
- 
- 
- @pytest.mark.xfail("env.PYPY and env.PY2", reason="PyPy2 doesn't double count")
- def test_args_refcount():
-     """Issue/PR #1216 - py::args elements get double-inc_ref()ed when combined with regular
-     arguments"""
-     refcount = m.arg_refcount_h
- 
-     myval = 54321
-     expected = refcount(myval)
-     assert m.arg_refcount_h(myval) == expected
-     assert m.arg_refcount_o(myval) == expected + 1
-     assert m.arg_refcount_h(myval) == expected
-     assert refcount(myval) == expected
- 
-     assert m.mixed_plus_args(1, 2.0, "a", myval) == (1, 2.0, ("a", myval))
-     assert refcount(myval) == expected
- 
-     assert m.mixed_plus_kwargs(3, 4.0, a=1, b=myval) == (3, 4.0, {"a": 1, "b": myval})
-     assert refcount(myval) == expected
- 
-     assert m.args_function(-1, myval) == (-1, myval)
-     assert refcount(myval) == expected
- 
-     assert m.mixed_plus_args_kwargs(5, 6.0, myval, a=myval) == (5, 6.0, (myval,), {"a": myval})
-     assert refcount(myval) == expected
- 
-     assert m.args_kwargs_function(7, 8, myval, a=1, b=myval) == \
-         ((7, 8, myval), {"a": 1, "b": myval})
-     assert refcount(myval) == expected
- 
-     exp3 = refcount(myval, myval, myval)
-     assert m.args_refcount(myval, myval, myval) == (exp3, exp3, exp3)
-     assert refcount(myval) == expected
- 
-     # This function takes the first arg as a `py::object` and the rest as a `py::args`. Unlike the
-     # previous case, when we have both positional and `py::args` we need to construct a new tuple
-     # for the `py::args`; in the previous case, we could simply inc_ref and pass on Python's input
-     # tuple without having to inc_ref the individual elements, but here we can't, hence the extra
-     # refs.
-     assert m.mixed_args_refcount(myval, myval, myval) == (exp3 + 3, exp3 + 3, exp3 + 3)
- 
-     assert m.class_default_argument() == "<class 'decimal.Decimal'>"
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par.h DELETED
@@ -1,125 +0,0 @@
- /******************************************************************************
-  * Copyright (c) 2016-2018, NVIDIA CORPORATION.  All rights reserved.
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions are met:
-  *     * Redistributions of source code must retain the above copyright
-  *       notice, this list of conditions and the following disclaimer.
-  *     * Redistributions in binary form must reproduce the above copyright
-  *       notice, this list of conditions and the following disclaimer in the
-  *       documentation and/or other materials provided with the distribution.
-  *     * Neither the name of the NVIDIA CORPORATION nor the
-  *       names of its contributors may be used to endorse or promote products
-  *       derived from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
-  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  *
-  ******************************************************************************/
- #pragma once
- 
- #include <thrust/detail/config.h>
- #include <thrust/system/cuda/detail/guarded_cuda_runtime_api.h>
- #include <thrust/system/cuda/detail/execution_policy.h>
- #include <thrust/system/cuda/detail/util.h>
- 
- #include <thrust/detail/allocator_aware_execution_policy.h>
- 
- #if THRUST_CPP_DIALECT >= 2011
- #  include <thrust/detail/dependencies_aware_execution_policy.h>
- #endif
- 
- 
- namespace thrust
- {
- namespace cuda_cub {
- 
- template <class Derived>
- struct execute_on_stream_base : execution_policy<Derived>
- {
- private:
-   cudaStream_t stream;
- 
- public:
-   __host__ __device__
-   execute_on_stream_base(cudaStream_t stream_ = default_stream())
-       : stream(stream_) {}
- 
-   THRUST_RUNTIME_FUNCTION
-   Derived
-   on(cudaStream_t const &s) const
-   {
-     Derived result = derived_cast(*this);
-     result.stream = s;
-     return result;
-   }
- 
- private:
-   friend __host__ __device__
-   cudaStream_t
-   get_stream(const execute_on_stream_base &exec)
-   {
-     return exec.stream;
-   }
- };
- 
- struct execute_on_stream : execute_on_stream_base<execute_on_stream>
- {
-   typedef execute_on_stream_base<execute_on_stream> base_t;
- 
-   __host__ __device__
-   execute_on_stream() : base_t(){};
-   __host__ __device__
-   execute_on_stream(cudaStream_t stream) : base_t(stream){};
- };
- 
- 
- struct par_t : execution_policy<par_t>,
-   thrust::detail::allocator_aware_execution_policy<
-     execute_on_stream_base>
- #if THRUST_CPP_DIALECT >= 2011
- , thrust::detail::dependencies_aware_execution_policy<
-     execute_on_stream_base>
- #endif
- {
-   typedef execution_policy<par_t> base_t;
- 
-   __host__ __device__
-   THRUST_CONSTEXPR par_t() : base_t() {}
- 
-   typedef execute_on_stream stream_attachment_type;
- 
-   THRUST_RUNTIME_FUNCTION
-   stream_attachment_type
-   on(cudaStream_t const &stream) const
-   {
-     return execute_on_stream(stream);
-   }
- };
- 
- THRUST_INLINE_CONSTANT par_t par;
- } // namespace cuda_cub
- 
- namespace system {
- namespace cuda {
- using thrust::cuda_cub::par;
- namespace detail {
- using thrust::cuda_cub::par_t;
- }
- } // namespace cuda
- } // namespace system
- 
- namespace cuda {
- using thrust::cuda_cub::par;
- } // namespace cuda
- 
- } // end namespace thrust
- 
 
spaces/CVPR/regionclip-demo/detectron2/layers/roi_align_rotated.py DELETED
@@ -1,93 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import torch
- from torch import nn
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- from torch.nn.modules.utils import _pair
- 
- from detectron2 import _C
- 
- 
- class _ROIAlignRotated(Function):
-     @staticmethod
-     def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
-         ctx.save_for_backward(roi)
-         ctx.output_size = _pair(output_size)
-         ctx.spatial_scale = spatial_scale
-         ctx.sampling_ratio = sampling_ratio
-         ctx.input_shape = input.size()
-         output = _C.roi_align_rotated_forward(
-             input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
-         )
-         return output
- 
-     @staticmethod
-     @once_differentiable
-     def backward(ctx, grad_output):
-         (rois,) = ctx.saved_tensors
-         output_size = ctx.output_size
-         spatial_scale = ctx.spatial_scale
-         sampling_ratio = ctx.sampling_ratio
-         bs, ch, h, w = ctx.input_shape
-         grad_input = _C.roi_align_rotated_backward(
-             grad_output,
-             rois,
-             spatial_scale,
-             output_size[0],
-             output_size[1],
-             bs,
-             ch,
-             h,
-             w,
-             sampling_ratio,
-         )
-         return grad_input, None, None, None, None, None
- 
- 
- roi_align_rotated = _ROIAlignRotated.apply
- 
- 
- class ROIAlignRotated(nn.Module):
-     def __init__(self, output_size, spatial_scale, sampling_ratio):
-         """
-         Args:
-             output_size (tuple): h, w
-             spatial_scale (float): scale the input boxes by this number
-             sampling_ratio (int): number of inputs samples to take for each output
-                 sample. 0 to take samples densely.
- 
-         Note:
-             ROIAlignRotated supports continuous coordinate by default:
-             Given a continuous coordinate c, its two neighboring pixel indices (in our
-             pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
-             c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
-             from the underlying signal at continuous coordinates 0.5 and 1.5).
-         """
-         super(ROIAlignRotated, self).__init__()
-         self.output_size = output_size
-         self.spatial_scale = spatial_scale
-         self.sampling_ratio = sampling_ratio
- 
-     def forward(self, input, rois):
-         """
-         Args:
-             input: NCHW images
-             rois: Bx6 boxes. First column is the index into N.
-                 The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
-         """
-         assert rois.dim() == 2 and rois.size(1) == 6
-         orig_dtype = input.dtype
-         if orig_dtype == torch.float16:
-             input = input.float()
-             rois = rois.float()
-         return roi_align_rotated(
-             input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
-         ).to(dtype=orig_dtype)
- 
-     def __repr__(self):
-         tmpstr = self.__class__.__name__ + "("
-         tmpstr += "output_size=" + str(self.output_size)
-         tmpstr += ", spatial_scale=" + str(self.spatial_scale)
-         tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
-         tmpstr += ")"
-         return tmpstr
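
A shape-level usage sketch (values are arbitrary; running it needs detectron2's compiled _C extension):

    import torch
    from detectron2.layers.roi_align_rotated import ROIAlignRotated

    feat = torch.rand(1, 8, 32, 32)  # NCHW features
    # One roi: batch index, x_ctr, y_ctr, width, height, angle in degrees
    rois = torch.tensor([[0.0, 16.0, 16.0, 10.0, 6.0, 30.0]])
    pool = ROIAlignRotated(output_size=(7, 7), spatial_scale=1.0, sampling_ratio=0)
    out = pool(feat, rois)  # -> (1, 8, 7, 7)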
 
spaces/ChrisPreston/diff-svc_minato_aqua/modules/commons/common_layers.py DELETED
@@ -1,675 +0,0 @@
1
- import math
2
-
3
- import torch
4
- import torch.nn.functional as F
5
- import torch.onnx.operators
6
- from torch import nn
7
- from torch.nn import Parameter
8
-
9
- import utils
10
-
11
-
12
- class Reshape(nn.Module):
13
- def __init__(self, *args):
14
- super(Reshape, self).__init__()
15
- self.shape = args
16
-
17
- def forward(self, x):
18
- return x.view(self.shape)
19
-
20
-
21
- class Permute(nn.Module):
22
- def __init__(self, *args):
23
- super(Permute, self).__init__()
24
- self.args = args
25
-
26
- def forward(self, x):
27
- return x.permute(self.args)
28
-
29
-
30
- class LinearNorm(torch.nn.Module):
31
- def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
32
- super(LinearNorm, self).__init__()
33
- self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
34
-
35
- torch.nn.init.xavier_uniform_(
36
- self.linear_layer.weight,
37
- gain=torch.nn.init.calculate_gain(w_init_gain))
38
-
39
- def forward(self, x):
40
- return self.linear_layer(x)
41
-
42
-
43
- class ConvNorm(torch.nn.Module):
44
- def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
45
- padding=None, dilation=1, bias=True, w_init_gain='linear'):
46
- super(ConvNorm, self).__init__()
47
- if padding is None:
48
- assert (kernel_size % 2 == 1)
49
- padding = int(dilation * (kernel_size - 1) / 2)
50
-
51
- self.conv = torch.nn.Conv1d(in_channels, out_channels,
52
- kernel_size=kernel_size, stride=stride,
53
- padding=padding, dilation=dilation,
54
- bias=bias)
55
-
56
- torch.nn.init.xavier_uniform_(
57
- self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
58
-
59
- def forward(self, signal):
60
- conv_signal = self.conv(signal)
61
- return conv_signal
62
-
63
-
64
- def Embedding(num_embeddings, embedding_dim, padding_idx=None):
65
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
66
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
67
- if padding_idx is not None:
68
- nn.init.constant_(m.weight[padding_idx], 0)
69
- return m
70
-
71
-
72
- def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
73
- if not export and torch.cuda.is_available():
74
- try:
75
- from apex.normalization import FusedLayerNorm
76
- return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
77
- except ImportError:
78
- pass
79
- return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
80
-
81
-
82
- def Linear(in_features, out_features, bias=True):
83
- m = nn.Linear(in_features, out_features, bias)
84
- nn.init.xavier_uniform_(m.weight)
85
- if bias:
86
- nn.init.constant_(m.bias, 0.)
87
- return m
88
-
89
-
90
- class SinusoidalPositionalEmbedding(nn.Module):
91
- """This module produces sinusoidal positional embeddings of any length.
92
-
93
- Padding symbols are ignored.
94
- """
95
-
96
- def __init__(self, embedding_dim, padding_idx, init_size=1024):
97
- super().__init__()
98
- self.embedding_dim = embedding_dim
99
- self.padding_idx = padding_idx
100
- self.weights = SinusoidalPositionalEmbedding.get_embedding(
101
- init_size,
102
- embedding_dim,
103
- padding_idx,
104
- )
105
- self.register_buffer('_float_tensor', torch.FloatTensor(1))
106
-
107
- @staticmethod
108
- def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
109
- """Build sinusoidal embeddings.
110
-
111
- This matches the implementation in tensor2tensor, but differs slightly
112
- from the description in Section 3.5 of "Attention Is All You Need".
113
- """
114
- half_dim = embedding_dim // 2
115
- emb = math.log(10000) / (half_dim - 1)
116
- emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
117
- emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
118
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
119
- if embedding_dim % 2 == 1:
120
- # zero pad
121
- emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
122
- if padding_idx is not None:
123
- emb[padding_idx, :] = 0
124
- return emb
125
-
126
- def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
127
- """Input is expected to be of size [bsz x seqlen]."""
128
- bsz, seq_len = input.shape[:2]
129
- max_pos = self.padding_idx + 1 + seq_len
130
- if self.weights is None or max_pos > self.weights.size(0):
131
- # recompute/expand embeddings if needed
132
- self.weights = SinusoidalPositionalEmbedding.get_embedding(
133
- max_pos,
134
- self.embedding_dim,
135
- self.padding_idx,
136
- )
137
- self.weights = self.weights.to(self._float_tensor)
138
-
139
- if incremental_state is not None:
140
- # positions is the same for every token when decoding a single step
141
- pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
142
- return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
143
-
144
- positions = utils.make_positions(input, self.padding_idx) if positions is None else positions
145
- return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
146
-
147
- def max_positions(self):
148
- """Maximum number of supported positions."""
149
- return int(1e5) # an arbitrary large number
150
-
151
-
152
- class ConvTBC(nn.Module):
153
- def __init__(self, in_channels, out_channels, kernel_size, padding=0):
154
- super(ConvTBC, self).__init__()
155
- self.in_channels = in_channels
156
- self.out_channels = out_channels
157
- self.kernel_size = kernel_size
158
- self.padding = padding
159
-
160
- self.weight = torch.nn.Parameter(torch.Tensor(
161
- self.kernel_size, in_channels, out_channels))
162
- self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
163
-
164
- def forward(self, input):
165
- return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding)
166
-
167
-
168
class MultiheadAttention(nn.Module):
    def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
                 add_bias_kv=False, add_zero_attn=False, self_attention=False,
                 encoder_decoder_attention=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, \
            'Self-attention requires query, key and value to be of the same size'

        if self.qkv_same_dim:
            self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        else:
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))

        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)

        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self.reset_parameters()

        self.enable_torch_version = hasattr(F, "multi_head_attention_forward")
        self.last_attn_probs = None

    def reset_parameters(self):
        if self.qkv_same_dim:
            nn.init.xavier_uniform_(self.in_proj_weight)
        else:
            nn.init.xavier_uniform_(self.k_proj_weight)
            nn.init.xavier_uniform_(self.v_proj_weight)
            nn.init.xavier_uniform_(self.q_proj_weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(
            self,
            query, key, value,
            key_padding_mask=None,
            incremental_state=None,
            need_weights=True,
            static_kv=False,
            attn_mask=None,
            before_softmax=False,
            need_head_weights=False,
            enc_dec_attn_constraint_mask=None,
            reset_attn_weight=None
    ):
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

        if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
            if self.qkv_same_dim:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      self.in_proj_weight,
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask)
            else:
                return F.multi_head_attention_forward(query, key, value,
                                                      self.embed_dim, self.num_heads,
                                                      torch.empty([0]),
                                                      self.in_proj_bias, self.bias_k, self.bias_v,
                                                      self.add_zero_attn, self.dropout,
                                                      self.out_proj.weight, self.out_proj.bias,
                                                      self.training, key_padding_mask, need_weights,
                                                      attn_mask, use_separate_proj_weight=True,
                                                      q_proj_weight=self.q_proj_weight,
                                                      k_proj_weight=self.k_proj_weight,
                                                      v_proj_weight=self.v_proj_weight)

        if incremental_state is not None:
            raise NotImplementedError("incremental decoding is not implemented in this module")
        saved_state = None

        if self.self_attention:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.in_proj_k(key)
                v = self.in_proj_v(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling

        if self.bias_k is not None:
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)

        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)

        if saved_state is not None:
            raise NotImplementedError("incremental decoding is not implemented in this module")

        src_len = k.size(1)

        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
            key_padding_mask = None

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len

        if self.add_zero_attn:
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)

        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            if len(attn_mask.shape) == 2:
                attn_mask = attn_mask.unsqueeze(0)
            elif len(attn_mask.shape) == 3:
                attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
                    bsz * self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights + attn_mask

        if enc_dec_attn_constraint_mask is not None:  # bs x head x L_kv
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
                -1e9,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                -1e9,
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)

        if before_softmax:
            return attn_weights, v

        attn_weights_float = utils.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)

        if reset_attn_weight is not None:
            if reset_attn_weight:
                self.last_attn_probs = attn_probs.detach()
            else:
                assert self.last_attn_probs is not None
                attn_probs = self.last_attn_probs
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)

        if need_weights:
            attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        else:
            attn_weights = None

        return attn, (attn_weights, attn_logits)

    def in_proj_qkv(self, query):
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_q(self, query):
        if self.qkv_same_dim:
            return self._in_proj(query, end=self.embed_dim)
        else:
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[:self.embed_dim]
            return F.linear(query, self.q_proj_weight, bias)

    def in_proj_k(self, key):
        if self.qkv_same_dim:
            return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
        else:
            weight = self.k_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[self.embed_dim:2 * self.embed_dim]
            return F.linear(key, weight, bias)

    def in_proj_v(self, value):
        if self.qkv_same_dim:
            return self._in_proj(value, start=2 * self.embed_dim)
        else:
            weight = self.v_proj_weight
            bias = self.in_proj_bias
            if bias is not None:
                bias = bias[2 * self.embed_dim:]
            return F.linear(value, weight, bias)

    def _in_proj(self, input, start=0, end=None):
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
        return attn_weights

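A hypothetical usage sketch of the module above (assuming its helpers such as utils and Parameter are importable). Inputs are (Time, Batch, Channels); note the return value differs by code path: the fast path that delegates to F.multi_head_attention_forward returns (attn, averaged_weights), while the manual fallback returns (attn, (attn_weights, attn_logits)).

import torch

mha = MultiheadAttention(embed_dim=64, num_heads=4, self_attention=True)
x = torch.randn(10, 2, 64)   # (T, B, C)
out, extra = mha(query=x, key=x, value=x)
print(out.shape)             # torch.Size([10, 2, 64])
# `extra` is the head-averaged weight matrix on the torch fast path,
# or the (attn_weights, attn_logits) pair on the fallback path.
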
class Swish(torch.autograd.Function):
    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        i, = ctx.saved_tensors
        sigmoid_i = torch.sigmoid(i)
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))


class CustomSwish(nn.Module):
    def forward(self, input_tensor):
        return Swish.apply(input_tensor)

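The hand-written backward encodes the Swish derivative d/dx[x*sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x))). A quick check against autograd's numerical gradient:

import torch

x = torch.randn(5, dtype=torch.double, requires_grad=True)
print(torch.autograd.gradcheck(Swish.apply, (x,)))  # True if the analytic gradient is right
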
class Mish(nn.Module):
    def forward(self, x):
        return x * torch.tanh(F.softplus(x))

class TransformerFFNLayer(nn.Module):
    def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
        super().__init__()
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.act = act
        if padding == 'SAME':
            self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
        elif padding == 'LEFT':
            self.ffn_1 = nn.Sequential(
                nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
                nn.Conv1d(hidden_size, filter_size, kernel_size)
            )
        self.ffn_2 = Linear(filter_size, hidden_size)
        if self.act == 'swish':
            self.swish_fn = CustomSwish()

    def forward(self, x, incremental_state=None):
        # x: T x B x C
        if incremental_state is not None:
            raise NotImplementedError('Nar-generation does not allow incremental_state.')

        x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
        x = x * self.kernel_size ** -0.5

        if self.act == 'gelu':
            x = F.gelu(x)
        if self.act == 'relu':
            x = F.relu(x)
        if self.act == 'swish':
            x = self.swish_fn(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.ffn_2(x)
        return x

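The 'LEFT' branch pads only on the left, so a width-k convolution stays causal: position t never sees inputs beyond t. A self-contained check (stock PyTorch, k = 3): perturbing the last time step must leave all earlier outputs unchanged.

import torch
import torch.nn as nn

k = 3
causal = nn.Sequential(nn.ConstantPad1d((k - 1, 0), 0.0), nn.Conv1d(1, 1, k))
x = torch.randn(1, 1, 8)
y1 = causal(x).detach()
x2 = x.clone()
x2[..., -1] += 10.0                                # perturb only the last step
y2 = causal(x2).detach()
print(torch.allclose(y1[..., :-1], y2[..., :-1]))  # True: earlier outputs unchanged
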
class BatchNorm1dTBC(nn.Module):
    def __init__(self, c):
        super(BatchNorm1dTBC, self).__init__()
        self.bn = nn.BatchNorm1d(c)

    def forward(self, x):
        """
        :param x: [T, B, C]
        :return: [T, B, C]
        """
        x = x.permute(1, 2, 0)  # [B, C, T]
        x = self.bn(x)  # [B, C, T]
        x = x.permute(2, 0, 1)  # [T, B, C]
        return x

class EncSALayer(nn.Module):
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
                 relu_dropout=0.1, kernel_size=9, padding='SAME', norm='ln', act='gelu'):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.num_heads = num_heads
        if num_heads > 0:
            if norm == 'ln':
                self.layer_norm1 = LayerNorm(c)
            elif norm == 'bn':
                self.layer_norm1 = BatchNorm1dTBC(c)
            self.self_attn = MultiheadAttention(
                self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
            )
        if norm == 'ln':
            self.layer_norm2 = LayerNorm(c)
        elif norm == 'bn':
            self.layer_norm2 = BatchNorm1dTBC(c)
        self.ffn = TransformerFFNLayer(
            c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)

    def forward(self, x, encoder_padding_mask=None, **kwargs):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
        if self.num_heads > 0:
            residual = x
            x = self.layer_norm1(x)
            x, _ = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=encoder_padding_mask
            )
            x = F.dropout(x, self.dropout, training=self.training)
            x = residual + x
            x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]

        residual = x
        x = self.layer_norm2(x)
        x = self.ffn(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
        return x

class DecSALayer(nn.Module):
    def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, kernel_size=9, act='gelu'):
        super().__init__()
        self.c = c
        self.dropout = dropout
        self.layer_norm1 = LayerNorm(c)
        self.self_attn = MultiheadAttention(
            c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
        )
        self.layer_norm2 = LayerNorm(c)
        self.encoder_attn = MultiheadAttention(
            c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
        )
        self.layer_norm3 = LayerNorm(c)
        self.ffn = TransformerFFNLayer(
            c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)

    def forward(
            self,
            x,
            encoder_out=None,
            encoder_padding_mask=None,
            incremental_state=None,
            self_attn_mask=None,
            self_attn_padding_mask=None,
            attn_out=None,
            reset_attn_weight=None,
            **kwargs,
    ):
        layer_norm_training = kwargs.get('layer_norm_training', None)
        if layer_norm_training is not None:
            self.layer_norm1.training = layer_norm_training
            self.layer_norm2.training = layer_norm_training
            self.layer_norm3.training = layer_norm_training
        residual = x
        x = self.layer_norm1(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            attn_mask=self_attn_mask
        )
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x

        residual = x
        x = self.layer_norm2(x)
        if encoder_out is not None:
            x, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                enc_dec_attn_constraint_mask=None,
                # utils.get_incremental_state(self, incremental_state, 'enc_dec_attn_constraint_mask'),
                reset_attn_weight=reset_attn_weight
            )
            attn_logits = attn[1]
        else:
            assert attn_out is not None
            x = self.encoder_attn.in_proj_v(attn_out.transpose(0, 1))
            attn_logits = None
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x

        residual = x
        x = self.layer_norm3(x)
        x = self.ffn(x, incremental_state=incremental_state)
        x = F.dropout(x, self.dropout, training=self.training)
        x = residual + x
        # if len(attn_logits.size()) > 3:
        #     indices = attn_logits.softmax(-1).max(-1).values.sum(-1).argmax(-1)
        #     attn_logits = attn_logits.gather(1,
        #         indices[:, None, None, None].repeat(1, 1, attn_logits.size(-2), attn_logits.size(-1))).squeeze(1)
        return x, attn_logits
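
A hypothetical smoke test of EncSALayer (assuming the module's LayerNorm and Linear helpers are importable). The layout is (Time, Batch, Channels), and the padding mask uses 1 for padded positions, matching the key_padding_mask convention above; masked steps are zeroed in the output.

import torch

layer = EncSALayer(c=64, num_heads=4, dropout=0.1)
x = torch.randn(20, 2, 64)                  # (T, B, C)
pad = torch.zeros(2, 20, dtype=torch.bool)  # (B, T)
pad[:, 15:] = True                          # mark the tail as padding
y = layer(x, encoder_padding_mask=pad)
print(y.shape)                              # torch.Size([20, 2, 64])
print(float(y[15:].abs().sum()))            # 0.0 -- padded steps are zeroed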
 
spaces/CikeyQI/Yunzai/Yunzai/lib/config/config.js DELETED
@@ -1,174 +0,0 @@
import YAML from "yaml"
import fs from "node:fs"
import chokidar from "chokidar"

/** Configuration file */
class Cfg {
  constructor () {
    this.config = {}

    /** watched files */
    this.watcher = { config: {}, defSet: {} }

    this.initCfg()
  }

  /** initialize the configuration */
  initCfg () {
    let path = "config/config/"
    let pathDef = "config/default_config/"
    const files = fs.readdirSync(pathDef).filter(file => file.endsWith(".yaml"))
    for (let file of files)
      if (!fs.existsSync(`${path}${file}`))
        fs.copyFileSync(`${pathDef}${file}`, `${path}${file}`)
    if (!fs.existsSync("data")) fs.mkdirSync("data")
    if (!fs.existsSync("resources")) fs.mkdirSync("resources")
  }

  /** bot configuration */
  get bot () {
    let bot = this.getConfig("bot")
    let defbot = this.getdefSet("bot")
    bot = { ...defbot, ...bot }

    return bot
  }

  get other () {
    return this.getConfig("other")
  }

  get redis () {
    return this.getConfig("redis")
  }

  get renderer() {
    return this.getConfig("renderer");
  }

  /** master QQ accounts */
  get masterQQ () {
    let masterQQ = this.getConfig("other").masterQQ || []

    if (!Array.isArray(masterQQ))
      masterQQ = [masterQQ]

    const masters = []
    for (const i of masterQQ)
      masters.push(Number(i) || String(i))
    return masters
  }

  /** bot account: [master accounts] */
  get master () {
    let master = this.getConfig("other").master || []

    if (!Array.isArray(master))
      master = [master]

    const masters = {}
    for (let i of master) {
      i = i.split(":")
      if (Array.isArray(masters[i[0]]))
        masters[i[0]].push(i[1])
      else
        masters[i[0]] = [i[1]]
    }
    return masters
  }

  /** bot accounts */
  get uin () {
    return Object.keys(this.master)
  }
  get qq () {
    return this.uin
  }

  /** package.json */
  get package () {
    if (this._package) return this._package

    this._package = JSON.parse(fs.readFileSync("package.json", "utf8"))
    return this._package
  }

  /** group configuration */
  getGroup (bot_id = "", group_id = "") {
    const config = this.getConfig("group")
    const defCfg = this.getdefSet("group")
    return {
      ...defCfg.default,
      ...config.default,
      ...config[`${bot_id}:default`],
      ...config[group_id],
      ...config[`${bot_id}:${group_id}`],
    }
  }

  /** "other" configuration */
  getOther () {
    let def = this.getdefSet("other")
    let config = this.getConfig("other")
    return { ...def, ...config }
  }

  /**
   * @param name config file name
   */
  getdefSet (name) {
    return this.getYaml("default_config", name)
  }

  /** user configuration */
  getConfig (name) {
    return this.getYaml("config", name)
  }

  /**
   * Load a config YAML file
   * @param type "default_config" for the defaults, "config" for the user config
   * @param name file name
   */
  getYaml (type, name) {
    let file = `config/${type}/${name}.yaml`
    let key = `${type}.${name}`
    if (this.config[key]) return this.config[key]

    this.config[key] = YAML.parse(
      fs.readFileSync(file, "utf8")
    )

    this.watch(file, name, type)

    return this.config[key]
  }

  /** watch a config file for changes */
  watch (file, name, type = "default_config") {
    let key = `${type}.${name}`

    if (this.watcher[key]) return

    const watcher = chokidar.watch(file)
    watcher.on("change", path => {
      delete this.config[key]
      if (typeof Bot == "undefined") return
      logger.mark(`[修改配置文件][${type}][${name}]`)
      if (this[`change_${name}`]) {
        this[`change_${name}`]()
      }
    })

    this.watcher[key] = watcher
  }

  async change_bot () {
    /** update the log level */
    let log = await import("./log.js")
    log.default()
  }
}

export default new Cfg()
 
spaces/CjangCjengh/Sanskrit-TTS/text/__init__.py DELETED
@@ -1,32 +0,0 @@
""" from https://github.com/keithito/tacotron """
from text import cleaners


def text_to_sequence(text, symbols, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    Args:
      text: string to convert to a sequence
      symbols: list of valid symbols; each symbol's index in the list becomes its ID
      cleaner_names: names of the cleaner functions to run the text through
    Returns:
      List of integers corresponding to the symbols in the text
    '''
    _symbol_to_id = {s: i for i, s in enumerate(symbols)}

    sequence = []

    clean_text = _clean_text(text, cleaner_names)
    for symbol in clean_text:
        if symbol not in _symbol_to_id.keys():
            continue
        symbol_id = _symbol_to_id[symbol]
        sequence += [symbol_id]
    return sequence


def _clean_text(text, cleaner_names):
    for name in cleaner_names:
        cleaner = getattr(cleaners, name)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
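
An illustration of the symbol-to-ID mapping, with the cleaner step stubbed out (the real function first runs the text through the named cleaners from text/cleaners.py); characters outside the symbol set are silently skipped.

symbols = ['_', ' ', 'a', 'b', 'c']
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
text = "abc cab!"            # '!' is not in the symbol set and gets skipped
sequence = [_symbol_to_id[ch] for ch in text if ch in _symbol_to_id]
print(sequence)              # [2, 3, 4, 1, 4, 2, 3]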
 
spaces/Cong723/gpt-academic-public/crazy_functions/crazy_utils.py DELETED
@@ -1,608 +0,0 @@
from toolbox import update_ui, get_conf, trimmed_format_exc

def input_clipping(inputs, history, max_token_limit):
    import numpy as np
    from request_llm.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))

    mode = 'input-and-history'
    # when the input accounts for less than half of the token budget, clip only the history
    input_token_num = get_token_num(inputs)
    if input_token_num < max_token_limit // 2:
        mode = 'only-history'
        max_token_limit = max_token_limit - input_token_num

    everything = [inputs] if mode == 'input-and-history' else ['']
    everything.extend(history)
    n_token = get_token_num('\n'.join(everything))
    everything_token = [get_token_num(e) for e in everything]
    delta = max(everything_token) // 16  # granularity of each truncation step

    while n_token > max_token_limit:
        where = np.argmax(everything_token)
        encoded = enc.encode(everything[where], disallowed_special=())
        clipped_encoded = encoded[:len(encoded) - delta]
        everything[where] = enc.decode(clipped_encoded)[:-1]  # -1 to remove a possibly illegal trailing char
        everything_token[where] = get_token_num(everything[where])
        n_token = get_token_num('\n'.join(everything))

    if mode == 'input-and-history':
        inputs = everything[0]
    history = everything[1:]
    return inputs, history
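
The loop above always trims the currently largest chunk until the whole batch fits the budget. The same greedy idea in a self-contained form, with a stub "tokenizer" that counts characters instead of BPE tokens:

def clip_to_budget(chunks, limit, delta=4):
    sizes = [len(c) for c in chunks]
    while sum(sizes) > limit:
        i = max(range(len(chunks)), key=lambda j: sizes[j])  # largest chunk
        chunks[i] = chunks[i][:-delta]                       # trim its tail
        sizes[i] = len(chunks[i])
    return chunks

print(clip_to_budget(["aaaaaaaaaa", "bbb", "cc"], limit=9))
# ['aa', 'bbb', 'cc'] -- the longest chunk is trimmed repeatedly until under budget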
def request_gpt_model_in_new_thread_with_ui_alive(
        inputs, inputs_show_user, llm_kwargs,
        chatbot, history, sys_prompt, refresh_interval=0.2,
        handle_token_exceed=True,
        retry_times_at_unknown_error=2,
        ):
    """
    Request a GPT model while keeping the user interface alive.

    Args (variables ending in _array are lists whose length equals the number of sub-tasks; when executed, the lists are unpacked and each item runs in its own thread):
        inputs (string): the inputs
        inputs_show_user (string): the inputs shown to the user (hides verbose real inputs in the report, improving its readability)
        top_p (float): top-p value for sampling from the model distribution
        temperature (float): temperature for sampling from the model distribution
        chatbot: chatbot inputs and outputs (UI dialog-window handle, used for data-flow visualization)
        history (list): the chat history
        sys_prompt (string): the system prompt fed to GPT (e.g. "you are a translator, ...")
        refresh_interval (float, optional): UI refresh interval (default: 0.2); keep it below 1 and never above 3 -- it only serves the visual effect
        handle_token_exceed: whether to handle token overflow automatically; if enabled, the text is truncated brutally on overflow (default: on)
        retry_times_at_unknown_error: number of retries on failure

    Returns:
        future: the result returned by GPT
    """
    import time
    from concurrent.futures import ThreadPoolExecutor
    from request_llm.bridge_all import predict_no_ui_long_connection
    # user feedback
    chatbot.append([inputs_show_user, ""])
    yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
    executor = ThreadPoolExecutor(max_workers=16)
    mutable = ["", time.time(), ""]
    def _req_gpt(inputs, history, sys_prompt):
        retry_op = retry_times_at_unknown_error
        exceeded_cnt = 0
        while True:
            # watchdog error
            if len(mutable) >= 2 and (time.time() - mutable[1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # [Case 1]: completed successfully
                result = predict_no_ui_long_connection(
                    inputs=inputs, llm_kwargs=llm_kwargs,
                    history=history, sys_prompt=sys_prompt, observe_window=mutable)
                return result
            except ConnectionAbortedError as token_exceeded_error:
                # [Case 2]: token overflow
                if handle_token_exceed:
                    exceeded_cnt += 1
                    # [handle it]: estimate the ratio and keep as much text as possible
                    from toolbox import get_reduce_token_percent
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN - EXCEED_ALLO)
                    mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
                    continue  # retry
                else:
                    # [give up]
                    tb_str = '```\n' + trimmed_format_exc() + '```'
                    mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                    return mutable[0]  # give up
            except:
                # [Case 3]: other errors: retry a few times
                tb_str = '```\n' + trimmed_format_exc() + '```'
                print(tb_str)
                mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                if retry_op > 0:
                    retry_op -= 1
                    mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}:\n\n"
                    if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
                        time.sleep(30)
                    time.sleep(5)
                    continue  # retry
                else:
                    time.sleep(5)
                    return mutable[0]  # give up

    # submit the task
    future = executor.submit(_req_gpt, inputs, history, sys_prompt)
    while True:
        # yield once to refresh the frontend page
        time.sleep(refresh_interval)
        # feed the watchdog
        mutable[1] = time.time()
        if future.done():
            break
        chatbot[-1] = [chatbot[-1][0], mutable[0]]
        yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI

    final_result = future.result()
    chatbot[-1] = [chatbot[-1][0], final_result]
    yield from update_ui(chatbot=chatbot, history=[])  # on final success, remove the error messages
    return final_result

def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array, inputs_show_user_array, llm_kwargs,
        chatbot, history_array, sys_prompt_array,
        refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
        handle_token_exceed=True, show_user_at_complete=False,
        retry_times_at_unknown_error=2,
        ):
    """
    Request a GPT model using multiple threads, with UI feedback and high efficiency.
    This is the [multi-threaded] version of the request function. It:
        streams remote data to the UI in real time,
        uses a thread pool whose size can be tuned to avoid OpenAI rate-limit errors,
        handles mid-run interruption,
        and, on network problems, folds the traceback and the data received so far into the output.

    Args (variables ending in _array are lists whose length equals the number of sub-tasks; when executed, the lists are unpacked and each item runs in its own thread):
        inputs_array (list): the inputs of each sub-task
        inputs_show_user_array (list): the inputs shown to the user for each sub-task (hides verbose real inputs in the report, improving its readability)
        llm_kwargs: the llm_kwargs parameters
        chatbot: chatbot (UI dialog-window handle, used for data-flow visualization)
        history_array (list): chat history per sub-task (a doubly nested list: the outer level is the sub-task split, the inner level is the chat history)
        sys_prompt_array (list): the system prompts fed to GPT
        refresh_interval (float, optional): UI refresh interval (default: 0.2); keep it below 1 and never above 3 -- it only serves the visual effect
        max_workers (int, optional): maximum number of threads (default: see config.py); caps the request frequency when there are many sub-tasks, to avoid OpenAI errors
        scroller_max_len (int, optional): how many trailing characters of the stream to display (default: 30) -- purely cosmetic
        handle_token_exceed (bool, optional): whether to shrink the text automatically when the input is too long; if enabled, the text is truncated brutally on overflow (default: on)
        show_user_at_complete (bool, optional): show the complete input/output in the chat window when finished
        retry_times_at_unknown_error: number of retries when a sub-task fails

    Returns:
        list: the collected outputs of all sub-tasks (if a sub-task fails, its response carries the traceback, which helps debugging and locating the problem)
    """
    import time, random
    from concurrent.futures import ThreadPoolExecutor
    from request_llm.bridge_all import predict_no_ui_long_connection
    assert len(inputs_array) == len(history_array)
    assert len(inputs_array) == len(sys_prompt_array)
    if max_workers == -1:  # read the config file
        try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
        except: max_workers = 8
        if max_workers <= 0: max_workers = 3
    # disable multithreading for chatglm models; it can cause severe stalls
    if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
        max_workers = 1

    executor = ThreadPoolExecutor(max_workers=max_workers)
    n_frag = len(inputs_array)
    # user feedback
    chatbot.append(["请开始多线程操作。", ""])
    yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
    # state shared across threads
    mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]

    # worker-thread task
    def _req_gpt(index, inputs, history, sys_prompt):
        gpt_say = ""
        retry_op = retry_times_at_unknown_error
        exceeded_cnt = 0
        mutable[index][2] = "执行中"
        while True:
            # watchdog error
            if len(mutable[index]) >= 2 and (time.time() - mutable[index][1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # [Case 1]: completed successfully
                # time.sleep(10); raise RuntimeError("test")
                gpt_say = predict_no_ui_long_connection(
                    inputs=inputs, llm_kwargs=llm_kwargs, history=history,
                    sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
                )
                mutable[index][2] = "已成功"
                return gpt_say
            except ConnectionAbortedError as token_exceeded_error:
                # [Case 2]: token overflow
                if handle_token_exceed:
                    exceeded_cnt += 1
                    # [handle it]: estimate the ratio and keep as much text as possible
                    from toolbox import get_reduce_token_percent
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN - EXCEED_ALLO)
                    gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
                    mutable[index][2] = f"截断重试"
                    continue  # retry
                else:
                    # [give up]
                    tb_str = '```\n' + trimmed_format_exc() + '```'
                    gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                    if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                    mutable[index][2] = "输入过长已放弃"
                    return gpt_say  # give up
            except:
                # [Case 3]: other errors
                tb_str = '```\n' + trimmed_format_exc() + '```'
                print(tb_str)
                gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                if retry_op > 0:
                    retry_op -= 1
                    wait = random.randint(5, 20)
                    if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
                        wait = wait * 3
                        fail_info = "OpenAI绑定信用卡可解除频率限制 "
                    else:
                        fail_info = ""
                    # things may improve after waiting a dozen seconds or so
                    for i in range(wait):
                        mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
                    # start retrying
                    mutable[index][2] = f"重试中 {retry_times_at_unknown_error - retry_op}/{retry_times_at_unknown_error}"
                    continue  # retry
                else:
                    mutable[index][2] = "已失败"
                    wait = 5
                    time.sleep(5)
                    return gpt_say  # give up

    # async tasks start
    futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
        range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
    cnt = 0
    while True:
        # yield once to refresh the frontend page
        time.sleep(refresh_interval)
        cnt += 1
        worker_done = [h.done() for h in futures]
        if all(worker_done):
            executor.shutdown()
            break
        # nicer UI rendering
        observe_win = []
        # every thread must feed the watchdog
        for thread_index, _ in enumerate(worker_done):
            mutable[thread_index][1] = time.time()
        # print something fun on the frontend
        for thread_index, _ in enumerate(worker_done):
            print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
                replace('\n', '').replace('```', '...').replace(
                    ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
            observe_win.append(print_something_really_funny)
        # print something fun on the frontend
        stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
                            if not done else f'`{mutable[thread_index][2]}`\n\n'
                            for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
        # print something fun on the frontend
        chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
        yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI

    # async tasks finished
    gpt_response_collection = []
    for inputs_show_user, f in zip(inputs_show_user_array, futures):
        gpt_res = f.result()
        gpt_response_collection.extend([inputs_show_user, gpt_res])

    # optionally show the results in the UI at the end
    if show_user_at_complete:
        for inputs_show_user, f in zip(inputs_show_user_array, futures):
            gpt_res = f.result()
            chatbot.append([inputs_show_user, gpt_res])
            yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
            time.sleep(0.3)
    return gpt_response_collection
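
The function above combines a thread pool with a shared, mutable status slot per task that the UI loop polls. A stripped-down version of that pattern (standard library only):

import time
from concurrent.futures import ThreadPoolExecutor

def worker(slot):
    for i in range(3):
        slot[0] = f"step {i}"
        slot[1] = time.time()          # heartbeat for the watchdog
        time.sleep(0.1)
    slot[2] = "done"

slots = [["", time.time(), "waiting"] for _ in range(4)]
with ThreadPoolExecutor(max_workers=2) as ex:
    futures = [ex.submit(worker, s) for s in slots]
    while not all(f.done() for f in futures):
        time.sleep(0.05)
        print([s[2] for s in slots])   # poll and "render" the statuses
print([s[2] for s in slots])           # ['done', 'done', 'done', 'done']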
def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
    def cut(txt_tocut, must_break_at_empty_line):  # recursive
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        else:
            lines = txt_tocut.split('\n')
            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)
            cnt = 0
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    if lines[cnt] != "":
                        continue
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            if cnt == 0:
                raise RuntimeError("存在一行极长的文本!")
            # recursively chain the resulting list
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line))
            return result
    try:
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
        return cut(txt, must_break_at_empty_line=False)
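
A quick demonstration with a stub token counter (characters as "tokens"): the text is first split at blank lines, and each returned piece stays under the limit.

text = "para one line\n\npara two is a bit longer\n\npara three"
pieces = breakdown_txt_to_satisfy_token_limit(text, get_token_fn=len, limit=30)
for p in pieces:
    print(repr(p), len(p))   # every piece stays under the 30-"token" limit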
def force_breakdown(txt, limit, get_token_fn):
    """
    When the text cannot be split on punctuation or blank lines, fall back to the most brutal cut.
    """
    for i in reversed(range(len(txt))):
        if get_token_fn(txt[:i]) < limit:
            return txt[:i], txt[i:]
    return "Tiktoken未知错误", "Tiktoken未知错误"

def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
    # recursive
    def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        else:
            lines = txt_tocut.split('\n')
            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)
            cnt = 0
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    if lines[cnt] != "":
                        continue
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            if cnt == 0:
                if break_anyway:
                    prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
                else:
                    raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
            # recursively chain the resulting list
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
            return result
    try:
        # attempt 1: split at blank lines
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
        try:
            # attempt 2: split at any line break
            return cut(txt, must_break_at_empty_line=False)
        except RuntimeError:
            try:
                # attempt 3: split at English periods (.)
                res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False)  # the Chinese full stop here is deliberate; it exists as a marker
                return [r.replace('。\n', '.') for r in res]
            except RuntimeError as e:
                try:
                    # attempt 4: split at Chinese full stops (。)
                    res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
                    return [r.replace('。。\n', '。') for r in res]
                except RuntimeError as e:
                    # attempt 5: nothing else works; cut anywhere as a last resort
                    return cut(txt, must_break_at_empty_line=False, break_anyway=True)

def read_and_clean_pdf_text(fp):
    """
    Split a PDF. Uses a lot of tricks; the logic is messy, but the results are surprisingly good.

    **Input**
    - `fp`: path of the PDF file whose text should be read and cleaned

    **Output**
    - `meta_txt`: the cleaned text content, as a string
    - `page_one_meta`: the cleaned text content of the first page, as a list

    **What the function does**
    Read a PDF file and clean its text content. The cleaning rules include:
    - extract the text of all blocks and merge it into one string
    - drop short blocks (fewer than 100 characters), replacing them with newlines
    - clean up redundant blank lines
    - merge paragraph blocks that start with a lowercase letter, joining them with a space
    - remove duplicated newlines
    - replace every newline with two newlines, so paragraphs are separated by two newlines
    """
    import fitz, copy
    import re
    import numpy as np
    from colorful import print亮黄, print亮绿
    fc = 0  # index 0: text
    fs = 1  # index 1: font size
    fb = 2  # index 2: bounding box
    REMOVE_FOOT_NOTE = True  # whether to drop non-body content (smaller font than the body, e.g. references, footnotes, figure captions)
    REMOVE_FOOT_FFSIZE_PERCENT = 0.95  # text smaller than this fraction of the body font counts as non-body (the body font size is not always 100% uniform; there are tiny, invisible variations)
    def primary_ffsize(l):
        """
        Extract the dominant font size of a text block.
        """
        fsize_statiscs = {}
        for wtf in l['spans']:
            if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
            fsize_statiscs[wtf['size']] += len(wtf['text'])
        return max(fsize_statiscs, key=fsize_statiscs.get)

    def ffsize_same(a, b):
        """
        Whether two font sizes are approximately equal.
        """
        return abs((a - b) / max(a, b)) < 0.02

    with fitz.open(fp) as doc:
        meta_txt = []
        meta_font = []

        meta_line = []
        meta_span = []
        ############################## <Step 1: collect initial information> ##################################
        for index, page in enumerate(doc):
            # file_content += page.get_text()
            text_areas = page.get_text("dict")  # the text information on the page
            for t in text_areas['blocks']:
                if 'lines' in t:
                    pf = 998
                    for l in t['lines']:
                        txt_line = "".join([wtf['text'] for wtf in l['spans']])
                        if len(txt_line) == 0: continue
                        pf = primary_ffsize(l)
                        meta_line.append([txt_line, pf, l['bbox'], l])
                        for wtf in l['spans']:  # for l in t['lines']:
                            meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
                    # meta_line.append(["NEW_BLOCK", pf])
            # block extraction: for each word segment within a line, for each line (cross-line words), for each block
            meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                '- ', '') for t in text_areas['blocks'] if 'lines' in t])
            meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
                                       for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
            if index == 0:
                page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                    '- ', '') for t in text_areas['blocks'] if 'lines' in t]

        ############################## <Step 2: determine the main body font> ##################################
        fsize_statiscs = {}
        for span in meta_span:
            if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
            fsize_statiscs[span[1]] += span[2]
        main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
        if REMOVE_FOOT_NOTE:
            give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT

        ############################## <Step 3: split and reassemble> ##################################
        mega_sec = []
        sec = []
        for index, line in enumerate(meta_line):
            if index == 0:
                sec.append(line[fc])
                continue
            if REMOVE_FOOT_NOTE:
                if meta_line[index][fs] <= give_up_fize_threshold:
                    continue
            if ffsize_same(meta_line[index][fs], meta_line[index - 1][fs]):
                # try to detect paragraphs
                if meta_line[index][fc].endswith('.') and\
                        (meta_line[index - 1][fc] != 'NEW_BLOCK') and \
                        (meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index - 1][fb][2] - meta_line[index - 1][fb][0]) * 0.7:
                    sec[-1] += line[fc]
                    sec[-1] += "\n\n"
                else:
                    sec[-1] += " "
                    sec[-1] += line[fc]
            else:
                if (index + 1 < len(meta_line)) and \
                        meta_line[index][fs] > main_fsize:
                    # single line + large font
                    mega_sec.append(copy.deepcopy(sec))
                    sec = []
                    sec.append("# " + line[fc])
                else:
                    # try to detect sections
                    if meta_line[index - 1][fs] > meta_line[index][fs]:
                        sec.append("\n" + line[fc])
                    else:
                        sec.append(line[fc])
        mega_sec.append(copy.deepcopy(sec))

        finals = []
        for ms in mega_sec:
            final = " ".join(ms)
            final = final.replace('- ', ' ')
            finals.append(final)
        meta_txt = finals

        ############################## <Step 4: miscellaneous post-processing> ##################################
        def 把字符太少的块清除为回车(meta_txt):
            # replace blocks with too few characters by a newline
            for index, block_txt in enumerate(meta_txt):
                if len(block_txt) < 100:
                    meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 把字符太少的块清除为回车(meta_txt)

        def 清理多余的空行(meta_txt):
            # clean up redundant blank lines
            for index in reversed(range(1, len(meta_txt))):
                if meta_txt[index] == '\n' and meta_txt[index - 1] == '\n':
                    meta_txt.pop(index)
            return meta_txt
        meta_txt = 清理多余的空行(meta_txt)

        def 合并小写开头的段落块(meta_txt):
            # merge paragraph blocks that start with a lowercase word
            def starts_with_lowercase_word(s):
                pattern = r"^[a-z]+"
                match = re.match(pattern, s)
                if match:
                    return True
                else:
                    return False
            for _ in range(100):
                for index, block_txt in enumerate(meta_txt):
                    if starts_with_lowercase_word(block_txt):
                        if meta_txt[index - 1] != '\n':
                            meta_txt[index - 1] += ' '
                        else:
                            meta_txt[index - 1] = ''
                        meta_txt[index - 1] += meta_txt[index]
                        meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 合并小写开头的段落块(meta_txt)
        meta_txt = 清理多余的空行(meta_txt)

        meta_txt = '\n'.join(meta_txt)
        # remove duplicated newlines
        for _ in range(5):
            meta_txt = meta_txt.replace('\n\n', '\n')

        # newline -> double newline
        meta_txt = meta_txt.replace('\n', '\n\n')

        ############################## <Step 5: show the splitting result> ##################################
        # for f in finals:
        #    print亮黄(f)
        #    print亮绿('***************************')

    return meta_txt, page_one_meta
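
A minimal sketch of the PyMuPDF primitive the function is built on (assuming PyMuPDF is installed; "paper.pdf" is a hypothetical input): page.get_text("dict") yields nested blocks, lines, and spans, and each span carries its text plus its font size, which is what drives the heuristics above.

import fitz  # PyMuPDF

with fitz.open("paper.pdf") as doc:      # hypothetical input file
    page = doc[0]
    for block in page.get_text("dict")["blocks"]:
        for line in block.get("lines", []):
            for span in line["spans"]:
                print(round(span["size"], 1), span["text"][:40])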
def get_files_from_everything(txt, type):  # type='.md'
    """
    Collect all files of a given type (e.g. .md) in a directory; a file on the web can be fetched as well.
    Args
    - txt: a path or URL, i.e. the file or folder to search, or a file on the web.
    - type: string, the file type to search for. Defaults to .md.
    Returns
    - success: bool, whether the function ran successfully.
    - file_manifest: list of the absolute paths of all files whose name ends with the given type.
    - project_folder: string, the folder containing the files; for a web file, the path of the temp folder.
    """
    import glob, os

    success = True
    if txt.startswith('http'):
        # a remote file on the web
        import requests
        from toolbox import get_conf
        proxies, = get_conf('proxies')
        r = requests.get(txt, proxies=proxies)
        with open('./gpt_log/temp' + type, 'wb+') as f: f.write(r.content)
        project_folder = './gpt_log/'
        file_manifest = ['./gpt_log/temp' + type]
    elif txt.endswith(type):
        # a file given directly
        file_manifest = [txt]
        project_folder = os.path.dirname(txt)
    elif os.path.exists(txt):
        # a local path; search recursively
        project_folder = txt
        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*' + type, recursive=True)]
        if len(file_manifest) == 0:
            success = False
    else:
        project_folder = None
        file_manifest = []
        success = False

    return success, file_manifest, project_folder
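
Hypothetical calls covering two of the three branches (a folder and a single file); the paths are placeholders.

ok, files, folder = get_files_from_everything("./docs", type=".md")
print(ok, len(files), folder)   # True and a non-empty list if ./docs holds .md files

ok, files, folder = get_files_from_everything("README.md", type=".md")
print(files)                    # ['README.md']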
 
spaces/Cyril666/ContourNet-ABI/modules/model_alignment.py DELETED
@@ -1,34 +0,0 @@
import torch
import torch.nn as nn
from fastai.vision import *

from modules.model import Model, _default_tfmer_cfg


class BaseAlignment(Model):
    def __init__(self, config):
        super().__init__(config)
        d_model = ifnone(config.model_alignment_d_model, _default_tfmer_cfg['d_model'])

        self.loss_weight = ifnone(config.model_alignment_loss_weight, 1.0)
        self.max_length = config.dataset_max_length + 1  # additional stop token
        self.w_att = nn.Linear(2 * d_model, d_model)
        self.cls = nn.Linear(d_model, self.charset.num_classes)

    def forward(self, l_feature, v_feature):
        """
        Args:
            l_feature: (N, T, E) where T is length, N is batch size and E is the model dim
            v_feature: (N, T, E), the same shape as l_feature
        """
        f = torch.cat((l_feature, v_feature), dim=2)
        f_att = torch.sigmoid(self.w_att(f))
        output = f_att * v_feature + (1 - f_att) * l_feature

        logits = self.cls(output)  # (N, T, C)
        pt_lengths = self._get_length(logits)

        return {'logits': logits, 'pt_lengths': pt_lengths, 'loss_weight': self.loss_weight,
                'name': 'alignment'}
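
The core of BaseAlignment is a learned sigmoid gate that mixes the linguistic and visual features element-wise; a standalone sketch of that fusion (stock PyTorch, toy sizes):

import torch

N, T, E = 2, 5, 8
l_feature = torch.randn(N, T, E)
v_feature = torch.randn(N, T, E)
w_att = torch.nn.Linear(2 * E, E)

gate = torch.sigmoid(w_att(torch.cat((l_feature, v_feature), dim=2)))
fused = gate * v_feature + (1 - gate) * l_feature
print(fused.shape)   # torch.Size([2, 5, 8]); gate near 1 keeps vision, near 0 keeps language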
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/linear-58a44b5e.js DELETED
@@ -1,2 +0,0 @@
- function W(n,t){return n==null||t==null?NaN:n<t?-1:n>t?1:n>=t?0:NaN}function En(n){let t=n,e=n,r=n;n.length!==2&&(t=(a,u)=>n(a)-u,e=W,r=(a,u)=>W(n(a),u));function i(a,u,s=0,c=a.length){if(s<c){if(e(u,u)!==0)return c;do{const h=s+c>>>1;r(a[h],u)<0?s=h+1:c=h}while(s<c)}return s}function f(a,u,s=0,c=a.length){if(s<c){if(e(u,u)!==0)return c;do{const h=s+c>>>1;r(a[h],u)<=0?s=h+1:c=h}while(s<c)}return s}function o(a,u,s=0,c=a.length){const h=i(a,u,s,c-1);return h>s&&t(a[h-1],u)>-t(a[h],u)?h-1:h}return{left:i,center:o,right:f}}function Un(n){return n===null?NaN:+n}function*Qt(n,t){if(t===void 0)for(let e of n)e!=null&&(e=+e)>=e&&(yield e);else{let e=-1;for(let r of n)(r=t(r,++e,n))!=null&&(r=+r)>=r&&(yield r)}}const Pn=En(W),Yn=Pn.right,Ut=Pn.left;En(Un).center;const Jn=Yn;var nn=Math.sqrt(50),tn=Math.sqrt(10),en=Math.sqrt(2);function Kn(n,t,e){var r,i=-1,f,o,a;if(t=+t,n=+n,e=+e,n===t&&e>0)return[n];if((r=t<n)&&(f=n,n=t,t=f),(a=jn(n,t,e))===0||!isFinite(a))return[];if(a>0){let u=Math.round(n/a),s=Math.round(t/a);for(u*a<n&&++u,s*a>t&&--s,o=new Array(f=s-u+1);++i<f;)o[i]=(u+i)*a}else{a=-a;let u=Math.round(n*a),s=Math.round(t*a);for(u/a<n&&++u,s/a>t&&--s,o=new Array(f=s-u+1);++i<f;)o[i]=(u+i)/a}return r&&o.reverse(),o}function jn(n,t,e){var r=(t-n)/Math.max(0,e),i=Math.floor(Math.log(r)/Math.LN10),f=r/Math.pow(10,i);return i>=0?(f>=nn?10:f>=tn?5:f>=en?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(f>=nn?10:f>=tn?5:f>=en?2:1)}function Wn(n,t,e){var r=Math.abs(t-n)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),f=r/i;return f>=nn?i*=10:f>=tn?i*=5:f>=en&&(i*=2),t<n?-i:i}function nt(n){return Math.abs(n=Math.round(n))>=1e21?n.toLocaleString("en").replace(/,/g,""):n.toString(10)}function G(n,t){if((e=(n=t?n.toExponential(t-1):n.toExponential()).indexOf("e"))<0)return null;var e,r=n.slice(0,e);return[r.length>1?r[0]+r.slice(2):r,+n.slice(e+1)]}function L(n){return n=G(Math.abs(n)),n?n[1]:NaN}function tt(n,t){return function(e,r){for(var i=e.length,f=[],o=0,a=n[0],u=0;i>0&&a>0&&(u+a+1>r&&(a=Math.max(1,r-u)),f.push(e.substring(i-=a,i+a)),!((u+=a+1)>r));)a=n[o=(o+1)%n.length];return f.reverse().join(t)}}function et(n){return function(t){return t.replace(/[0-9]/g,function(e){return n[+e]})}}var rt=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Z(n){if(!(t=rt.exec(n)))throw new Error("invalid format: "+n);var t;return new sn({fill:t[1],align:t[2],sign:t[3],symbol:t[4],zero:t[5],width:t[6],comma:t[7],precision:t[8]&&t[8].slice(1),trim:t[9],type:t[10]})}Z.prototype=sn.prototype;function sn(n){this.fill=n.fill===void 0?" 
":n.fill+"",this.align=n.align===void 0?">":n.align+"",this.sign=n.sign===void 0?"-":n.sign+"",this.symbol=n.symbol===void 0?"":n.symbol+"",this.zero=!!n.zero,this.width=n.width===void 0?void 0:+n.width,this.comma=!!n.comma,this.precision=n.precision===void 0?void 0:+n.precision,this.trim=!!n.trim,this.type=n.type===void 0?"":n.type+""}sn.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(this.width===void 0?"":Math.max(1,this.width|0))+(this.comma?",":"")+(this.precision===void 0?"":"."+Math.max(0,this.precision|0))+(this.trim?"~":"")+this.type};function it(n){n:for(var t=n.length,e=1,r=-1,i;e<t;++e)switch(n[e]){case".":r=i=e;break;case"0":r===0&&(r=e),i=e;break;default:if(!+n[e])break n;r>0&&(r=0);break}return r>0?n.slice(0,r)+n.slice(i+1):n}var qn;function at(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1],f=i-(qn=Math.max(-8,Math.min(8,Math.floor(i/3)))*3)+1,o=r.length;return f===o?r:f>o?r+new Array(f-o+1).join("0"):f>0?r.slice(0,f)+"."+r.slice(f):"0."+new Array(1-f).join("0")+G(n,Math.max(0,t+f-1))[0]}function xn(n,t){var e=G(n,t);if(!e)return n+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}const mn={"%":(n,t)=>(n*100).toFixed(t),b:n=>Math.round(n).toString(2),c:n=>n+"",d:nt,e:(n,t)=>n.toExponential(t),f:(n,t)=>n.toFixed(t),g:(n,t)=>n.toPrecision(t),o:n=>Math.round(n).toString(8),p:(n,t)=>xn(n*100,t),r:xn,s:at,X:n=>Math.round(n).toString(16).toUpperCase(),x:n=>Math.round(n).toString(16)};function bn(n){return n}var pn=Array.prototype.map,yn=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function ft(n){var t=n.grouping===void 0||n.thousands===void 0?bn:tt(pn.call(n.grouping,Number),n.thousands+""),e=n.currency===void 0?"":n.currency[0]+"",r=n.currency===void 0?"":n.currency[1]+"",i=n.decimal===void 0?".":n.decimal+"",f=n.numerals===void 0?bn:et(pn.call(n.numerals,String)),o=n.percent===void 0?"%":n.percent+"",a=n.minus===void 0?"−":n.minus+"",u=n.nan===void 0?"NaN":n.nan+"";function s(h){h=Z(h);var l=h.fill,p=h.align,g=h.sign,k=h.symbol,v=h.zero,N=h.width,R=h.comma,y=h.precision,H=h.trim,m=h.type;m==="n"?(R=!0,m="g"):mn[m]||(y===void 0&&(y=12),H=!0,m="g"),(v||l==="0"&&p==="=")&&(v=!0,l="0",p="=");var Vn=k==="$"?e:k==="#"&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",Xn=k==="$"?r:/[%p]/.test(m)?o:"",ln=mn[m],Qn=/[defgprs%]/.test(m);y=y===void 0?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,y)):Math.max(0,Math.min(20,y));function dn(d){var A=Vn,b=Xn,E,gn,F;if(m==="c")b=ln(d)+b,d="";else{d=+d;var $=d<0||1/d<0;if(d=isNaN(d)?u:ln(Math.abs(d),y),H&&(d=it(d)),$&&+d==0&&g!=="+"&&($=!1),A=($?g==="("?g:a:g==="-"||g==="("?"":g)+A,b=(m==="s"?yn[8+qn/3]:"")+b+($&&g==="("?")":""),Qn){for(E=-1,gn=d.length;++E<gn;)if(F=d.charCodeAt(E),48>F||F>57){b=(F===46?i+d.slice(E+1):d.slice(E))+b,d=d.slice(0,E);break}}}R&&!v&&(d=t(d,1/0));var B=A.length+d.length+b.length,_=B<N?new Array(N-B+1).join(l):"";switch(R&&v&&(d=t(_+d,_.length?N-b.length:1/0),_=""),p){case"<":d=A+d+b+_;break;case"=":d=A+_+d+b;break;case"^":d=_.slice(0,B=_.length>>1)+A+d+b+_.slice(B);break;default:d=_+A+d+b;break}return f(d)}return dn.toString=function(){return h+""},dn}function c(h,l){var p=s((h=Z(h),h.type="f",h)),g=Math.max(-8,Math.min(8,Math.floor(L(l)/3)))*3,k=Math.pow(10,-g),v=yn[8+g/3];return function(N){return p(k*N)+v}}return{format:s,formatPrefix:c}}var D,Ln,Hn;ot({thousands:",",grouping:[3],currency:["$",""]});function ot(n){return 
D=ft(n),Ln=D.format,Hn=D.formatPrefix,D}function ut(n){return Math.max(0,-L(Math.abs(n)))}function st(n,t){return Math.max(0,Math.max(-8,Math.min(8,Math.floor(L(t)/3)))*3-L(Math.abs(n)))}function ht(n,t){return n=Math.abs(n),t=Math.abs(t)-n,Math.max(0,L(t)-L(n))+1}const rn=Math.PI,an=2*rn,S=1e-6,ct=an-S;function fn(){this._x0=this._y0=this._x1=this._y1=null,this._=""}function In(){return new fn}fn.prototype=In.prototype={constructor:fn,moveTo:function(n,t){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)},closePath:function(){this._x1!==null&&(this._x1=this._x0,this._y1=this._y0,this._+="Z")},lineTo:function(n,t){this._+="L"+(this._x1=+n)+","+(this._y1=+t)},quadraticCurveTo:function(n,t,e,r){this._+="Q"+ +n+","+ +t+","+(this._x1=+e)+","+(this._y1=+r)},bezierCurveTo:function(n,t,e,r,i,f){this._+="C"+ +n+","+ +t+","+ +e+","+ +r+","+(this._x1=+i)+","+(this._y1=+f)},arcTo:function(n,t,e,r,i){n=+n,t=+t,e=+e,r=+r,i=+i;var f=this._x1,o=this._y1,a=e-n,u=r-t,s=f-n,c=o-t,h=s*s+c*c;if(i<0)throw new Error("negative radius: "+i);if(this._x1===null)this._+="M"+(this._x1=n)+","+(this._y1=t);else if(h>S)if(!(Math.abs(c*a-u*s)>S)||!i)this._+="L"+(this._x1=n)+","+(this._y1=t);else{var l=e-f,p=r-o,g=a*a+u*u,k=l*l+p*p,v=Math.sqrt(g),N=Math.sqrt(h),R=i*Math.tan((rn-Math.acos((g+h-k)/(2*v*N)))/2),y=R/N,H=R/v;Math.abs(y-1)>S&&(this._+="L"+(n+y*s)+","+(t+y*c)),this._+="A"+i+","+i+",0,0,"+ +(c*l>s*p)+","+(this._x1=n+H*a)+","+(this._y1=t+H*u)}},arc:function(n,t,e,r,i,f){n=+n,t=+t,e=+e,f=!!f;var o=e*Math.cos(r),a=e*Math.sin(r),u=n+o,s=t+a,c=1^f,h=f?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);this._x1===null?this._+="M"+u+","+s:(Math.abs(this._x1-u)>S||Math.abs(this._y1-s)>S)&&(this._+="L"+u+","+s),e&&(h<0&&(h=h%an+an),h>ct?this._+="A"+e+","+e+",0,1,"+c+","+(n-o)+","+(t-a)+"A"+e+","+e+",0,1,"+c+","+(this._x1=u)+","+(this._y1=s):h>S&&(this._+="A"+e+","+e+",0,"+ +(h>=rn)+","+c+","+(this._x1=n+e*Math.cos(i))+","+(this._y1=t+e*Math.sin(i))))},rect:function(n,t,e,r){this._+="M"+(this._x0=this._x1=+n)+","+(this._y0=this._y1=+t)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};function P(n){return function(){return n}}function lt(n){return typeof n=="object"&&"length"in n?n:Array.from(n)}function Tn(n){this._context=n}Tn.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||this._line!==0&&this._point===1)&&this._context.closePath(),this._line=1-this._line},point:function(n,t){switch(n=+n,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(n,t):this._context.moveTo(n,t);break;case 1:this._point=2;default:this._context.lineTo(n,t);break}}};function dt(n){return new Tn(n)}function gt(n){return n[0]}function xt(n){return n[1]}function Yt(n,t){var e=P(!0),r=null,i=dt,f=null;n=typeof n=="function"?n:n===void 0?gt:P(n),t=typeof t=="function"?t:t===void 0?xt:P(t);function o(a){var u,s=(a=lt(a)).length,c,h=!1,l;for(r==null&&(f=i(l=In())),u=0;u<=s;++u)!(u<s&&e(c=a[u],u,a))===h&&((h=!h)?f.lineStart():f.lineEnd()),h&&f.point(+n(c,u,a),+t(c,u,a));if(l)return f=null,l+""||null}return o.x=function(a){return arguments.length?(n=typeof a=="function"?a:P(+a),o):n},o.y=function(a){return arguments.length?(t=typeof a=="function"?a:P(+a),o):t},o.defined=function(a){return arguments.length?(e=typeof a=="function"?a:P(!!a),o):e},o.curve=function(a){return arguments.length?(i=a,r!=null&&(f=i(r)),o):i},o.context=function(a){return arguments.length?(a==null?r=f=null:f=i(r=a),o):r},o}function 
mt(n,t){switch(arguments.length){case 0:break;case 1:this.range(n);break;default:this.range(t).domain(n);break}return this}function Jt(n,t){switch(arguments.length){case 0:break;case 1:{typeof n=="function"?this.interpolator(n):this.range(n);break}default:{this.domain(n),typeof t=="function"?this.interpolator(t):this.range(t);break}}return this}function hn(n,t,e){n.prototype=t.prototype=e,e.constructor=n}function zn(n,t){var e=Object.create(n.prototype);for(var r in t)e[r]=t[r];return e}function C(){}var I=.7,V=1/I,q="\\s*([+-]?\\d+)\\s*",T="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)\\s*",M="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)%\\s*",bt=/^#([0-9a-f]{3,8})$/,pt=new RegExp("^rgb\\("+[q,q,q]+"\\)$"),yt=new RegExp("^rgb\\("+[M,M,M]+"\\)$"),wt=new RegExp("^rgba\\("+[q,q,q,T]+"\\)$"),Mt=new RegExp("^rgba\\("+[M,M,M,T]+"\\)$"),vt=new RegExp("^hsl\\("+[T,M,M]+"\\)$"),_t=new RegExp("^hsla\\("+[T,M,M,T]+"\\)$"),wn={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:167
77215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};hn(C,z,{copy:function(n){return Object.assign(new this.constructor,this,n)},displayable:function(){return this.rgb().displayable()},hex:Mn,formatHex:Mn,formatHsl:Nt,formatRgb:vn,toString:vn});function Mn(){return this.rgb().formatHex()}function Nt(){return Cn(this).formatHsl()}function vn(){return this.rgb().formatRgb()}function z(n){var t,e;return n=(n+"").trim().toLowerCase(),(t=bt.exec(n))?(e=t[1].length,t=parseInt(t[1],16),e===6?_n(t):e===3?new x(t>>8&15|t>>4&240,t>>4&15|t&240,(t&15)<<4|t&15,1):e===8?O(t>>24&255,t>>16&255,t>>8&255,(t&255)/255):e===4?O(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|t&240,((t&15)<<4|t&15)/255):null):(t=pt.exec(n))?new x(t[1],t[2],t[3],1):(t=yt.exec(n))?new x(t[1]*255/100,t[2]*255/100,t[3]*255/100,1):(t=wt.exec(n))?O(t[1],t[2],t[3],t[4]):(t=Mt.exec(n))?O(t[1]*255/100,t[2]*255/100,t[3]*255/100,t[4]):(t=vt.exec(n))?An(t[1],t[2]/100,t[3]/100,1):(t=_t.exec(n))?An(t[1],t[2]/100,t[3]/100,t[4]):wn.hasOwnProperty(n)?_n(wn[n]):n==="transparent"?new x(NaN,NaN,NaN,0):null}function _n(n){return new x(n>>16&255,n>>8&255,n&255,1)}function O(n,t,e,r){return r<=0&&(n=t=e=NaN),new x(n,t,e,r)}function kt(n){return n instanceof C||(n=z(n)),n?(n=n.rgb(),new x(n.r,n.g,n.b,n.opacity)):new x}function X(n,t,e,r){return arguments.length===1?kt(n):new x(n,t,e,r??1)}function x(n,t,e,r){this.r=+n,this.g=+t,this.b=+e,this.opacity=+r}hn(x,X,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new x(this.r*n,this.g*n,this.b*n,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Nn,formatHex:Nn,formatRgb:kn,toString:kn}));function Nn(){return"#"+Y(this.r)+Y(this.g)+Y(this.b)}function kn(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(n===1?")":", "+n+")")}function Y(n){return n=Math.max(0,Math.min(255,Math.round(n)||0)),(n<16?"0":"")+n.toString(16)}function An(n,t,e,r){return r<=0?n=t=e=NaN:e<=0||e>=1?n=t=NaN:t<=0&&(n=NaN),new w(n,t,e,r)}function Cn(n){if(n instanceof w)return new w(n.h,n.s,n.l,n.opacity);if(n instanceof C||(n=z(n)),!n)return new w;if(n instanceof w)return n;n=n.rgb();var t=n.r/255,e=n.g/255,r=n.b/255,i=Math.min(t,e,r),f=Math.max(t,e,r),o=NaN,a=f-i,u=(f+i)/2;return a?(t===f?o=(e-r)/a+(e<r)*6:e===f?o=(r-t)/a+2:o=(t-e)/a+4,a/=u<.5?f+i:2-f-i,o*=60):a=u>0&&u<1?0:o,new w(o,a,u,n.opacity)}function At(n,t,e,r){return arguments.length===1?Cn(n):new w(n,t,e,r??1)}function w(n,t,e,r){this.h=+n,this.s=+t,this.l=+e,this.opacity=+r}hn(w,At,zn(C,{brighter:function(n){return n=n==null?V:Math.pow(V,n),new w(this.h,this.s,this.l*n,this.opacity)},darker:function(n){return n=n==null?I:Math.pow(I,n),new w(this.h,this.s,this.l*n,this.opacity)},rgb:function(){var n=this.h%360+(this.h<0)*360,t=isNaN(n)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*t,i=2*e-r;return new x(J(n>=240?n-240:n+120,i,r),J(n,i,r),J(n<120?n+240:n-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var n=this.opacity;return n=isNaN(n)?1:Math.max(0,Math.min(1,n)),(n===1?"hsl(":"hsla(")+(this.h||0)+", 
"+(this.s||0)*100+"%, "+(this.l||0)*100+"%"+(n===1?")":", "+n+")")}}));function J(n,t,e){return(n<60?t+(e-t)*n/60:n<180?e:n<240?t+(e-t)*(240-n)/60:t)*255}function Fn(n,t,e,r,i){var f=n*n,o=f*n;return((1-3*n+3*f-o)*t+(4-6*f+3*o)*e+(1+3*n+3*f-3*o)*r+o*i)/6}function St(n){var t=n.length-1;return function(e){var r=e<=0?e=0:e>=1?(e=1,t-1):Math.floor(e*t),i=n[r],f=n[r+1],o=r>0?n[r-1]:2*i-f,a=r<t-1?n[r+2]:2*f-i;return Fn((e-r/t)*t,o,i,f,a)}}function Rt(n){var t=n.length;return function(e){var r=Math.floor(((e%=1)<0?++e:e)*t),i=n[(r+t-1)%t],f=n[r%t],o=n[(r+1)%t],a=n[(r+2)%t];return Fn((e-r/t)*t,i,f,o,a)}}const U=n=>()=>n;function $n(n,t){return function(e){return n+e*t}}function Et(n,t,e){return n=Math.pow(n,e),t=Math.pow(t,e)-n,e=1/e,function(r){return Math.pow(n+r*t,e)}}function Kt(n,t){var e=t-n;return e?$n(n,e>180||e<-180?e-360*Math.round(e/360):e):U(isNaN(n)?t:n)}function Pt(n){return(n=+n)==1?Bn:function(t,e){return e-t?Et(t,e,n):U(isNaN(t)?e:t)}}function Bn(n,t){var e=t-n;return e?$n(n,e):U(isNaN(n)?t:n)}const Sn=function n(t){var e=Pt(t);function r(i,f){var o=e((i=X(i)).r,(f=X(f)).r),a=e(i.g,f.g),u=e(i.b,f.b),s=Bn(i.opacity,f.opacity);return function(c){return i.r=o(c),i.g=a(c),i.b=u(c),i.opacity=s(c),i+""}}return r.gamma=n,r}(1);function Dn(n){return function(t){var e=t.length,r=new Array(e),i=new Array(e),f=new Array(e),o,a;for(o=0;o<e;++o)a=X(t[o]),r[o]=a.r||0,i[o]=a.g||0,f[o]=a.b||0;return r=n(r),i=n(i),f=n(f),a.opacity=1,function(u){return a.r=r(u),a.g=i(u),a.b=f(u),a+""}}}var Wt=Dn(St),ne=Dn(Rt);function On(n,t){t||(t=[]);var e=n?Math.min(t.length,n.length):0,r=t.slice(),i;return function(f){for(i=0;i<e;++i)r[i]=n[i]*(1-f)+t[i]*f;return r}}function Gn(n){return ArrayBuffer.isView(n)&&!(n instanceof DataView)}function te(n,t){return(Gn(t)?On:Zn)(n,t)}function Zn(n,t){var e=t?t.length:0,r=n?Math.min(e,n.length):0,i=new Array(r),f=new Array(e),o;for(o=0;o<r;++o)i[o]=cn(n[o],t[o]);for(;o<e;++o)f[o]=t[o];return function(a){for(o=0;o<r;++o)f[o]=i[o](a);return f}}function jt(n,t){var e=new Date;return n=+n,t=+t,function(r){return e.setTime(n*(1-r)+t*r),e}}function Q(n,t){return n=+n,t=+t,function(e){return n*(1-e)+t*e}}function qt(n,t){var e={},r={},i;(n===null||typeof n!="object")&&(n={}),(t===null||typeof t!="object")&&(t={});for(i in t)i in n?e[i]=cn(n[i],t[i]):r[i]=t[i];return function(f){for(i in e)r[i]=e[i](f);return r}}var on=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g,K=new RegExp(on.source,"g");function Lt(n){return function(){return n}}function Ht(n){return function(t){return n(t)+""}}function It(n,t){var e=on.lastIndex=K.lastIndex=0,r,i,f,o=-1,a=[],u=[];for(n=n+"",t=t+"";(r=on.exec(n))&&(i=K.exec(t));)(f=i.index)>e&&(f=t.slice(e,f),a[o]?a[o]+=f:a[++o]=f),(r=r[0])===(i=i[0])?a[o]?a[o]+=i:a[++o]=i:(a[++o]=null,u.push({i:o,x:Q(r,i)})),e=K.lastIndex;return e<t.length&&(f=t.slice(e),a[o]?a[o]+=f:a[++o]=f),a.length<2?u[0]?Ht(u[0].x):Lt(t):(t=u.length,function(s){for(var c=0,h;c<t;++c)a[(h=u[c]).i]=h.x(s);return a.join("")})}function cn(n,t){var e=typeof t,r;return t==null||e==="boolean"?U(t):(e==="number"?Q:e==="string"?(r=z(t))?(t=r,Sn):It:t instanceof z?Sn:t instanceof Date?jt:Gn(t)?On:Array.isArray(t)?Zn:typeof t.valueOf!="function"&&typeof t.toString!="function"||isNaN(t)?qt:Q)(n,t)}function Tt(n,t){return n=+n,t=+t,function(e){return Math.round(n*(1-e)+t*e)}}function zt(n){return function(){return n}}function Ct(n){return+n}var Rn=[0,1];function j(n){return n}function un(n,t){return(t-=n=+n)?function(e){return(e-n)/t}:zt(isNaN(t)?NaN:.5)}function Ft(n,t){var e;return 
n>t&&(e=n,n=t,t=e),function(r){return Math.max(n,Math.min(t,r))}}function $t(n,t,e){var r=n[0],i=n[1],f=t[0],o=t[1];return i<r?(r=un(i,r),f=e(o,f)):(r=un(r,i),f=e(f,o)),function(a){return f(r(a))}}function Bt(n,t,e){var r=Math.min(n.length,t.length)-1,i=new Array(r),f=new Array(r),o=-1;for(n[r]<n[0]&&(n=n.slice().reverse(),t=t.slice().reverse());++o<r;)i[o]=un(n[o],n[o+1]),f[o]=e(t[o],t[o+1]);return function(a){var u=Jn(n,a,1,r)-1;return f[u](i[u](a))}}function Dt(n,t){return t.domain(n.domain()).range(n.range()).interpolate(n.interpolate()).clamp(n.clamp()).unknown(n.unknown())}function Ot(){var n=Rn,t=Rn,e=cn,r,i,f,o=j,a,u,s;function c(){var l=Math.min(n.length,t.length);return o!==j&&(o=Ft(n[0],n[l-1])),a=l>2?Bt:$t,u=s=null,h}function h(l){return l==null||isNaN(l=+l)?f:(u||(u=a(n.map(r),t,e)))(r(o(l)))}return h.invert=function(l){return o(i((s||(s=a(t,n.map(r),Q)))(l)))},h.domain=function(l){return arguments.length?(n=Array.from(l,Ct),c()):n.slice()},h.range=function(l){return arguments.length?(t=Array.from(l),c()):t.slice()},h.rangeRound=function(l){return t=Array.from(l),e=Tt,c()},h.clamp=function(l){return arguments.length?(o=l?!0:j,c()):o!==j},h.interpolate=function(l){return arguments.length?(e=l,c()):e},h.unknown=function(l){return arguments.length?(f=l,h):f},function(l,p){return r=l,i=p,c()}}function Gt(){return Ot()(j,j)}function Zt(n,t,e,r){var i=Wn(n,t,e),f;switch(r=Z(r??",f"),r.type){case"s":{var o=Math.max(Math.abs(n),Math.abs(t));return r.precision==null&&!isNaN(f=st(i,o))&&(r.precision=f),Hn(r,o)}case"":case"e":case"g":case"p":case"r":{r.precision==null&&!isNaN(f=ht(i,Math.max(Math.abs(n),Math.abs(t))))&&(r.precision=f-(r.type==="e"));break}case"f":case"%":{r.precision==null&&!isNaN(f=ut(i))&&(r.precision=f-(r.type==="%")*2);break}}return Ln(r)}function Vt(n){var t=n.domain;return n.ticks=function(e){var r=t();return Kn(r[0],r[r.length-1],e??10)},n.tickFormat=function(e,r){var i=t();return Zt(i[0],i[i.length-1],e??10,r)},n.nice=function(e){e==null&&(e=10);var r=t(),i=0,f=r.length-1,o=r[i],a=r[f],u,s,c=10;for(a<o&&(s=o,o=a,a=s,s=i,i=f,f=s);c-- >0;){if(s=jn(o,a,e),s===u)return r[i]=o,r[f]=a,t(r);if(s>0)o=Math.floor(o/s)*s,a=Math.ceil(a/s)*s;else if(s<0)o=Math.ceil(o*s)/s,a=Math.floor(a*s)/s;else break;u=s}return n},n}function Xt(){var n=Gt();return n.copy=function(){return Dt(n,Xt())},mt.apply(n,arguments),Vt(n)}export{Yn as $,At as A,Bn as B,C,cn as D,te as E,St as F,Rt as G,jt as H,On as I,qt as J,Sn as K,Wt as L,ne as M,Tt as N,It as O,Ct as P,Vt as Q,x as R,Ot as S,Dt as T,Kn as U,j as V,Jn as W,Gt as X,Jt as Y,Xt as Z,Yt as _,W as a,Zt as a0,X as a1,Ut as a2,Un as b,En as c,ht as d,st as e,Z as f,Ln as g,Hn as h,ft as i,P as j,In as k,dt as l,lt as m,Qt as n,mt as o,ut as p,hn as q,kt as r,zn as s,Wn as t,V as u,I as v,Kt as w,gt as x,xt as y,Q as z};
2
- //# sourceMappingURL=linear-58a44b5e.js.map
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/UploadText-28892309.js DELETED
@@ -1,2 +0,0 @@
1
- import{S as h,e as S,s as T,N as g,P as c,O as y,K as U,p as q,M as l,R as v,n as b,A as w,a4 as A}from"./index-3370be2a.js";import{X as C}from"./Blocks-f0129fcd.js";function K(t){let e,o=t[1](t[2][t[0]])+"",i,r,s,n,_=t[1]("or")+"",d,m,k,f=t[1]("interface.click_to_upload")+"",u;return{c(){e=g("div"),i=c(o),r=y(),s=g("span"),n=c("- "),d=c(_),m=c(" -"),k=y(),u=c(f),U(s,"class","or svelte-1ck5uk8"),U(e,"class","wrap svelte-1ck5uk8")},m(a,p){q(a,e,p),l(e,i),l(e,r),l(e,s),l(s,n),l(s,d),l(s,m),l(e,k),l(e,u)},p(a,[p]){p&3&&o!==(o=a[1](a[2][a[0]])+"")&&v(i,o),p&2&&_!==(_=a[1]("or")+"")&&v(d,_),p&2&&f!==(f=a[1]("interface.click_to_upload")+"")&&v(u,f)},i:b,o:b,d(a){a&&w(e)}}}function M(t,e,o){let i;A(t,C,n=>o(1,i=n));let{type:r="file"}=e;const s={image:"interface.drop_image",video:"interface.drop_video",audio:"interface.drop_audio",file:"interface.drop_file",csv:"interface.drop_csv"};return t.$$set=n=>{"type"in n&&o(0,r=n.type)},[r,i,s]}class P extends h{constructor(e){super(),S(this,e,M,K,T,{type:0})}}export{P as U};
2
- //# sourceMappingURL=UploadText-28892309.js.map
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/utils.py DELETED
@@ -1,575 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import asyncio
4
- import base64
5
- import json
6
- import mimetypes
7
- import os
8
- import pkgutil
9
- import secrets
10
- import shutil
11
- import tempfile
12
- import warnings
13
- from concurrent.futures import CancelledError
14
- from dataclasses import dataclass, field
15
- from datetime import datetime
16
- from enum import Enum
17
- from pathlib import Path
18
- from threading import Lock
19
- from typing import Any, Callable, Optional
20
-
21
- import fsspec.asyn
22
- import httpx
23
- import huggingface_hub
24
- import requests
25
- from huggingface_hub import SpaceStage
26
- from websockets.legacy.protocol import WebSocketCommonProtocol
27
-
28
- API_URL = "api/predict/"
29
- WS_URL = "queue/join"
30
- UPLOAD_URL = "upload"
31
- CONFIG_URL = "config"
32
- API_INFO_URL = "info"
33
- RAW_API_INFO_URL = "info?serialize=False"
34
- SPACE_FETCHER_URL = "https://gradio-space-api-fetcher-v2.hf.space/api"
35
- RESET_URL = "reset"
36
- SPACE_URL = "https://hf.space/{}"
37
-
38
- SKIP_COMPONENTS = {
39
- "state",
40
- "row",
41
- "column",
42
- "tabs",
43
- "tab",
44
- "tabitem",
45
- "box",
46
- "form",
47
- "accordion",
48
- "group",
49
- "interpretation",
50
- "dataset",
51
- }
52
- STATE_COMPONENT = "state"
53
- INVALID_RUNTIME = [
54
- SpaceStage.NO_APP_FILE,
55
- SpaceStage.CONFIG_ERROR,
56
- SpaceStage.BUILD_ERROR,
57
- SpaceStage.RUNTIME_ERROR,
58
- SpaceStage.PAUSED,
59
- ]
60
-
61
- __version__ = (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip()
62
-
63
-
64
- class TooManyRequestsError(Exception):
65
- """Raised when the API returns a 429 status code."""
66
-
67
- pass
68
-
69
-
70
- class QueueError(Exception):
71
- """Raised when the queue is full or there is an issue adding a job to the queue."""
72
-
73
- pass
74
-
75
-
76
- class InvalidAPIEndpointError(Exception):
77
- """Raised when the API endpoint is invalid."""
78
-
79
- pass
80
-
81
-
82
- class SpaceDuplicationError(Exception):
83
- """Raised when something goes wrong with a Space Duplication."""
84
-
85
- pass
86
-
87
-
88
- class Status(Enum):
89
- """Status codes presented to client users."""
90
-
91
- STARTING = "STARTING"
92
- JOINING_QUEUE = "JOINING_QUEUE"
93
- QUEUE_FULL = "QUEUE_FULL"
94
- IN_QUEUE = "IN_QUEUE"
95
- SENDING_DATA = "SENDING_DATA"
96
- PROCESSING = "PROCESSING"
97
- ITERATING = "ITERATING"
98
- PROGRESS = "PROGRESS"
99
- FINISHED = "FINISHED"
100
- CANCELLED = "CANCELLED"
101
-
102
- @staticmethod
103
- def ordering(status: Status) -> int:
104
- """Order of messages. Helpful for testing."""
105
- order = [
106
- Status.STARTING,
107
- Status.JOINING_QUEUE,
108
- Status.QUEUE_FULL,
109
- Status.IN_QUEUE,
110
- Status.SENDING_DATA,
111
- Status.PROCESSING,
112
- Status.PROGRESS,
113
- Status.ITERATING,
114
- Status.FINISHED,
115
- Status.CANCELLED,
116
- ]
117
- return order.index(status)
118
-
119
- def __lt__(self, other: Status):
120
- return self.ordering(self) < self.ordering(other)
121
-
122
- @staticmethod
123
- def msg_to_status(msg: str) -> Status:
124
- """Map the raw message from the backend to the status code presented to users."""
125
- return {
126
- "send_hash": Status.JOINING_QUEUE,
127
- "queue_full": Status.QUEUE_FULL,
128
- "estimation": Status.IN_QUEUE,
129
- "send_data": Status.SENDING_DATA,
130
- "process_starts": Status.PROCESSING,
131
- "process_generating": Status.ITERATING,
132
- "process_completed": Status.FINISHED,
133
- "progress": Status.PROGRESS,
134
- }[msg]
135
-
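For orientation, a minimal sketch of how a consumer might use these status helpers; the message payload below is hypothetical:

    # Hypothetical backend payload: msg_to_status maps the raw "msg" field to a
    # user-facing Status, and __lt__ orders statuses via Status.ordering.
    msg = {"msg": "estimation", "rank": 3, "queue_size": 10}
    status = Status.msg_to_status(msg["msg"])  # Status.IN_QUEUE
    assert status < Status.PROCESSING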
136
-
137
- @dataclass
138
- class ProgressUnit:
139
- index: Optional[int]
140
- length: Optional[int]
141
- unit: Optional[str]
142
- progress: Optional[float]
143
- desc: Optional[str]
144
-
145
- @classmethod
146
- def from_ws_msg(cls, data: list[dict]) -> list[ProgressUnit]:
147
- return [
148
- cls(
149
- index=d.get("index"),
150
- length=d.get("length"),
151
- unit=d.get("unit"),
152
- progress=d.get("progress"),
153
- desc=d.get("desc"),
154
- )
155
- for d in data
156
- ]
157
-
158
-
159
- @dataclass
160
- class StatusUpdate:
161
- """Update message sent from the worker thread to the Job on the main thread."""
162
-
163
- code: Status
164
- rank: int | None
165
- queue_size: int | None
166
- eta: float | None
167
- success: bool | None
168
- time: datetime | None
169
- progress_data: list[ProgressUnit] | None
170
-
171
-
172
- def create_initial_status_update():
173
- return StatusUpdate(
174
- code=Status.STARTING,
175
- rank=None,
176
- queue_size=None,
177
- eta=None,
178
- success=None,
179
- time=datetime.now(),
180
- progress_data=None,
181
- )
182
-
183
-
184
- @dataclass
185
- class JobStatus:
186
- """The job status.
187
-
188
- Keeps track of the latest status update and intermediate outputs (not yet implemented).
189
- """
190
-
191
- latest_status: StatusUpdate = field(default_factory=create_initial_status_update)
192
- outputs: list[Any] = field(default_factory=list)
193
-
194
-
195
- @dataclass
196
- class Communicator:
197
- """Helper class to help communicate between the worker thread and main thread."""
198
-
199
- lock: Lock
200
- job: JobStatus
201
- prediction_processor: Callable[..., tuple]
202
- reset_url: str
203
- should_cancel: bool = False
204
-
205
-
206
- ########################
207
- # Network utils
208
- ########################
209
-
210
-
211
- def is_http_url_like(possible_url: str) -> bool:
212
- """
213
- Check if the given string looks like an HTTP(S) URL.
214
- """
215
- return possible_url.startswith(("http://", "https://"))
216
-
217
-
218
- def probe_url(possible_url: str) -> bool:
219
- """
220
- Probe the given URL to see if it responds successfully (HEAD first, falling back to GET if HEAD returns 405).
221
- """
222
- headers = {"User-Agent": "gradio (https://gradio.app/; [email protected])"}
223
- try:
224
- with requests.session() as sess:
225
- head_request = sess.head(possible_url, headers=headers)
226
- if head_request.status_code == 405:
227
- return sess.get(possible_url, headers=headers).ok
228
- return head_request.ok
229
- except Exception:
230
- return False
231
-
232
-
233
- def is_valid_url(possible_url: str) -> bool:
234
- """
235
- Check if the given string is a valid URL.
236
- """
237
- warnings.warn(
238
- "is_valid_url should not be used. "
239
- "Use is_http_url_like() and probe_url(), as suitable, instead.",
240
- )
241
- return is_http_url_like(possible_url) and probe_url(possible_url)
242
-
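As a quick illustration of the split between the two preferred helpers (the URL is hypothetical): is_http_url_like is a pure string check, while probe_url performs actual network I/O.

    # is_http_url_like("https://example.com/a.png") -> True (prefix check only)
    # probe_url("https://example.com/a.png")        -> True only if the server
    #                                                  answers HEAD (or GET on 405)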
243
-
244
- async def get_pred_from_ws(
245
- websocket: WebSocketCommonProtocol,
246
- data: str,
247
- hash_data: str,
248
- helper: Communicator | None = None,
249
- ) -> dict[str, Any]:
250
- completed = False
251
- resp = {}
252
- while not completed:
253
- # Receive message in the background so that we can
254
- # cancel even while running a long pred
255
- task = asyncio.create_task(websocket.recv())
256
- while not task.done():
257
- if helper:
258
- with helper.lock:
259
- if helper.should_cancel:
260
- # Need to reset the iterator state since the client
261
- # will not reset the session
262
- async with httpx.AsyncClient() as http:
263
- reset = http.post(
264
- helper.reset_url, json=json.loads(hash_data)
265
- )
266
- # Retrieve cancel exception from task
267
- # otherwise will get nasty warning in console
268
- task.cancel()
269
- await asyncio.gather(task, reset, return_exceptions=True)
270
- raise CancelledError()
271
- # Need to suspend this coroutine so that task actually runs
272
- await asyncio.sleep(0.01)
273
- msg = task.result()
274
- resp = json.loads(msg)
275
- if helper:
276
- with helper.lock:
277
- has_progress = "progress_data" in resp
278
- status_update = StatusUpdate(
279
- code=Status.msg_to_status(resp["msg"]),
280
- queue_size=resp.get("queue_size"),
281
- rank=resp.get("rank", None),
282
- success=resp.get("success"),
283
- time=datetime.now(),
284
- eta=resp.get("rank_eta"),
285
- progress_data=ProgressUnit.from_ws_msg(resp["progress_data"])
286
- if has_progress
287
- else None,
288
- )
289
- output = resp.get("output", {}).get("data", [])
290
- if output and status_update.code != Status.FINISHED:
291
- try:
292
- result = helper.prediction_processor(*output)
293
- except Exception as e:
294
- result = [e]
295
- helper.job.outputs.append(result)
296
- helper.job.latest_status = status_update
297
- if resp["msg"] == "queue_full":
298
- raise QueueError("Queue is full! Please try again.")
299
- if resp["msg"] == "send_hash":
300
- await websocket.send(hash_data)
301
- elif resp["msg"] == "send_data":
302
- await websocket.send(data)
303
- completed = resp["msg"] == "process_completed"
304
- return resp["output"]
305
-
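The loop above wraps websocket.recv() in a task so a cancellation request can be honoured even while a long prediction is still streaming. A minimal sketch of that pattern, assuming recv is any long-blocking awaitable factory and should_cancel is a plain callable (both names hypothetical):

    import asyncio

    async def recv_with_cancel(recv, should_cancel):
        task = asyncio.create_task(recv())
        while not task.done():
            if should_cancel():
                task.cancel()
                # Collect the CancelledError so it is not logged as unretrieved.
                await asyncio.gather(task, return_exceptions=True)
                raise asyncio.CancelledError()
            # Suspend this coroutine so the recv task actually runs.
            await asyncio.sleep(0.01)
        return task.result()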
306
-
307
- ########################
308
- # Data processing utils
309
- ########################
310
-
311
-
312
- def download_tmp_copy_of_file(
313
- url_path: str, hf_token: str | None = None, dir: str | None = None
314
- ) -> str:
315
- if dir is not None:
316
- os.makedirs(dir, exist_ok=True)
317
- headers = {"Authorization": "Bearer " + hf_token} if hf_token else {}
318
- directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
319
- directory.mkdir(exist_ok=True, parents=True)
320
- file_path = directory / Path(url_path).name
321
-
322
- with requests.get(url_path, headers=headers, stream=True) as r:
323
- r.raise_for_status()
324
- with open(file_path, "wb") as f:
325
- shutil.copyfileobj(r.raw, f)
326
- return str(file_path.resolve())
327
-
328
-
329
- def create_tmp_copy_of_file(file_path: str, dir: str | None = None) -> str:
330
- directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
331
- directory.mkdir(exist_ok=True, parents=True)
332
- dest = directory / Path(file_path).name
333
- shutil.copy2(file_path, dest)
334
- return str(dest.resolve())
335
-
336
-
337
- def get_mimetype(filename: str) -> str | None:
338
- if filename.endswith(".vtt"):
339
- return "text/vtt"
340
- mimetype = mimetypes.guess_type(filename)[0]
341
- if mimetype is not None:
342
- mimetype = mimetype.replace("x-wav", "wav").replace("x-flac", "flac")
343
- return mimetype
344
-
345
-
346
- def get_extension(encoding: str) -> str | None:
347
- encoding = encoding.replace("audio/wav", "audio/x-wav")
348
- type = mimetypes.guess_type(encoding)[0]
349
- if type == "audio/flac": # flac is not supported by mimetypes
350
- return "flac"
351
- elif type is None:
352
- return None
353
- extension = mimetypes.guess_extension(type)
354
- if extension is not None and extension.startswith("."):
355
- extension = extension[1:]
356
- return extension
357
-
358
-
359
- def encode_file_to_base64(f: str | Path):
360
- with open(f, "rb") as file:
361
- encoded_string = base64.b64encode(file.read())
362
- base64_str = str(encoded_string, "utf-8")
363
- mimetype = get_mimetype(str(f))
364
- return (
365
- "data:"
366
- + (mimetype if mimetype is not None else "")
367
- + ";base64,"
368
- + base64_str
369
- )
370
-
371
-
372
- def encode_url_to_base64(url: str):
373
- resp = requests.get(url)
374
- resp.raise_for_status()
375
- encoded_string = base64.b64encode(resp.content)
376
- base64_str = str(encoded_string, "utf-8")
377
- mimetype = get_mimetype(url)
378
- return (
379
- "data:" + (mimetype if mimetype is not None else "") + ";base64," + base64_str
380
- )
381
-
382
-
383
- def encode_url_or_file_to_base64(path: str | Path):
384
- path = str(path)
385
- if is_http_url_like(path):
386
- return encode_url_to_base64(path)
387
- return encode_file_to_base64(path)
388
-
389
-
390
- def decode_base64_to_binary(encoding: str) -> tuple[bytes, str | None]:
391
- extension = get_extension(encoding)
392
- data = encoding.rsplit(",", 1)[-1]
393
- return base64.b64decode(data), extension
394
-
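A round-trip example of the base64 helpers above (file name and contents hypothetical):

    # encode_file_to_base64("hello.txt")          # file containing b"hello"
    #   -> "data:text/plain;base64,aGVsbG8="
    # decode_base64_to_binary("data:text/plain;base64,aGVsbG8=")
    #   -> (b"hello", "txt")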
395
-
396
- def strip_invalid_filename_characters(filename: str, max_bytes: int = 200) -> str:
397
- """Strips invalid characters from a filename and ensures that the file_length is less than `max_bytes` bytes."""
398
- filename = "".join([char for char in filename if char.isalnum() or char in "._- "])
399
- filename_len = len(filename.encode())
400
- if filename_len > max_bytes:
401
- while filename_len > max_bytes:
402
- if len(filename) == 0:
403
- break
404
- filename = filename[:-1]
405
- filename_len = len(filename.encode())
406
- return filename
407
-
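For example (illustrative input): alphanumerics plus ".", "_", "-" and spaces survive; everything else is dropped before the byte-length truncation.

    # strip_invalid_filename_characters("ré/port:2023.csv")
    #   -> "réport2023.csv"   ("/" and ":" removed; "é" counts as alphanumeric)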
408
-
409
- def sanitize_parameter_names(original_name: str) -> str:
410
- """Cleans up a Python parameter name to make the API info more readable."""
411
- return (
412
- "".join([char for char in original_name if char.isalnum() or char in " _"])
413
- .replace(" ", "_")
414
- .lower()
415
- )
416
-
417
-
418
- def decode_base64_to_file(
419
- encoding: str,
420
- file_path: str | None = None,
421
- dir: str | Path | None = None,
422
- prefix: str | None = None,
423
- ):
424
- directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20)
425
- directory.mkdir(exist_ok=True, parents=True)
426
- data, extension = decode_base64_to_binary(encoding)
427
- if file_path is not None and prefix is None:
428
- filename = Path(file_path).name
429
- prefix = filename
430
- if "." in filename:
431
- prefix = filename[0 : filename.index(".")]
432
- extension = filename[filename.index(".") + 1 :]
433
-
434
- if prefix is not None:
435
- prefix = strip_invalid_filename_characters(prefix)
436
-
437
- if extension is None:
438
- file_obj = tempfile.NamedTemporaryFile(
439
- delete=False, prefix=prefix, dir=directory
440
- )
441
- else:
442
- file_obj = tempfile.NamedTemporaryFile(
443
- delete=False,
444
- prefix=prefix,
445
- suffix="." + extension,
446
- dir=directory,
447
- )
448
- file_obj.write(data)
449
- file_obj.flush()
450
- return file_obj
451
-
452
-
453
- def dict_or_str_to_json_file(jsn: str | dict | list, dir: str | Path | None = None):
454
- if dir is not None:
455
- os.makedirs(dir, exist_ok=True)
456
-
457
- file_obj = tempfile.NamedTemporaryFile(
458
- delete=False, suffix=".json", dir=dir, mode="w+"
459
- )
460
- if isinstance(jsn, str):
461
- jsn = json.loads(jsn)
462
- json.dump(jsn, file_obj)
463
- file_obj.flush()
464
- return file_obj
465
-
466
-
467
- def file_to_json(file_path: str | Path) -> dict | list:
468
- with open(file_path) as f:
469
- return json.load(f)
470
-
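These two helpers form a round trip in the common case (illustrative):

    # f = dict_or_str_to_json_file({"a": 1})
    # file_to_json(f.name) == {"a": 1}   # True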
471
-
472
- ###########################
473
- # HuggingFace Hub API Utils
474
- ###########################
475
- def set_space_timeout(
476
- space_id: str,
477
- hf_token: str | None = None,
478
- timeout_in_seconds: int = 300,
479
- ):
480
- headers = huggingface_hub.utils.build_hf_headers(
481
- token=hf_token,
482
- library_name="gradio_client",
483
- library_version=__version__,
484
- )
485
- req = requests.post(
486
- f"https://huggingface.co/api/spaces/{space_id}/sleeptime",
487
- json={"seconds": timeout_in_seconds},
488
- headers=headers,
489
- )
490
- try:
491
- huggingface_hub.utils.hf_raise_for_status(req)
492
- except huggingface_hub.utils.HfHubHTTPError as err:
493
- raise SpaceDuplicationError(
494
- f"Could not set sleep timeout on duplicated Space. Please visit {SPACE_URL.format(space_id)} "
495
- "to set a timeout manually to reduce billing charges."
496
- ) from err
497
-
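Usage is a single call; the Space id and token below are placeholders:

    # set_space_timeout("user/duplicated-space", hf_token="hf_xxx",
    #                   timeout_in_seconds=3600)  # sleep after one idle hour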
498
-
499
- ########################
500
- # Misc utils
501
- ########################
502
-
503
-
504
- def synchronize_async(func: Callable, *args, **kwargs) -> Any:
505
- """
506
- Runs async functions in sync scopes. Can be used in any scope.
507
-
508
- Example:
509
- if inspect.iscoroutinefunction(block_fn.fn):
510
- predictions = utils.synchronize_async(block_fn.fn, *processed_input)
511
-
512
- Args:
513
- func:
514
- *args:
515
- **kwargs:
516
- """
517
- return fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, **kwargs) # type: ignore
518
-
519
-
520
- class APIInfoParseError(ValueError):
521
- pass
522
-
523
-
524
- def get_type(schema: dict):
525
- if "type" in schema:
526
- return schema["type"]
527
- elif schema.get("oneOf"):
528
- return "oneOf"
529
- elif schema.get("anyOf"):
530
- return "anyOf"
531
- else:
532
- raise APIInfoParseError(f"Cannot parse type for {schema}")
533
-
534
-
535
- def json_schema_to_python_type(schema: Any) -> str:
536
- """Convert the json schema into a python type hint"""
537
- type_ = get_type(schema)
538
- if type_ == {}:
539
- if "json" in schema["description"]:
540
- return "Dict[Any, Any]"
541
- else:
542
- return "Any"
543
- elif type_ == "null":
544
- return "None"
545
- elif type_ == "integer":
546
- return "int"
547
- elif type_ == "string":
548
- return "str"
549
- elif type_ == "boolean":
550
- return "bool"
551
- elif type_ == "number":
552
- return "int | float"
553
- elif type_ == "array":
554
- items = schema.get("items")
555
- if "prefixItems" in items:
556
- elements = ", ".join(
557
- [json_schema_to_python_type(i) for i in items["prefixItems"]]
558
- )
559
- return f"Tuple[{elements}]"
560
- else:
561
- elements = json_schema_to_python_type(items)
562
- return f"List[{elements}]"
563
- elif type_ == "object":
564
- des = ", ".join(
565
- [
566
- f"{n}: {json_schema_to_python_type(v)} ({v.get('description')})"
567
- for n, v in schema["properties"].items()
568
- ]
569
- )
570
- return f"Dict({des})"
571
- elif type_ in ["oneOf", "anyOf"]:
572
- desc = " | ".join([json_schema_to_python_type(i) for i in schema[type_]])
573
- return desc
574
- else:
575
- raise APIInfoParseError(f"Cannot parse schema {schema}")
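A worked example of the conversion above, using a hypothetical schema:

    schema = {
        "type": "array",
        "items": {"prefixItems": [{"type": "string"}, {"type": "number"}]},
    }
    json_schema_to_python_type(schema)  # -> "Tuple[str, int | float]"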
 
 
 
spaces/Deci/DeciLM-6b-instruct/USE_POLICY.md DELETED
@@ -1,50 +0,0 @@
1
- # Llama 2 Acceptable Use Policy
2
-
3
- Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
4
-
5
- ## Prohibited Uses
6
- We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
7
-
8
- 1. Violate the law or others’ rights, including to:
9
- 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
10
- 1. Violence or terrorism
11
- 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
12
- 3. Human trafficking, exploitation, and sexual violence
13
- 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
14
- 5. Sexual solicitation
15
- 6. Any other criminal activity
16
- 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
17
- 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
18
- 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
19
- 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
20
- 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
21
- 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
22
-
23
-
24
-
25
- 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
26
- 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
27
- 2. Guns and illegal weapons (including weapon development)
28
- 3. Illegal drugs and regulated/controlled substances
29
- 4. Operation of critical infrastructure, transportation technologies, or heavy machinery
30
- 5. Self-harm or harm to others, including suicide, cutting, and eating disorders
31
- 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
32
-
33
-
34
-
35
- 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
36
- 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
37
- 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
38
- 3. Generating, promoting, or further distributing spam
39
- 4. Impersonating another individual without consent, authorization, or legal right
40
- 5. Representing that the use of Llama 2 or outputs are human-generated
41
- 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
42
- 4. Fail to appropriately disclose to end users any known dangers of your AI system
43
-
44
- Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
45
-
46
- * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
47
- * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
48
- * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
49
- * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
50
-