parquet-converter committed
Commit deb9cd3 · 1 Parent(s): 6a6d80d

Update parquet files (step 29 of 476)

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/utils.py +0 -49
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Battlefield 3 Xbox 360 Torrent [2021] Downloads.md +0 -20
  3. spaces/1line/AutoGPT/tests/integration/weaviate_memory_tests.py +0 -117
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download G Maps and Learn How to Share Your Location Routes and Lists.md +0 -106
  5. spaces/1phancelerku/anime-remove-background/Abjad A Unique and Fascinating Alphabet.md +0 -175
  6. spaces/1phancelerku/anime-remove-background/Crowd Evolution Mod APK The Ultimate Crowd Simulation Game with Amazing Graphics.md +0 -117
  7. spaces/1phancelerku/anime-remove-background/Download Scary Teacher 3D Mod APK for Free and Enjoy Unlimited Money and Energy.md +0 -93
  8. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/latent_diffusion/openaimodel.py +0 -1069
  9. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/loss.py +0 -307
  10. spaces/ASJMO/freegpt/g4f/Provider/Providers/ChatFree.py +0 -48
  11. spaces/Abhilashvj/planogram-compliance/inference.py +0 -226
  12. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/__init__.py +0 -9
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/TextObjectMethods.js +0 -36
  14. spaces/AlexWang/lama/bin/paper_runfiles/update_test_data_stats.sh +0 -30
  15. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py +0 -26
  16. spaces/Alpaca233/SadTalker/src/face3d/options/test_options.py +0 -21
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/lora/README.md +0 -83
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +0 -417
  19. spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py +0 -105
  20. spaces/Andy1621/uniformer_image_detection/configs/reppoints/README.md +0 -54
  21. spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py +0 -37
  22. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py +0 -2
  23. spaces/AnimalEquality/chatbot/_proc/_docs/app.html +0 -660
  24. spaces/Aniquel/WizApp/app.py +0 -3
  25. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/engine/test.py +0 -202
  26. spaces/Artgor/digit-draw-detect/.github/README.md +0 -13
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/proxy.py +0 -57
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__init__.py +0 -331
  29. spaces/Benson/text-generation/Examples/Arrow Fest Apk.md +0 -47
  30. spaces/BridgeEight/internlm-20B-chat-w4-turbomind/install_lmdeploy.sh +0 -27
  31. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/notes/contributing.md +0 -1
  32. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/__init__.py +0 -1
  33. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mmnasnet/nasnet.py +0 -218
  34. spaces/CVPR/LIVE/thrust/thrust/detail/complex/cexp.h +0 -183
  35. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/internal/copy_device_to_device.h +0 -64
  36. spaces/CVPR/LIVE/thrust/thrust/system/error_code.h +0 -523
  37. spaces/CVPR/Text2Human/Text2Human/ui/mouse_event.py +0 -129
  38. spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py +0 -83
  39. spaces/Cat125/text-generator-v2/classes.py +0 -49
  40. spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/Dockerfile +0 -27
  41. spaces/CikeyQI/meme-api/meme_generator/memes/forbid/__init__.py +0 -22
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/http.py +0 -862
  43. spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/01_🎥_Input_YouTube_Link.py +0 -258
  44. spaces/Danielzero/GPT3.5/assets/custom.css +0 -353
  45. spaces/Detomo/ai-comic-generation/src/components/ui/toast.tsx +0 -127
  46. spaces/DragGan/DragGan/scripts/gui.sh +0 -11
  47. spaces/EduardoPacheco/DINOv2-Features-Visualization/README.md +0 -12
  48. spaces/ElainaFanBoy/MusicGen/audiocraft/modules/conv.py +0 -245
  49. spaces/EleutherAI/magma/example_inference.py +0 -27
  50. spaces/EronSamez/RVC_HFmeu/demucs/train.py +0 -127
spaces/101-5/gpt4free/g4f/utils.py DELETED
@@ -1,49 +0,0 @@
- import browser_cookie3
-
-
- class Utils:
-     browsers = [
-         browser_cookie3.chrome, # 62.74% market share
-         browser_cookie3.safari, # 24.12% market share
-         browser_cookie3.firefox, # 4.56% market share
-         browser_cookie3.edge, # 2.85% market share
-         browser_cookie3.opera, # 1.69% market share
-         browser_cookie3.brave, # 0.96% market share
-         browser_cookie3.opera_gx, # 0.64% market share
-         browser_cookie3.vivaldi, # 0.32% market share
-     ]
-
-     def get_cookies(domain: str, setName: str = None, setBrowser: str = False) -> dict:
-         cookies = {}
-
-         if setBrowser != False:
-             for browser in Utils.browsers:
-                 if browser.__name__ == setBrowser:
-                     try:
-                         for c in browser(domain_name=domain):
-                             if c.name not in cookies:
-                                 cookies = cookies | {c.name: c.value}
-
-                     except Exception as e:
-                         pass
-
-         else:
-             for browser in Utils.browsers:
-                 try:
-                     for c in browser(domain_name=domain):
-                         if c.name not in cookies:
-                             cookies = cookies | {c.name: c.value}
-
-                 except Exception as e:
-                     pass
-
-         if setName:
-             try:
-                 return {setName: cookies[setName]}
-
-             except ValueError:
-                 print(f'Error: could not find {setName} cookie in any browser.')
-                 exit(1)
-
-         else:
-             return cookies
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Battlefield 3 Xbox 360 Torrent [2021] Downloads.md DELETED
@@ -1,20 +0,0 @@
- <br />
- <h1>Battlefield 3 Xbox 360 Torrent Downloads: How to Play the Game for Free</h1>
- <p>Battlefield 3 is one of the most popular first-person shooter games of all time. It was released in 2011 by EA DICE and Electronic Arts for Xbox 360, PlayStation 3 and Microsoft Windows. The game features a realistic and immersive military warfare experience, with stunning graphics, dynamic audio, destructible environments and realistic animations. The game also has a single-player campaign, a co-operative mode and a multiplayer mode with various modes and maps.</p>
- <p>However, not everyone can afford to buy the game or has access to an Xbox 360 console. That's why some people resort to downloading torrents of the game and playing it on their PCs using an Xbox 360 emulator. Torrents are files that contain data from other users who have downloaded the game and are sharing it with others. Emulators are software that mimic the functions of a console and allow you to play games that are not compatible with your PC.</p>
- <h2>Battlefield 3 Xbox 360 Torrent Downloads</h2><br /><p><b><b>Download File</b> &middot;&middot;&middot;&middot;&middot; <a href="https://byltly.com/2uKxqe">https://byltly.com/2uKxqe</a></b></p><br /><br />
- <p>But how can you find and download Battlefield 3 Xbox 360 torrents? And how can you play them on your PC? In this article, we will answer these questions and provide you with some tips and tricks to enjoy the game for free.</p>
- <h2>How to Find and Download Battlefield 3 Xbox 360 Torrents</h2>
- <p>There are many websites that offer torrents of various games, including Battlefield 3 Xbox 360. However, not all of them are reliable or safe. Some of them may contain viruses, malware, fake files or low-quality downloads. Therefore, you need to be careful and choose a reputable and trustworthy website that has positive reviews and feedback from other users.</p>
- <p>One of the websites that we recommend is GamesTorrents.fm[^1^]. This website has a large collection of Xbox 360 games in different languages and regions. You can easily find Battlefield 3 by typing the name in the search bar or browsing through the categories. The website also provides detailed information about the game, such as the release date, the genre, the size, the region and the format. You can also see screenshots and videos of the game to get an idea of what it looks like.</p>
- <p>To download Battlefield 3 Xbox 360 torrent from GamesTorrents.fm, you need to have a torrent client installed on your PC. A torrent client is a software that allows you to download and upload files using the BitTorrent protocol. Some of the most popular torrent clients are uTorrent, BitTorrent, qBittorrent and Vuze. You can download any of them from their official websites for free.</p>
- <p>Once you have a torrent client installed, you can click on the "Descargar Torrent" button on GamesTorrents.fm and choose where to save the file on your PC. The file will have an .iso extension, which means it is an image file that contains all the data of the game disc. You will need to extract this file using a software like WinRAR or 7-Zip before you can play it.</p>
- <h2>How to Play Battlefield 3 Xbox 360 Torrents on PC</h2>
- <p>After you have downloaded and extracted Battlefield 3 Xbox 360 torrent, you will need an Xbox 360 emulator to play it on your PC. An Xbox 360 emulator is a software that simulates the hardware and software of an Xbox 360 console on your PC. This way, you can run games that are not compatible with your PC as if they were running on an actual console.</p>
- <p>However, not all Xbox 360 emulators are created equal. Some of them may not work properly or may have compatibility issues with certain games. Some of them may also require high-end PC specifications or configurations to run smoothly. Therefore, you need to do some research and find an emulator that works well with Battlefield 3 Xbox 360.</p>
- <p></p>
- <p>One of the emulators that we recommend is Xenia[^4^]. Xenia is an open-source Xbox 360 emulator that supports many games, including Battlefield 3 Xbox 360. It is also easy to use and has regular updates and improvements from its developers. You can download Xenia from its official website for free.</p>
- <p>To play Battlefield 3 Xbox 360 torrent on PC using Xenia, you need to follow these steps:</p>
- <ol</p> cec2833e83<br />
- <br />
- <br />
 
spaces/1line/AutoGPT/tests/integration/weaviate_memory_tests.py DELETED
@@ -1,117 +0,0 @@
- import os
- import sys
- import unittest
- from unittest import mock
- from uuid import uuid4
-
- from weaviate import Client
- from weaviate.util import get_valid_uuid
-
- from autogpt.config import Config
- from autogpt.memory.base import get_ada_embedding
- from autogpt.memory.weaviate import WeaviateMemory
-
-
- class TestWeaviateMemory(unittest.TestCase):
-     cfg = None
-     client = None
-     index = None
-
-     @classmethod
-     def setUpClass(cls):
-         # only create the connection to weaviate once
-         cls.cfg = Config()
-
-         if cls.cfg.use_weaviate_embedded:
-             from weaviate.embedded import EmbeddedOptions
-
-             cls.client = Client(
-                 embedded_options=EmbeddedOptions(
-                     hostname=cls.cfg.weaviate_host,
-                     port=int(cls.cfg.weaviate_port),
-                     persistence_data_path=cls.cfg.weaviate_embedded_path,
-                 )
-             )
-         else:
-             cls.client = Client(
-                 f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}"
-             )
-
-         cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)
-
-     """
-     In order to run these tests you will need a local instance of
-     Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose
-     for creating local instances using docker.
-     Alternatively in your .env file set the following environmental variables to run Weaviate embedded (see: https://weaviate.io/developers/weaviate/installation/embedded):
-
-         USE_WEAVIATE_EMBEDDED=True
-         WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
-     """
-
-     def setUp(self):
-         try:
-             self.client.schema.delete_class(self.index)
-         except:
-             pass
-
-         self.memory = WeaviateMemory(self.cfg)
-
-     def test_add(self):
-         doc = "You are a Titan name Thanos and you are looking for the Infinity Stones"
-         self.memory.add(doc)
-         result = self.client.query.get(self.index, ["raw_text"]).do()
-         actual = result["data"]["Get"][self.index]
-
-         self.assertEqual(len(actual), 1)
-         self.assertEqual(actual[0]["raw_text"], doc)
-
-     def test_get(self):
-         doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos"
-
-         with self.client.batch as batch:
-             batch.add_data_object(
-                 uuid=get_valid_uuid(uuid4()),
-                 data_object={"raw_text": doc},
-                 class_name=self.index,
-                 vector=get_ada_embedding(doc),
-             )
-
-             batch.flush()
-
-         actual = self.memory.get(doc)
-
-         self.assertEqual(len(actual), 1)
-         self.assertEqual(actual[0], doc)
-
-     def test_get_stats(self):
-         docs = [
-             "You are now about to count the number of docs in this index",
-             "And then you about to find out if you can count correctly",
-         ]
-
-         [self.memory.add(doc) for doc in docs]
-
-         stats = self.memory.get_stats()
-
-         self.assertTrue(stats)
-         self.assertTrue("count" in stats)
-         self.assertEqual(stats["count"], 2)
-
-     def test_clear(self):
-         docs = [
-             "Shame this is the last test for this class",
-             "Testing is fun when someone else is doing it",
-         ]
-
-         [self.memory.add(doc) for doc in docs]
-
-         self.assertEqual(self.memory.get_stats()["count"], 2)
-
-         self.memory.clear()
-
-         self.assertEqual(self.memory.get_stats()["count"], 0)
-
-
- if __name__ == "__main__":
-     unittest.main()
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download G Maps and Learn How to Share Your Location Routes and Lists.md DELETED
@@ -1,106 +0,0 @@
1
-
2
- <h1>How to Download and Use Google Maps for Offline Navigation</h1>
3
- <p>Google Maps is one of the most popular and useful apps for navigating the world. It provides real-time GPS navigation, traffic, transit, and local information for over 220 countries and territories. You can also discover new places, explore local neighborhoods, and find reviews and ratings for restaurants, hotels, attractions, and more.</p>
4
- <h2>download g maps</h2><br /><p><b><b>Download File</b> &#10084;&#10084;&#10084; <a href="https://urlin.us/2uT19F">https://urlin.us/2uT19F</a></b></p><br /><br />
5
- <p>But what if you don't have a reliable internet connection or want to save mobile data? Don't worry, you can still use Google Maps offline. You can download areas from Google Maps to your phone or tablet and use them when you're not connected to the internet. You can also save battery life by turning on Wi-Fi only mode.</p>
6
- <p>In this article, we will show you how to download and use Google Maps for offline navigation. We will also share some tips and tricks to make the most of this feature. Let's get started!</p>
7
- <h2>How to Download Google Maps for Offline Use</h2>
8
- <p>To download an area from Google Maps for offline use, follow these steps:</p>
9
- <h3>Step 1: Open the Google Maps app and search for a place</h3>
10
- <p>On your Android phone or tablet, open the <a href="(^1^)"> Google Maps app</a> on your device. If you don't have it, you can download it from the <a href="">Google Play Store</a>. Then, search for a place you want to download, such as a city, a country, or a landmark. For example, you can search for "New York City".</p>
11
- <p>download g maps offline<br />
12
- download g maps for windows 10<br />
13
- download g maps for pc<br />
14
- download g maps for android<br />
15
- download g maps voice navigation<br />
16
- download g maps pro<br />
17
- download g maps area<br />
18
- download g maps apk<br />
19
- download g maps app for iphone<br />
20
- download g maps to sd card<br />
21
- download g maps to garmin<br />
22
- download g maps to phone<br />
23
- download g maps to computer<br />
24
- download g maps to use offline<br />
25
- download g maps to car<br />
26
- download g maps route planner<br />
27
- download g maps satellite view<br />
28
- download g maps street view<br />
29
- download g maps earth view<br />
30
- download g maps 3d view<br />
31
- download g maps directions<br />
32
- download g maps navigation free<br />
33
- download g maps navigation offline<br />
34
- download g maps navigation voice change<br />
35
- download g maps navigation for truckers<br />
36
- download g maps latest version<br />
37
- download g maps update 2020<br />
38
- download g maps update 2021<br />
39
- download g maps update offline<br />
40
- download g maps update for android auto<br />
41
- download g maps with gps tracker<br />
42
- download g maps with voice guidance<br />
43
- download g maps with traffic information<br />
44
- download g maps with speed limit display<br />
45
- download g maps with street names<br />
46
- how to download g maps on iphone<br />
47
- how to download g maps on ipad<br />
48
- how to download g maps on macbook pro<br />
49
- how to download g maps on laptop windows 10<br />
50
- how to download g maps on samsung smart tv<br />
51
- can i download g maps on my apple watch<br />
52
- can i download g maps on my fitbit versa 2<br />
53
- can i download g maps on my kindle fire hd 8<br />
54
- can i download g maps on my garmin nuvi 255w<br />
55
- can i download g maps on my carplay</p>
56
- <h3>Step 2: Tap the menu icon and select Offline maps</h3>
57
- <p>After you find the place you want to download, tap the menu icon (three horizontal lines) at the top left corner of the screen. Then, select Offline maps from the menu. You will see a list of your downloaded maps and a button to select your own map.</p>
58
- <h3>Step 3: Tap Select your own map and adjust the area you want to save</h3>
59
- <p>Tap Select your own map and you will see a blue box on the map. You can zoom in or out, drag, or resize the box to adjust the area you want to save. You can also see the name and size of the area at the bottom of the screen. Try to choose an area that covers the places you want to visit or navigate.</p>
60
- <h3>Step 4: Tap Download and name your offline map</h3>
61
- <p>Once you are happy with the area you selected, tap Download at the bottom right corner of the screen. You will be asked to name your offline map. You can use the suggested name or enter your own name. Then, tap Save. Your offline map will start downloading and you will see a progress bar on the screen.</p>
62
- <h3>Step 5: Save offline maps on an SD card (optional)</h3>
63
- <p>If you have an SD card in your device, you can save your offline maps on it to save internal storage space. To do this, go to the Offline maps menu and tap Settings at the top right corner of the screen. Then, under Storage preferences, select Device or SD card and choose SD card. You can also change this setting anytime.</p>
64
- <h2>How to Use Google Maps Offline</h2>
65
- <p>To use Google Maps offline, follow these steps:</p>
66
- <h3>Step 1: Open the Google Maps app and tap your profile picture or initial</h3>
67
- <p>On your Android phone or tablet, open the Google Maps app and tap your profile picture or initial at the top right corner of the screen. Then, tap Turn on Wi-Fi only from the menu. This will prevent Google Maps from using mobile data and only use Wi-Fi when available.</p>
68
- <h3>Step 2: Tap Offline maps and select the map you want to use</h3>
69
- <p>After turning on Wi-Fi only mode, tap Offline maps from the menu. You will see a list of your downloaded maps and their expiration dates. Tap the map you want to use and it will open on the screen.</p>
70
- <h3>Step 3: Get directions and show routes with offline maps</h3>
71
- <p>To get directions and show routes with offline maps, tap Directions at the bottom right corner of the screen. Then, enter your destination and choose your mode of transportation (car, motorcycle, taxi, etc.). You will see a list of possible routes with their estimated time and distance. Tap Start to begin navigation.</p>
72
- <h3>Step 4: Search for locations and access information with offline maps</h3>
73
- <p>To search for locations and access information with offline maps, tap Search here at the bottom of the screen. Then, enter a keyword or a category (such as restaurants, hotels, museums, etc.). You will see a list of nearby places that match your search. Tap any place to see its name, address, phone number, website, rating, reviews, photos, and more.</p>
74
- <h3>Step 5: Manage offline maps and update or delete them as needed</h3>
75
- <p>To manage offline maps and update or delete them as needed, go to the Offline maps menu and tap any map to see its details. You can see its size, expiration date, last update date, and coverage area. You can also tap Update to download any changes or new information for that map. To delete a map, tap Delete.</p>
76
- <h2>Tips and Tricks for Using Google Maps Offline</h2>
77
- <p>Here are some tips and tricks for using Google Maps offline:</p>
78
- <h3>Tip 1: Save battery and mobile data by turning on Wi-Fi only mode</h3>
79
- <p>As mentioned above, you can turn on Wi-Fi only mode to save battery and mobile data when using Google Maps offline. This will prevent Google Maps from using mobile data and only use Wi-Fi when available. To turn on Wi-Fi only mode, go to your profile picture or initial > Turn on Wi-Fi only.</p>
80
- <h3>Tip 2: Customize your vehicle icon and choose from different options</h3>
81
- <p>You can customize your vehicle icon and choose from different options when using Google Maps offline. This can make your navigation more fun and personalized. To change your vehicle icon, tap the arrow icon at the bottom of the screen. Then, swipe left or right to choose from different options, such as a car, a truck, a motorcycle, a scooter, or a taxi.</p>
82
- <h3>Tip 3: Add music to your drive by syncing Google Maps with YouTube Music, Spotify, or Apple Music</h3>
83
- <p>You can add music to your drive by syncing Google Maps with YouTube Music, Spotify, or Apple Music. This can make your drive more enjoyable and relaxing. To sync Google Maps with your music app, tap the menu icon at the top left corner of the screen. Then, tap Settings > Navigation settings > Show media playback controls. Then, choose your music app and sign in with your account.</p>
84
- <h3>Tip 4: Choose more eco-friendly driving options by selecting the most fuel-efficient route</h3>
85
- <p>You can choose more eco-friendly driving options by selecting the most fuel-efficient route when using Google Maps offline. This can help you save gas and reduce your carbon footprint. To select the most fuel-efficient route, tap Directions at the bottom right corner of the screen. Then, tap Options at the top right corner of the screen. Then, under Route options, select Prefer fuel-efficient routes.</p>
86
- <h3>Tip 5: Use Live View to get an AR view of the street you're on (available in some cities)</h3>
87
- <p>You can use Live View to get an AR view of the street you're on when using Google Maps offline. This can help you orient yourself and find your way more easily. To use Live View, tap Directions at the bottom right corner of the screen. Then, enter your destination and choose walking mode. Then, tap Live View at the bottom of the screen. You will see arrows and directions overlaid on the real world.</p>
88
- <h2>Conclusion</h2>
89
- <p>Google Maps is a great app for navigating the world, but you don't need an internet connection to use it. You can download and use Google Maps offline for offline navigation. You can also save battery and mobile data by turning on Wi-Fi only mode. You can also customize your vehicle icon, add music to your drive, choose more eco-friendly driving options, and use Live View to get an AR view of the street you're on.</p>
90
- <p>We hope this article has helped you learn how to download and use Google Maps offline. Try it out and let us know what you think in the comments below. Happy navigating!</p>
91
- <h2>FAQs</h2>
92
- <p>Here are some frequently asked questions about downloading and using Google Maps offline:</p>
93
- <ul>
94
- <li><b>Q1. How long do offline maps last?</b></li>
95
- <li>A1. Offline maps will expire after 30 days, but you can update them before they expire or download them again.</li>
96
- <li><b>Q2. How much storage space do offline maps take?</b></li>
97
- <li>A2. The size of offline maps depends on the area you download, but you can check the size before you download them. You can also save them on an SD card to save internal storage space.</li>
98
- <li><b>Q3. Can I use offline maps for transit, bicycling, or walking directions?</b></li>
99
- <li>A3. No, offline maps only support driving directions. You need an internet connection to get transit, bicycling, or walking directions.</li>
100
- <li><b>Q4. Can I use offline maps in any country or region?</b></li>
101
- <li>A4. No, some countries or regions may not allow downloading offline maps due to contractual limitations, language support, address formats, or other reasons.</li>
102
- <li><b>Q5. Can I use offline maps with other apps or services?</b></li>
103
- <li>A5. Yes, you can use offline maps with other apps or services that support Google Maps, such as Uber, Lyft, Waze, etc.</li>
104
- </ul></p> 197e85843d<br />
105
- <br />
106
- <br />
 
spaces/1phancelerku/anime-remove-background/Abjad A Unique and Fascinating Alphabet.md DELETED
@@ -1,175 +0,0 @@
1
-
2
- <table>
3
- <tr>
4
- <td>
5
- <h1>Abjad: A Writing System That Only Uses Consonants</h1>
6
- <p>Have you ever wondered how some languages can be written without vowels? How do people read and write such languages? What are the advantages and disadvantages of using such a writing system? In this article, we will explore the fascinating world of abjads, a type of writing system that only uses consonants.</p>
7
- <p>An abjad is a writing system in which only consonants are represented, leaving vowel sounds to be inferred by the reader. This contrasts with other alphabets, which provide graphemes for both consonants and vowels. The term abjad was introduced in 1990 by Peter T. Daniels, a linguist who studied different types of writing systems. He derived the word from the first four letters of the Arabic alphabet: alif, ba, jim, and dal.</p>
8
- <h2>abjad</h2><br /><p><b><b>Download File</b> &mdash;&mdash;&mdash; <a href="https://jinyurl.com/2uNRdk">https://jinyurl.com/2uNRdk</a></b></p><br /><br />
9
- <p>Abjads are mainly used in languages that belong to the Afro-Asiatic language family, such as Arabic, Hebrew, Amharic, etc. These languages have a feature called consonantal roots, which means that the meaning of a word is determined by its consonants, while the vowels indicate grammatical variations. For example, in Arabic, the root k-t-b means "write", while different vowel patterns can form words such as kataba (he wrote), kitab (book), kutub (books), etc.</p>
10
- <p>Abjads are not only interesting from a linguistic perspective but also from a historical and cultural one. They have been used for thousands of years to record some of the most ancient and influential civilizations and religions in human history. They have also influenced other writing systems and contributed to the development of science, literature, art, and more.</p>
11
- <h2>The History of Abjads</h2>
12
- <p>Abjads are one of the oldest types of writing systems in the world. They originated from pictographic and cuneiform scripts that were used by ancient civilizations in Mesopotamia and Egypt. These scripts consisted of symbols that represented objects, actions, or sounds. However, over time, these symbols became simplified and abstracted, and only the consonantal sounds were retained. This led to the emergence of the first abjads, such as Ugaritic, Phoenician, Aramaic, and Hebrew.</p>
13
- <p>The earliest known abjad is the Ugaritic script, which was used to write the Ugaritic language, a Northwest Semitic language spoken in the city-state of Ugarit (modern-day Syria) from around 1400 to 1200 BCE. The Ugaritic script consisted of 30 letters, each representing a consonant. It was written from left to right on clay tablets using a stylus.</p>
14
- <p>The most influential abjad in history is the Phoenician script, which was used to write the Phoenician language, a Canaanite language spoken by the Phoenicians, a seafaring people who lived in the eastern Mediterranean region from around 1500 to 300 BCE. The Phoenician script consisted of 22 letters, each representing a consonant. It was written from right to left on various materials such as stone, metal, wood, or parchment.</p>
15
- <p>abjad writing system<br />
16
- abjad vs alphabet<br />
17
- abjad vs abugida<br />
18
- abjad notation<br />
19
- abjad numerals<br />
20
- abjad arabic<br />
21
- abjad hebrew<br />
22
- abjad phoenician<br />
23
- abjad python<br />
24
- abjad music<br />
25
- abjad examples<br />
26
- abjad history<br />
27
- abjad origin<br />
28
- abjad definition<br />
29
- abjad app<br />
30
- abjad books<br />
31
- abjad characters<br />
32
- abjad chart<br />
33
- abjad diacritics<br />
34
- abjad etymology<br />
35
- abjad fonts<br />
36
- abjad gematria<br />
37
- abjad generator<br />
38
- abjad hindi<br />
39
- abjad in english<br />
40
- abjad in urdu<br />
41
- abjad in persian<br />
42
- abjad in malayalam<br />
43
- abjad in turkish<br />
44
- abjad keyboard<br />
45
- abjad letters<br />
46
- abjad lilypond<br />
47
- abjad meaning<br />
48
- abjad names<br />
49
- abjad online<br />
50
- abjad order<br />
51
- abjad pronunciation<br />
52
- abjad pdf<br />
53
- abjad quizlet<br />
54
- abjad reading<br />
55
- abjad script<br />
56
- abjad symbols<br />
57
- abjad translation<br />
58
- abjad tutorial<br />
59
- abjad unicode<br />
60
- abjad vowels<br />
61
- abjad words<br />
62
- what is an example of an impure or incomplete or defective or partial phonemic script or segmentally linear defective phonographic script or consonantary or consonant writing or consonantal alphabet?</p>
63
- <p>The Phoenician script was widely adopted and adapted by other peoples and cultures, giving rise to many other writing systems, such as Greek, Latin, Arabic, Hebrew, and more. Some of these writing systems added vowel symbols to the Phoenician script, creating alphabets, while others retained the abjad structure but modified the shapes and sounds of the letters.</p>
64
- <h3>The Phoenician Abjad</h3>
65
- <p>The Phoenician abjad is considered to be the ancestor of many modern writing systems. It was developed by the Phoenicians, a maritime civilization that dominated trade and commerce in the ancient Mediterranean world. The Phoenicians used their script to record their history, culture, religion, and business transactions. They also spread their script to other regions through their trade contacts and colonies.</p>
66
- <p>The Phoenician abjad consisted of 22 letters, each representing a consonant sound. The letters were named after objects that started with that sound. For example, the letter aleph (?) represented the sound /ʔ/ (a glottal stop) and was named after an ox (ʾālep), because the shape of the letter resembled an ox's head. The letter beth (?) represented the sound /b/ and was named after a house (bayt), because the shape of the letter resembled a house.</p>
67
- <p>The Phoenician abjad was written from right to left in horizontal lines. The letters were usually written without any spaces or punctuation marks between them. The vowel sounds were not written but inferred by the reader based on the context and the consonantal roots. The direction of writing sometimes changed depending on the medium or the purpose. For example, some inscriptions were written in boustrophedon style, which means "as the ox plows", alternating between right-to-left and left-to-right lines.</p>
68
- <p>The Phoenician abjad had a significant impact on other writing systems and languages. It was adopted and adapted by many peoples and cultures in different regions and times. Some of these adaptations include:</p>
69
- <ul>
70
- <li>The Greek alphabet: The Greeks borrowed the Phoenician abjad around the 9th century BCE and added vowel symbols to it, creating an alphabet that could represent all the sounds of their language. The Greek alphabet also changed the direction of writing from right-to-left to left-to-right.</li>
71
- <li>The Latin alphabet: The Latin alphabet is derived from an Etruscan adaptation of the Greek alphabet, which in turn was derived from a western variant of the Phoenician abjad. The Latin alphabet was used to write Latin, the language of ancient Rome, and later became the basis for many modern alphabets such as English, French, Spanish, etc.</li>
72
- the Phoenician abjad. The Arabic abjad is used to write Arabic, the language of Islam and one of the most widely spoken languages in the world. The Arabic abjad has 28 letters, each representing a consonant sound. The letters have different shapes depending on their position in a word (initial, medial, final, or isolated). The Arabic abjad also uses diacritical marks to indicate vowel sounds, but they are usually omitted in most texts.</li>
73
- <li>The Hebrew abjad: The Hebrew abjad is derived from a variant of the Phoenician abjad. The Hebrew abjad is used to write Hebrew, the language of Judaism and the official language of Israel. The Hebrew abjad has 22 letters, each representing a consonant sound. Some of the letters can also represent vowel sounds depending on their position or context. The Hebrew abjad also uses diacritical marks called niqqud to indicate vowel sounds, but they are usually omitted in most texts.</li>
74
- </ul>
75
- <h3>The Arabic Abjad</h3>
76
- <p>The Arabic abjad is the most widely used abjad in the world today. It is used to write Arabic, the official language of 26 countries and a co-official language in six others. Arabic is also the liturgical language of Islam, the religion of about 1.8 billion Muslims worldwide. The Arabic abjad is also used to write other languages that use Arabic script, such as Persian, Urdu, Pashto, etc.</p>
77
- <p>The Arabic abjad consists of 28 letters, each representing a consonant sound. The letters are written from right to left in horizontal lines. The letters have different shapes depending on their position in a word: initial (at the beginning), medial (in the middle), final (at the end), or isolated (standing alone). For example, the letter ba (ب) has four different shapes: ـب (final), بـ (initial), ـبـ (medial), and ب (isolated).</p>
78
- <p>The Arabic abjad does not represent vowel sounds explicitly, but it uses diacritical marks called harakat to indicate them. These marks are placed above or below the consonant letters and can change the meaning and pronunciation of a word. For example, the word kataba (he wrote) is written as كَتَبَ with three harakat: a fatha (a short /a/ sound) above the first and second letters, and a sukun (no vowel sound) above the third letter. However, these marks are usually omitted in most texts, except for religious texts, children's books, dictionaries, or texts for learners.</p>
79
- <p>The Arabic abjad also has other symbols and signs that modify or enhance the letters and words. Some of these include:</p>
80
- <ul>
81
- <li>The hamza (ء), which represents a glottal stop sound (/ʔ/). It can appear alone or with a carrier letter such as alif (ا), waw (و), or ya (ي).</li>
82
- <li>The shadda (ّ), which represents a gemination or doubling of a consonant sound. It is placed above a letter and indicates that it is pronounced twice. For example, the word madrasa (school) is written as مَدْرَسَة with a shadda above the letter sad (ص), indicating that it is pronounced as /madras.sa/.</li>
83
- <li>The tanwin (ـً ـٍ ـٌ), which represents an /n/ sound added to the end of a word in certain grammatical cases. It consists of a haraka followed by an alif maksura (ى), which looks like a short tail. For example, the word kitabun (a book) is written as كِتَابٌ with a kasra (a short /i/ sound) below the first letter and a tanwin with a damma (a short /u/ sound) above the last letter.</li>
84
- <li>The alif maqsura (ى), which represents a long /a/ sound at the end of a word. It looks like an alif without a hamza or a dotless ya. For example, the word layla (night) is written as لَيْلَى with an alif maqsura at the end.</li>
85
- <li>The alif lam (ال), which represents the definite article "the" in Arabic. It consists of an alif followed by a lam and is attached to the beginning of a word. For example, the word kitab (book) becomes al-kitab (the book) when written with an alif lam.</li>
86
- </ul>
87
- <p>The Hebrew abjad is the writing system of the Hebrew language, the language of Judaism and the official language of Israel. The Hebrew abjad is also used to write other Jewish languages, such as Yiddish, Ladino, Judeo-Arabic, etc. The Hebrew abjad has a long and rich history, dating back to the 10th century BCE. It has been used to record some of the most sacred and influential texts in human history, such as the Torah, the Talmud, and the Kabbalah.</p>
88
- <p>The Hebrew abjad consists of 22 letters, each representing a consonant sound. The letters are written from right to left in horizontal lines. The letters have different shapes depending on their position in a word: regular (in most cases), final (at the end of a word), or medial (in some cases). For example, the letter kaf (כ) has two shapes: ך (final) and כ (regular or medial).</p>
89
- <p>The Hebrew abjad does not represent vowel sounds explicitly, but it uses diacritical marks called niqqud to indicate them. These marks are placed below or above the consonant letters and can change the meaning and pronunciation of a word. For example, the word shalom (peace) is written as שָׁלוֹם with four niqqud: a kamatz (a long /a/ sound) below the first letter, a shva (no vowel sound) below the second letter, a holam (a long /o/ sound) above the third letter, and a dagesh (a dot that indicates gemination or doubling of a consonant sound) inside the fourth letter. However, these marks are usually omitted in most texts, except for religious texts, children's books, dictionaries, or texts for learners.</p>
90
- <p>The Hebrew abjad also has other symbols and signs that modify or enhance the letters and words. Some of these include:</p>
91
- <ul>
92
- <li>The alef (א), which represents a glottal stop sound (/ʔ/) or a silent letter that serves as a placeholder for a vowel sound. It can also indicate a long vowel sound when combined with other letters.</li>
93
- <li>The vav (ו), which represents a consonant sound (/v/) or a vowel sound (/u/ or /o/). It can also indicate a long vowel sound when combined with other letters.</li>
94
- <li>The yod (י), which represents a consonant sound (/j/) or a vowel sound (/i/ or /e/). It can also indicate a long vowel sound when combined with other letters.</li>
95
- <li>The he (ה), which represents a consonant sound (/h/) or a silent letter that serves as an indicator of grammatical gender or number. It can also indicate a long vowel sound when combined with other letters.</li>
96
- <li>The geresh (׳), which represents a modification of a consonant sound or an abbreviation of a word. For example, the letter gimel (ג) with a geresh becomes ג׳ and represents the sound /ʒ/ (as in measure). The letter shin (ש) with a geresh becomes ש׳ and represents an abbreviation of the word shekel (שֶׁקֶל), the currency of Israel.</li>
97
- <li>The gershayim (״), which represents an abbreviation of a word or a quotation mark. For example, the letters alef and lamed with gershayim become א״ל and represent an abbreviation of the word aluf (אַלּוּף), meaning general or chief. The gershayim can also be used to enclose a quotation within a text.</li>
98
- </ul>
99
- <h3>Other Abjads</h3>
100
- <p>Besides Phoenician, Arabic, and Hebrew, there are other abjads that have been used to write various languages in different regions and times. Some of these abjads include:</p>
101
- <ul>
102
- <li>The Ugaritic abjad: As mentioned earlier, this is the earliest known abjad that was used to write the Ugaritic language in ancient Syria. It had 30 letters and was written from left to right on clay tablets.</li>
103
- <li>The Syriac abjad: This is a descendant of the Aramaic abjad that was used to write the Syriac language, a dialect of Aramaic that was spoken by Christians in the Middle East from the 4th to the 8th centuries CE. It had 22 letters and was written from right to left on parchment or paper. It also had vowel marks and other symbols to indicate pronunciation and grammar.</li>
104
- <li>The Ge'ez abjad: This is an adaptation of the South Arabian abjad that was used to write the Ge'ez language, an ancient Semitic language that was spoken in Ethiopia and Eritrea until the 10th century CE. It had 26 letters and was written from left to right on parchment or stone. It also had vowel marks that were attached to the consonant letters, creating syllabic symbols.</li>
105
- <li>The Brahmi abjad: This is an adaptation of the Aramaic abjad that was used to write various languages in ancient India, such as Sanskrit, Prakrit, Pali, etc. It had 33 letters and was written from left to right on stone, metal, or palm leaves. It also had vowel marks that were attached to the consonant letters, creating syllabic symbols.</li>
106
- </ul>
107
- <h2>The Advantages and Disadvantages of Abjads</h2>
108
- <p>Abjads are a unique and fascinating type of writing system, but they also have their pros and cons. Depending on the language, the context, and the purpose, abjads can offer some benefits and drawbacks compared to other writing systems. Here are some of them:</p>
109
- <h3>Advantages of Abjads</h3>
110
- <p>Some of the advantages of using abjads are:</p>
111
- <ul>
112
- <li>They can save space and time: Abjads can be more compact and concise than other writing systems, as they only use consonant letters and omit vowel marks. This can save space on writing materials and time for writing and reading.</li>
113
- <li>They can preserve meaning and ambiguity: Abjads can preserve the meaning of words by focusing on their consonantal roots, which are usually more stable and consistent than their vowel patterns. This can also allow for some intentional ambiguity or flexibility in interpretation, which can be useful for poetry, rhetoric, or humor.</li>
114
- <li>They can reflect linguistic features: Abjads can reflect some linguistic features of the languages they are used for, such as consonantal roots, morphological patterns, phonetic variations, etc. This can make them more suitable and natural for representing these languages than other writing systems.</li>
115
- </ul>
116
- <h3>Disadvantages of Abjads</h3>
117
- <p>Some of the disadvantages of using abjads are:</p>
118
- <ul>
119
- <li>They can cause ambiguity and confusion: Abjads can cause ambiguity and confusion for readers and learners, as they do not provide clear information about vowel sounds, which can change the meaning and pronunciation of words. This can make it difficult to read unfamiliar words, names, or foreign terms.</li>
120
- <li>They can require memorization and inference: Abjads can require memorization and inference for readers and learners, as they have to rely on their knowledge of the language, the context, and the conventions to infer the vowel sounds and meanings of words. This can make it challenging to learn and master these writing systems.</li>
121
- <li>They can limit communication and expression: Abjads can limit communication and expression for writers and speakers, as they do not allow for precise and accurate representation of vowel sounds, which can convey nuances, emotions, tones, etc. This can make it hard to express oneself clearly and effectively in these writing systems.</li>
122
- </ul>
123
- <p>Abjads are a type of writing system that only uses consonants, leaving vowel sounds to be inferred by the reader. Alphabets are another type of writing system that uses both consonants and vowels, providing graphemes for all the sounds of a language. How do abjads and alphabets differ in terms of structure, function, and usage? Let's find out.</p>
124
- <h3>The Definition of Alphabets</h3>
125
- <p>An alphabet is a writing system in which each letter represents a phoneme, a basic unit of sound in a language. An alphabet usually consists of two types of letters: consonants and vowels. Consonants are letters that represent sounds that are produced by obstructing or constricting the airflow in the vocal tract, such as /b/, /k/, /s/, etc. Vowels are letters that represent sounds that are produced by vibrating the vocal cords without any obstruction or constriction, such as /a/, /i/, /u/, etc.</p>
126
- <p>An alphabet can represent all the sounds of a language with a relatively small number of letters, usually between 20 and 30. This makes it easier to learn and use than other writing systems that have more complex or numerous symbols, such as logographic or syllabic systems. An alphabet can also allow for more accurate and consistent spelling and pronunciation of words, as each letter corresponds to a specific sound.</p>
127
- <h3>The Contrast of Abjads and Alphabets</h3>
128
- <p>Abjads and alphabets are both types of writing systems that use letters to represent sounds, but they differ in how they treat vowel sounds. Abjads only represent consonant sounds, leaving vowel sounds to be inferred by the reader based on the context and the consonantal roots. Alphabets represent both consonant and vowel sounds, providing graphemes for all the phonemes of a language.</p>
129
- <p>This difference has implications for the structure, function, and usage of these writing systems. Abjads tend to be more compact and concise than alphabets, as they only use consonant letters and omit vowel marks. However, abjads also tend to be more ambiguous and confusing than alphabets, as they do not provide clear information about vowel sounds, which can change the meaning and pronunciation of words. Abjads also tend to reflect some linguistic features of the languages they are used for, such as consonantal roots, morphological patterns, phonetic variations, etc. Alphabets tend to be more precise and consistent than abjads, as they provide graphemes for all the sounds of a language. However, alphabets also tend to be more complex and diverse than abjads, as they have different letters and rules for different languages.</p>
130
- <h3>The Examples of Alphabets</h3>
131
- <p>Some of the most common alphabets in the world are:</p>
132
- <ul>
133
- <li>The Latin alphabet: This is the most widely used alphabet in the world today. It is used to write many languages such as English, French, Spanish, German, Italian, etc. It has 26 letters: 21 consonants and 5 vowels.</li>
134
- <li>The Greek alphabet: This is the alphabet that was derived from the Phoenician abjad by adding vowel symbols. It is used to write Greek, the official language of Greece and Cyprus. It has 24 letters: 17 consonants and 7 vowels.</li>
135
- <li>The Cyrillic alphabet: This is an adaptation of the Greek alphabet that was created by Saint Cyril and Saint Methodius in the 9th century CE to write Slavic languages. It is used to write many languages such as Russian, Ukrainian, Bulgarian, Serbian, etc. It has 33 letters: 21 consonants and 12 vowels.</li>
136
- <li>The Devanagari alphabet: This is an adaptation of the Brahmi abjad that was developed in India around the 10th century CE to write Sanskrit and other languages. It is used to write many languages such as Hindi, Nepali, Marathi, etc. It has 47 letters: 33 consonants and 14 vowels.</li>
137
- </ul>
138
- <p>In this article, we have learned about abjads, a type of writing system that only uses consonants. We have explored the history of abjads, their advantages and disadvantages compared to other writing systems, and how they differ from alphabets. We have also seen some examples of abjads and alphabets that are used to write various languages in the world.</p>
139
- <p>Abjads are a fascinating and unique way of writing that reflect the linguistic and cultural features of the languages they are used for. They have been used for thousands of years to record some of the most ancient and influential civilizations and religions in human history. They have also influenced other writing systems and contributed to the development of science, literature, art, and more.</p>
140
- <p>If you are interested in learning more about abjads or other writing systems, you can visit some of the following websites:</p>
141
- <ul>
142
- <li>[Omniglot]: A website that provides information and examples of various writing systems and languages.</li>
143
- <li>[ScriptSource]: A website that provides resources and tools for studying, using, and developing writing systems.</li>
144
- <li>[Ancient Scripts]: A website that provides an introduction to different ancient writing systems and their evolution.</li>
145
- </ul>
146
- <p>We hope you enjoyed reading this article and learned something new. If you have any questions or comments, please feel free to share them with us. Thank you for your time and attention.</p>
147
- <h2>FAQs About Abjads</h2>
148
- <p>Here are some frequently asked questions about abjads and their answers:</p>
149
- <ol>
150
- <li>What is the difference between an abjad and an abugida?</li>
151
- <p>An abjad is a writing system that only represents consonant sounds, leaving vowel sounds to be inferred by the reader. An abugida is a writing system that represents consonant sounds with letters and vowel sounds with diacritical marks that are attached to the consonant letters, creating syllabic symbols. For example, Arabic is an abjad, while Ge'ez is an abugida.</p>
152
- <li>What is the difference between an alphabet and a syllabary?</li>
153
- <p>An alphabet is a writing system that uses letters to represent phonemes, basic units of sound in a language. An alphabet usually consists of two types of letters: consonants and vowels. A syllabary is a writing system that uses symbols to represent syllables, units of sound that consist of one or more phonemes. A syllabary usually has more symbols than an alphabet, as each symbol represents a different combination of consonants and vowels. For example, Latin is an alphabet, while Japanese is a syllabary.</li>
154
- <li>What is the difference between a script and a language?</li>
155
- <p>A script is a system of symbols that are used to write one or more languages. A language is a system of communication that consists of sounds, words, grammar, etc. A script can be used to write different languages, and a language can be written in different scripts. For example, the Latin script is used to write many languages such as English, French, Spanish, etc. The English language can be written in different scripts such as Latin, Braille, Morse code, etc.</li>
156
- <li>What are some of the benefits of learning different writing systems?</li>
157
- <p>Learning different writing systems can have many benefits for personal and professional development. Some of these benefits include:</p>
158
- <ul>
159
- <li>Enhancing cognitive skills: Learning different writing systems can improve memory, attention, creativity, problem-solving, etc.</li>
160
- <li>Expanding cultural knowledge: Learning different writing systems can increase awareness and appreciation of different cultures, histories, religions, etc.</li>
161
- <li>Improving communication skills: Learning different writing systems can improve reading, writing, speaking, listening, etc.</li>
162
- <li>Boosting career opportunities: Learning different writing systems can open up new possibilities for education, work, travel, etc.</li>
163
- </ul>
164
- <li>How can I learn different writing systems?</li>
165
- <p>There are many ways to learn different writing systems depending on your goals, preferences, and resources. Some of these ways include:</p>
166
- <ul>
167
- <li>Taking online courses: There are many online platforms that offer courses on different writing systems and languages.</li>
168
- <li>Using apps or software: There are many apps or software that provide interactive and engaging tools for learning different writing systems and languages.</li>
169
- <li>Reading books or articles: There are many books or articles that provide information and examples of different writing systems and languages.</li>
170
- <li>Watching videos or podcasts: There are many videos or podcasts that provide visual and auditory explanations and demonstrations of different writing systems and languages.</li>
171
- <li>Joining communities or groups: There are many communities or groups that provide opportunities and support for learning different writing systems and languages.</li>
172
- <li>Practicing and applying: There are many ways to practice and apply what you have learned, such as writing, reading, speaking, listening, etc.</li>
173
- </ul></p> 401be4b1e0<br />
174
- <br />
175
- <br />
 
spaces/1phancelerku/anime-remove-background/Crowd Evolution Mod APK The Ultimate Crowd Simulation Game with Amazing Graphics.md DELETED
@@ -1,117 +0,0 @@
-
- <h1>Crowd Evolution APK Mod Download: A Fun and Addictive Game for Android Users</h1>
- <p>Do you love games that let you build your own army, fight against other crowds, and travel through different time periods? If so, you should check out Crowd Evolution, a fun and addictive game for Android devices. In this game, you can grow and evolve your crowd, equip them with various weapons and items, and defeat your enemies in exciting battles. You can also download the Crowd Evolution APK mod to get unlimited money, gems, and no ads. In this article, we will tell you more about this game, its features, why you should download the mod, how to install it, and some tips and tricks to help you play better.</p>
- <h2>crowd evolution apk mod download</h2><br /><p><b><b>Download</b> &#10003;&#10003;&#10003; <a href="https://jinyurl.com/2uNJ9p">https://jinyurl.com/2uNJ9p</a></b></p><br /><br />
- <h2>What is Crowd Evolution?</h2>
- <p>Crowd Evolution is a game developed by Rollic Games, a popular studio that has created many other hit games such as Tangle Master 3D, Go Knots 3D, Picker 3D, and more. Crowd Evolution is a game that combines elements of action, strategy, simulation, and arcade. It has a simple premise: you start with a small crowd of people, and you have to run around the map to recruit more followers, avoid or fight other crowds, and reach the end of the level. Along the way, you will also pass through different gates that will either increase or decrease your crowd size, time period, or weapon type. The game has hundreds of levels to play, each with different challenges and environments.</p>
- <h3>A game about growing and evolving your crowd</h3>
- <p>One of the main aspects of Crowd Evolution is growing and evolving your crowd. You start with a few people, but you can add more by running into them or by passing through green gates. The more people you have in your crowd, the stronger you will be in combat. You can also evolve your crowd by upgrading their stats such as health, damage, fire rate, speed, etc. You can do this by spending coins that you earn from completing levels or by watching videos. Evolving your crowd will make them more powerful and resilient against enemies.</p>
- <h3>A game about fighting and defeating your enemies</h3>
- <p>Another aspect of Crowd Evolution is fighting and defeating your enemies. You will encounter many other crowds on your way to the end of the level, some of them bigger or smaller than yours. You can either avoid them or engage them in combat. If you choose to fight them, you will have to use your weapons and items to shoot them down or knock them off the map. You can also use traps or obstacles to hinder their progress. Fighting enemies will earn you more coins and gems, which you can use to buy new weapons or items.</p>
- <h3>A game about time travel and different eras</h3>
- <p>The last aspect of Crowd Evolution is time travel and different eras. As you play the game, you will notice that there are different gates that will change the time period of your crowd. You can travel from the Stone Age to the Medieval Age, from the Industrial Age to the Modern Age, and even to the Future Age. Each era has its own weapons and items that you can use, such as clubs, swords, guns, lasers, etc. You can also see the changes in the environment and the enemies as you travel through time. Time travel adds more variety and fun to the game, as you can experience different scenarios and challenges.</p>
- <h2>What are the features of Crowd Evolution?</h2>
- <p>Crowd Evolution is a game that has many features that make it enjoyable and addictive. Here are some of them:</p>
- <p>crowd evolution mod apk latest version<br />
- crowd evolution hack apk download<br />
- crowd evolution mod menu apk<br />
- crowd evolution unlimited money apk<br />
- crowd evolution mod apk android 1<br />
- crowd evolution mod apk revdl<br />
- crowd evolution mod apk free download<br />
- crowd evolution mod apk no ads<br />
- crowd evolution mod apk unlimited gems<br />
- crowd evolution mod apk offline<br />
- crowd evolution mod apk 2023<br />
- crowd evolution mod apk happymod<br />
- crowd evolution mod apk rexdl<br />
- crowd evolution mod apk all items unlocked<br />
- crowd evolution mod apk unlimited coins<br />
- crowd evolution premium mod apk<br />
- crowd evolution pro mod apk<br />
- crowd evolution vip mod apk<br />
- crowd evolution full mod apk<br />
- crowd evolution mega mod apk<br />
- crowd evolution cracked apk download<br />
- crowd evolution cheat apk download<br />
- crowd evolution unlocked apk download<br />
- crowd evolution paid apk download<br />
- crowd evolution patched apk download<br />
- crowd evolution hack mod download<br />
- crowd evolution hack version download<br />
- crowd evolution hack tool download<br />
- crowd evolution hack online download<br />
- crowd evolution hack generator download<br />
- crowd evolution android mod download<br />
- crowd evolution ios mod download<br />
- crowd evolution pc mod download<br />
- crowd evolution windows mod download<br />
- crowd evolution mac mod download<br />
- crowd evolution linux mod download<br />
- crowd evolution arcade game mod download<br />
- crowd evolution simulation game mod download<br />
- crowd evolution casual game mod download<br />
- crowd evolution fun game mod download<br />
- crowd evolution free game mod download<br />
- crowd evolution best game mod download<br />
- crowd evolution new game mod download<br />
- crowd evolution latest game mod download<br />
- crowd evolution update game mod download<br />
- crowd evolution 2023 game mod download<br />
61
- how to download crowd evolution mod apk</p>
62
- <h3>Weapons and equipment of different eras</h3>
63
- <p>As mentioned before, Crowd Evolution lets you use different weapons and items depending on the time period of your crowd. You can equip your crowd with clubs, spears, axes, swords, shields, bows, arrows, guns, grenades, rockets, lasers, plasma guns, and more. Each weapon has its own advantages and disadvantages, such as range, damage, fire rate, accuracy, etc. You can also use items such as helmets, armor, boots, jetpacks, etc. to enhance your crowd's performance. You can buy new weapons and items with coins or gems, or find them on the map.</p>
64
- <h3>Upgrade your crowd and unlock new abilities</h3>
65
- <p>Crowd Evolution also lets you upgrade your crowd and unlock new abilities that will help you in your journey. You can upgrade your crowd's stats such as health, damage, fire rate, speed, etc. by spending coins. You can also unlock new abilities such as double jump, dash, freeze time, etc. by spending gems. Upgrading your crowd and unlocking new abilities will make them more powerful and versatile against enemies.</p>
66
- <h3>Diverse levels and environments</h3>
67
- <p>Crowd Evolution has hundreds of levels to play, each with different objectives and challenges. Some levels require you to reach the end of the map with a certain number of people in your crowd. Some levels require you to defeat a boss or a rival crowd. Some levels require you to collect a certain amount of coins or gems. Some levels require you to survive for a certain amount of time. Each level also has different environments that match the time period of your crowd. You can see forests, deserts, castles, cities, factories, spaceships, etc. Each environment also has different traps and obstacles that you have to avoid or use to your advantage.</p>
68
- <h3>Simple and intuitive controls</h3>
69
- <p>Crowd Evolution has simple and intuitive controls that make it easy to play. You just have to swipe on the screen to move your crowd around the map. You can also tap on the screen to shoot your weapons or use your items. The game also has an auto-aim feature that helps you target your enemies more easily. The controls are responsive and smooth, making the game fun and satisfying.</p>
70
- <h3>Colorful and cartoonish graphics</h3>
71
- <p>Crowd Evolution has colorful and cartoonish graphics that make it appealing and attractive. The game has a bright and vibrant color scheme that suits the mood and theme of the game. The game also has a cute and funny art style that makes the characters and enemies look adorable and hilarious. The game also has smooth animations and effects that add more life and charm to the game.</p>
72
- <h2>Why download the Crowd Evolution APK mod?</h2>
73
- <p>Crowd Evolution is a free-to-play game that you can download from the Google Play Store or the App Store. However, if you want to enjoy the game more fully and without any limitations or interruptions, you should download the Crowd Evolution APK mod. The APK mod is a modified version of the game that gives you some extra benefits and features that are not available in the original version. Here are some of the reasons why you should download the Crowd Evolution APK mod:</p>
74
- <h3>Unlimited money and gems</h3>
75
- <p>One of the main reasons to download the Crowd Evolution APK mod is that it gives you unlimited money and gems. Money and gems are the two currencies in the game that you can use to buy new weapons, items, upgrades, and abilities. However, in the original version of the game, you have to earn them by completing levels, watching videos, or spending real money. This can be time-consuming, boring, or expensive. With the Crowd Evolution APK mod, you don't have to worry about that. You will have unlimited money and gems from the start, and you can spend them as much as you want without running out. This way, you can buy and unlock everything in the game without any hassle or restriction.</p>
76
- <h3>No ads and no interruptions</h3>
77
- <p>Another reason to download the Crowd Evolution APK mod is that it removes all the ads and interruptions from the game. Ads are annoying and distracting, especially when they pop up in the middle of your gameplay or when you are trying to enjoy the game. They can also slow down your device or consume your data. In the original version of the game, you have to watch ads to get extra rewards or to access some features. With the Crowd Evolution APK mod, you don't have to do that. The mod removes all the ads from the game, and you can play without any interruption or disturbance. You can also access all the features without watching any videos.</p>
78
- <h3>Easy installation and compatibility</h3>
79
- <p>The last reason to download the Crowd Evolution APK mod is that it is easy to install and compatible with most Android devices. The mod does not require any root access or special permissions to install. You just have to download the APK file from a trusted source, enable unknown sources on your device settings, locate the downloaded file and tap on it to install, and launch the game and enjoy. The mod also works on most Android devices, regardless of their model or version. The mod is also updated regularly to ensure its functionality and security.</p>
80
- <h2>How to download and install the Crowd Evolution APK mod?</h2>
81
- <p>If you are convinced by the reasons above and want to download and install the Crowd Evolution APK mod, here are the steps that you need to follow:</p>
82
- <h3>Step 1: Download the APK file from a trusted source</h3>
83
- <p>The first step is to download the APK file from a trusted source. You can find many websites that offer the Crowd Evolution APK mod for free, but not all of them are safe or reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. To avoid that, you should only download the APK file from a trusted source that has positive reviews and feedback from other users. You can also scan the file with an antivirus program before installing it.</p>
84
- <h3>Step 2: Enable unknown sources on your device settings</h3>
85
- <p>The second step is to enable unknown sources in your device settings. This is necessary because Android normally does not allow installing apps from sources other than the Google Play Store. To install the Crowd Evolution APK mod, go to your device settings, find the security or privacy option, and look for the unknown sources option. Then toggle it on or check the box to allow installing apps from unknown sources. This will enable you to install the APK file that you downloaded.</p>
86
- <h3>Step 3: Locate the downloaded file and tap on it to install</h3>
87
- <p>The third step is to locate the downloaded file and tap on it to install. After you have downloaded the APK file and enabled unknown sources, you have to find the file on your device storage. You can use a file manager app or your device's default file explorer to do this. You have to look for the folder where you saved the APK file, usually the downloads folder. Then, you have to tap on the file to start the installation process. You may see a pop-up window asking for your confirmation or permission to install the app. You have to tap on install or allow to proceed with the installation.</p>
88
- <h3>Step 4: Launch the game and enjoy</h3>
89
- <p>The fourth and final step is to launch the game and enjoy. After you have installed the Crowd Evolution APK mod, you will see a new icon on your device's home screen or app drawer. You have to tap on the icon to launch the game and start playing. You will notice that you have unlimited money and gems, no ads, and all the features unlocked from the start. You can now enjoy the game without any limitations or interruptions.</p>
90
- <h2>Tips and tricks for playing Crowd Evolution</h2>
91
- <p>Crowd Evolution is a game that is easy to play but hard to master. It requires some skills and strategies to complete all the levels and defeat all the enemies. Here are some tips and tricks that can help you play better and have more fun:</p>
92
- <h3>Check the gates and choose the best one</h3>
93
- <p>As you play the game, you will encounter different gates that will change your crowd size, time period, or weapon type. Some of these gates are beneficial, while some of them are detrimental. You should always check the gates before passing through them and choose the best one for your situation. For example, if you have a small crowd, you should look for a green gate that will increase your crowd size. If you have a weak weapon, you should look for a gate that will change your weapon type to a stronger one. If you are in a dangerous era, you should look for a gate that will take you to a safer one.</p>
94
- <h3>Upgrade smartly and balance your stats</h3>
95
- <p>Crowd Evolution also lets you upgrade your crowd's stats such as health, damage, fire rate, speed, etc. by spending coins. You should always upgrade your crowd smartly and balance your stats according to your needs and preferences. For example, if you want to have a fast and agile crowd, you should focus on upgrading your speed and fire rate. If you want to have a durable and resilient crowd, you should focus on upgrading your health and damage. You should also avoid upgrading only one stat and neglecting the others, as this will make your crowd unbalanced and vulnerable.</p>
96
- <h3>Kill as many enemies as you can to earn more cash</h3>
97
- <p>Crowd Evolution also lets you kill enemies by shooting them with your weapons or knocking them off the map. You should always try to kill as many enemies as you can, as this will earn you more cash that you can use to buy new weapons, items, upgrades, and abilities. Killing enemies will also reduce their crowd size and make them easier to defeat. You can also use traps or obstacles to kill enemies more efficiently and creatively.</p>
98
- <h3>Push the buttons to activate traps on your foes</h3>
99
- <p>Crowd Evolution also has some levels that have buttons that you can push to activate traps on your foes. These traps can be spikes, saws, lasers, bombs, etc. that can damage or kill your enemies instantly. You should always look for these buttons and push them when you see a large group of enemies approaching. This will help you clear the way and save your ammo and health. You can also use these traps to kill the boss or the rival crowd more easily.</p>
100
- <h3>Watch the videos to get extra rewards (optional)</h3>
101
- <p>Crowd Evolution also gives you the option to watch videos to get extra rewards such as coins, gems, weapons, items, etc. You can watch these videos after completing a level or when you see a special offer on the screen. Watching these videos will give you more resources that you can use to improve your crowd and gameplay. However, this is optional and not necessary if you download the Crowd Evolution APK mod, as you will already have unlimited money and gems.</p>
102
- <h2>Conclusion</h2>
103
- <p>Crowd Evolution is a fun and addictive game for Android users that lets you grow and evolve your crowd, equip them with various weapons and items, and defeat your enemies in exciting battles. You can also download the Crowd Evolution APK mod to get unlimited money, gems, and no ads. In this article, we have told you more about this game, its features, why you should download the mod, how to install it, and some tips and tricks to help you play better. We hope that you have enjoyed reading this article and that you will try out this game and have fun with it.</p>
104
- <h2>FAQs</h2>
105
- <p>Here are some frequently asked questions about Crowd Evolution:</p>
106
- <h3>Q: Is Crowd Evolution a safe game to play?</h3>
107
- <p>A: Yes, Crowd Evolution is a safe game to play. It does not contain any violence, gore, or inappropriate content that may be harmful or offensive to some players. It is suitable for all ages and audiences.</p>
108
- <h3>Q: Is Crowd Evolution a multiplayer game?</h3>
109
- <p>A: No, Crowd Evolution is not a multiplayer game. It is a single-player game that does not require an internet connection or a social media account to play. You can play it offline and by yourself.</p>
110
- <h3>Q: How can I contact the developers of Crowd Evolution?</h3>
111
- <p>A: You can contact the developers of Crowd Evolution by sending them an email at [email protected] or by visiting their website at https://www.rollicgames.com/. You can also follow them on Facebook, Twitter, Instagram, or YouTube for more updates and news about their games.</p>
112
- <h3>Q: How can I get more coins and gems in Crowd Evolution?</h3>
113
- <p>A: You can get more coins and gems in Crowd Evolution by completing levels, killing enemies, watching videos, or spending real money. However, if you want to get unlimited coins and gems without any effort or cost, you should download the Crowd Evolution APK mod from a trusted source.</p>
114
- <h3>Q: What are some other games like Crowd Evolution?</h3>
115
- <p>A: Some other games like Crowd Evolution are Crowd City, Join Clash 3D, Run Race 3D, Crowd Master 3D, and Crowd Simulator. These games have similar gameplay and mechanics to Crowd Evolution, such as running, growing, fighting, and evolving your crowd. You can find these games on the Google Play Store or the App Store and try them out for yourself.</p>
116
- <br />
117
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Scary Teacher 3D Mod APK for Free and Enjoy Unlimited Money and Energy.md DELETED
@@ -1,93 +0,0 @@
1
- <br />
2
- <h1>Download Scary Teacher 3D Mod APK Unlimited Money and Energy</h1>
3
- <p>Have you ever wanted to get revenge on your worst high school teacher? Do you enjoy playing pranks and solving mysteries? If so, you will love Scary Teacher 3D, a horror-themed adventure game where you can scare the creepy teacher by performing various activities and releasing pets under her custody. But what if you could make the game even more fun and exciting by having unlimited money and energy? That's where the mod APK comes in. In this article, we will tell you everything you need to know about downloading and installing Scary Teacher 3D mod APK unlimited money and energy on your Android device.</p>
4
- <h2>Features of Scary Teacher 3D Mod APK</h2>
5
- <p>Scary Teacher 3D is a popular game that has been downloaded over 100 million times on Google Play Store. It has many features that make it appealing to players of all ages, such as:</p>
6
- <h2>download scary teacher 3d mod apk unlimited money and energy</h2><br /><p><b><b>Download Zip</b> &#9658; <a href="https://jinyurl.com/2uNPvj">https://jinyurl.com/2uNPvj</a></b></p><br /><br />
7
- <ul>
8
- <li><b>Unlimited money and energy to prank the teacher.</b> With the mod APK, you don't have to worry about running out of coins or stamina while playing. You can buy any item or upgrade you want, and perform as many pranks as you like without getting tired. This way, you can enjoy the game without any limitations or frustrations.</li>
9
- <li><b>Open world style interactive house with 15 rooms and mysteries.</b> The game takes place in the scary teacher's house, which consists of 15 different rooms, each with its own unsolved mystery. You can explore the house freely and find clues, objects, and secrets that will help you complete your missions. You can also interact with various items and use them to your advantage.</li>
10
- <li><b>Horror themes but suitable for kids of all ages.</b> The game has a spooky atmosphere and sound effects that create a sense of tension and suspense. However, it is not too scary or violent for kids to play. The graphics are cartoonish and colorful, and the pranks are humorous and harmless. The game also has a rating of Teen on Google Play Store, which means it is suitable for players aged 13 and above.</li>
11
- <li><b>Easy controls and fun gameplay.</b> The game has simple and intuitive controls that make it easy to play. You can move around using the joystick on the left side of the screen, and interact with items using the buttons on the right side. You can also swipe to change the camera angle and zoom in or out. The gameplay is fun and addictive, as you have to sneak around the house without getting caught by the teacher, and set up pranks that will make her scream or faint.</li>
12
- </ul>
13
- <h2>How to Download and Install Scary Teacher 3D Mod APK</h2>
14
- <p>If you want to download and install Scary Teacher 3D mod APK unlimited money and energy on your Android device, you need to follow these steps:</p>
15
- <ol>
16
- <li><b>Allow unknown apps on your Android device.</b> Before you can install any app that is not from Google Play Store, you need to enable unknown sources in your device settings. To do this, go to Settings > Apps & Notifications > Special Access > Install Unknown Apps > Chrome (or whichever browser you use) and toggle on the Allow from this source option. This will allow you to install apps from outside the official store.</li>
17
- <li><b>Download the mod APK file from a reputable source.</b> Next, you need to find a reliable website that offers the mod APK file for Scary Teacher 3D. You can search for it on Google or use the link we provide below. Make sure you download the latest version of the mod APK that is compatible with your device and has no viruses or malware. The file size should be around 100 MB.</li>
18
- <li><b>Install the mod APK using a file manager app.</b> After you download the mod APK file, you need to locate it on your device using a file manager app. You can use any app that can access your internal storage or SD card, such as Files by Google or ES File Explorer. Once you find the file, tap on it and follow the instructions to install it. You may need to grant some permissions to the app during the installation process.</li>
19
- <li><b>Enjoy the game with unlimited money and energy.</b> Finally, you can launch the game from your app drawer or home screen and enjoy playing Scary Teacher 3D with unlimited money and energy. You can use the money to buy anything you want from the shop, such as costumes, weapons, or gadgets. You can also use the energy to perform as many pranks as you want without getting exhausted. Have fun scaring the teacher and discovering her secrets!</li>
20
- </ol>
21
- <h2>Pros and Cons of Scary Teacher 3D Mod APK</h2>
22
- <p>Downloading and installing Scary Teacher 3D mod APK unlimited money and energy has its advantages and disadvantages. Here are some of them:</p>
23
- <table>
24
- <tr>
25
- <th>Pros</th>
26
- <th>Cons</th>
27
- </tr>
28
- <tr>
29
- <td><ul>
30
- <li>More fun, less frustration. You can enjoy the game without worrying about running out of money or energy, which can be annoying and frustrating. You can also skip the ads and the in-app purchases that may interrupt your gameplay.</li>
31
- <li>No ads, no in-app purchases. The mod APK removes all the ads and the in-app purchases that are present in the original game. This means you don't have to watch any videos or spend any real money to play the game.</li>
32
- </ul></td>
33
- <td><ul>
34
- <li>Potential security risks. Downloading and installing apps from unknown sources can expose your device to viruses, malware, or spyware that may harm your data or privacy. You should always be careful and use a trusted antivirus app to scan any file before installing it.</li>
35
- <li>Possible compatibility issues. The mod APK may not work with some devices or Android versions, depending on how it was modified. It may also crash or freeze during gameplay, causing you to lose your progress or data.</li>
36
- <li>May not work with future updates. The mod APK may not be compatible with future updates of the game, which may add new features, bug fixes, or improvements. You may have to wait for a new version of the mod APK or stick with the old one.</li>
37
- </ul></td>
38
- </tr>
39
- </table>
40
- <h2>Conclusion and FAQs</h2>
41
- <p>In conclusion, Scary Teacher 3D is a fun and exciting game that lets you prank and scare your creepy high school teacher in her own house. You can make the game even more enjoyable by downloading and installing Scary Teacher 3D mod APK unlimited money and energy, which gives you access to everything you need to have a blast. However, you should also be aware of the potential risks and drawbacks of using a modded app, such as security issues, compatibility problems, or update conflicts. We hope this article has helped you learn more about Scary Teacher 3D mod APK unlimited money and energy and how to download and install it on your Android device.</p>
42
- <p>Here are some FAQs that may answer some of your questions:</p>
43
- <h3>Q: Is Scary Teacher 3D mod APK safe to use?</h3>
44
- <p>A: Generally speaking, yes, as long as you download it from a reputable source that has no viruses or malware. However, you should always be careful and use a trusted antivirus app to scan any file before installing it. You should also avoid granting any unnecessary permissions to the app during the installation process.</p>
45
- <h3>Q: Is Scary Teacher 3D mod APK legal to use?</h3>
46
- <p>A: That depends on where you live and what laws apply there. Some countries may have strict rules against modifying or distributing apps without permission from the developers or owners. Others may have more lenient regulations or none at all. You should always check your local laws before using any modded app.</p>
47
- <h3>Q: Can I play Scary Teacher 3D mod APK online with other players?</h3> <p>A: No, you cannot. Scary Teacher 3D mod APK is a single-player game that does not support online multiplayer mode. You can only play it offline on your own device. If you want to play online with other players, you need to download the original game from Google Play Store and use a stable internet connection.</p>
48
- <p>scary teacher 3d hack apk free download with unlimited coins and gems<br />
49
- how to install scary teacher 3d mod apk on android device with infinite energy<br />
50
- scary teacher 3d mod menu apk download latest version with unlimited money<br />
51
- download scary teacher 3d mod apk for pc windows 10 with unlimited energy<br />
52
- scary teacher 3d mod apk offline download no root with unlimited money and gems<br />
53
- scary teacher 3d mod apk unlimited everything download for android with infinite energy<br />
54
- download scary teacher 3d mod apk revdl with unlimited money and coins<br />
55
- scary teacher 3d mod apk rexdl download free with unlimited energy and gems<br />
56
- download scary teacher 3d mod apk happymod with unlimited money and coins<br />
57
- scary teacher 3d mod apk android 1 download with infinite energy and gems<br />
58
- download scary teacher 3d mod apk pure with unlimited money and coins<br />
59
- scary teacher 3d mod apk obb download free with unlimited energy and gems<br />
60
- download scary teacher 3d mod apk latest version with unlimited money and coins<br />
61
- scary teacher 3d mod apk old version download with infinite energy and gems<br />
62
- download scary teacher 3d mod apk new update with unlimited money and coins<br />
63
- scary teacher 3d mod apk all chapters unlocked download with unlimited energy and gems<br />
64
- download scary teacher 3d mod apk all levels unlocked with unlimited money and coins<br />
65
- scary teacher 3d mod apk all characters unlocked download with infinite energy and gems<br />
66
- download scary teacher 3d mod apk no ads with unlimited money and coins<br />
67
- scary teacher 3d mod apk no verification download free with unlimited energy and gems<br />
68
- download scary teacher 3d mod apk ios with unlimited money and coins<br />
69
- scary teacher 3d mod ipa download for iphone with infinite energy and gems<br />
70
- download scary teacher 3d mod apk for ipad with unlimited money and coins<br />
71
- scary teacher 3d mod apk for mac download free with unlimited energy and gems<br />
72
- download scary teacher 3d mod apk online with unlimited money and coins<br />
73
- scary teacher 3d online multiplayer mod apk download with infinite energy and gems<br />
74
- download scary teacher 3d zombie mode mod apk with unlimited money and coins<br />
75
- scary teacher 3d christmas mode mod apk download free with unlimited energy and gems<br />
76
- download scary teacher 3d halloween mode mod apk with unlimited money and coins<br />
77
- scary teacher 3d summer mode mod apk download with infinite energy and gems<br />
78
- download scary teacher 3d winter mode mod apk with unlimited money and coins<br />
79
- scary teacher 3d valentine mode mod apk download free with unlimited energy and gems<br />
80
- download scary teacher 3d easter mode mod apk with unlimited money and coins<br />
81
- scary teacher 3d school mode mod apk download with infinite energy and gems<br />
82
- download scary teacher 3d hospital mode mod apk with unlimited money and coins<br />
83
- scary teacher 3d prison mode mod apk download free with unlimited energy and gems<br />
84
- download scary teacher 3d mansion mode mod apk with unlimited money and coins<br />
85
- scary teacher 3d garden mode mod apk download with infinite energy and gems<br />
86
- download scary teacher 3d city mode mod apk with unlimited money and coins<br />
87
- scary teacher 3d beach mode mod apk download free with unlimited energy and gems</p>
88
- <h3>Q: Will I get banned from the game if I use Scary Teacher 3D mod APK?</h3>
89
- <p>A: No, you will not. Scary Teacher 3D mod APK does not interfere with the game's servers or data, so there is no risk of getting banned or suspended from the game. You can play the game as normal without any worries.</p>
90
- <h3>Q: Can I update Scary Teacher 3D mod APK to the latest version?</h3>
91
- <p>A: Yes, you can, but only if there is a new version of the mod APK available that matches the latest version of the game. You cannot update the mod APK from the game itself or from Google Play Store, as that will overwrite the modded features and restore the original settings. You need to download and install the new version of the mod APK from the same source you got it from.</p>
92
- <br />
93
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/latent_diffusion/openaimodel.py DELETED
@@ -1,1069 +0,0 @@
1
- from abc import abstractmethod
2
- import math
3
-
4
- import numpy as np
5
- import torch as th
6
- import torch.nn as nn
7
- import torch.nn.functional as F
8
-
9
- from audioldm.latent_diffusion.util import (
10
- checkpoint,
11
- conv_nd,
12
- linear,
13
- avg_pool_nd,
14
- zero_module,
15
- normalization,
16
- timestep_embedding,
17
- )
18
- from audioldm.latent_diffusion.attention import SpatialTransformer
19
-
20
-
21
- # dummy replace
22
- def convert_module_to_f16(x):
23
- pass
24
-
25
-
26
- def convert_module_to_f32(x):
27
- pass
28
-
29
-
30
- ## go
31
- class AttentionPool2d(nn.Module):
32
- """
33
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
34
- """
35
-
36
- def __init__(
37
- self,
38
- spacial_dim: int,
39
- embed_dim: int,
40
- num_heads_channels: int,
41
- output_dim: int = None,
42
- ):
43
- super().__init__()
44
- self.positional_embedding = nn.Parameter(
45
- th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
46
- )
47
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
48
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
49
- self.num_heads = embed_dim // num_heads_channels
50
- self.attention = QKVAttention(self.num_heads)
51
-
52
- def forward(self, x):
53
- b, c, *_spatial = x.shape
54
- x = x.reshape(b, c, -1).contiguous() # NC(HW)
55
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
56
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
57
- x = self.qkv_proj(x)
58
- x = self.attention(x)
59
- x = self.c_proj(x)
60
- return x[:, :, 0]
61
-
62
-
63
- class TimestepBlock(nn.Module):
64
- """
65
- Any module where forward() takes timestep embeddings as a second argument.
66
- """
67
-
68
- @abstractmethod
69
- def forward(self, x, emb):
70
- """
71
- Apply the module to `x` given `emb` timestep embeddings.
72
- """
73
-
74
-
75
- class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
76
- """
77
- A sequential module that passes timestep embeddings to the children that
78
- support it as an extra input.
79
- """
80
-
81
- def forward(self, x, emb, context=None):
82
- for layer in self:
83
- if isinstance(layer, TimestepBlock):
84
- x = layer(x, emb)
85
- elif isinstance(layer, SpatialTransformer):
86
- x = layer(x, context)
87
- else:
88
- x = layer(x)
89
- return x
90
-
91
-
92
- class Upsample(nn.Module):
93
- """
94
- An upsampling layer with an optional convolution.
95
- :param channels: channels in the inputs and outputs.
96
- :param use_conv: a bool determining if a convolution is applied.
97
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
98
- upsampling occurs in the inner-two dimensions.
99
- """
100
-
101
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
102
- super().__init__()
103
- self.channels = channels
104
- self.out_channels = out_channels or channels
105
- self.use_conv = use_conv
106
- self.dims = dims
107
- if use_conv:
108
- self.conv = conv_nd(
109
- dims, self.channels, self.out_channels, 3, padding=padding
110
- )
111
-
112
- def forward(self, x):
113
- assert x.shape[1] == self.channels
114
- if self.dims == 3:
115
- x = F.interpolate(
116
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
117
- )
118
- else:
119
- x = F.interpolate(x, scale_factor=2, mode="nearest")
120
- if self.use_conv:
121
- x = self.conv(x)
122
- return x
123
-
124
-
125
- class TransposedUpsample(nn.Module):
126
- "Learned 2x upsampling without padding"
127
-
128
- def __init__(self, channels, out_channels=None, ks=5):
129
- super().__init__()
130
- self.channels = channels
131
- self.out_channels = out_channels or channels
132
-
133
- self.up = nn.ConvTranspose2d(
134
- self.channels, self.out_channels, kernel_size=ks, stride=2
135
- )
136
-
137
- def forward(self, x):
138
- return self.up(x)
139
-
140
-
141
- class Downsample(nn.Module):
142
- """
143
- A downsampling layer with an optional convolution.
144
- :param channels: channels in the inputs and outputs.
145
- :param use_conv: a bool determining if a convolution is applied.
146
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
147
- downsampling occurs in the inner-two dimensions.
148
- """
149
-
150
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
151
- super().__init__()
152
- self.channels = channels
153
- self.out_channels = out_channels or channels
154
- self.use_conv = use_conv
155
- self.dims = dims
156
- stride = 2 if dims != 3 else (1, 2, 2)
157
- if use_conv:
158
- self.op = conv_nd(
159
- dims,
160
- self.channels,
161
- self.out_channels,
162
- 3,
163
- stride=stride,
164
- padding=padding,
165
- )
166
- else:
167
- assert self.channels == self.out_channels
168
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
169
-
170
- def forward(self, x):
171
- assert x.shape[1] == self.channels
172
- return self.op(x)
173
-
174
-
175
- class ResBlock(TimestepBlock):
176
- """
177
- A residual block that can optionally change the number of channels.
178
- :param channels: the number of input channels.
179
- :param emb_channels: the number of timestep embedding channels.
180
- :param dropout: the rate of dropout.
181
- :param out_channels: if specified, the number of out channels.
182
- :param use_conv: if True and out_channels is specified, use a spatial
183
- convolution instead of a smaller 1x1 convolution to change the
184
- channels in the skip connection.
185
- :param dims: determines if the signal is 1D, 2D, or 3D.
186
- :param use_checkpoint: if True, use gradient checkpointing on this module.
187
- :param up: if True, use this block for upsampling.
188
- :param down: if True, use this block for downsampling.
189
- """
190
-
191
- def __init__(
192
- self,
193
- channels,
194
- emb_channels,
195
- dropout,
196
- out_channels=None,
197
- use_conv=False,
198
- use_scale_shift_norm=False,
199
- dims=2,
200
- use_checkpoint=False,
201
- up=False,
202
- down=False,
203
- ):
204
- super().__init__()
205
- self.channels = channels
206
- self.emb_channels = emb_channels
207
- self.dropout = dropout
208
- self.out_channels = out_channels or channels
209
- self.use_conv = use_conv
210
- self.use_checkpoint = use_checkpoint
211
- self.use_scale_shift_norm = use_scale_shift_norm
212
-
213
- self.in_layers = nn.Sequential(
214
- normalization(channels),
215
- nn.SiLU(),
216
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
217
- )
218
-
219
- self.updown = up or down
220
-
221
- if up:
222
- self.h_upd = Upsample(channels, False, dims)
223
- self.x_upd = Upsample(channels, False, dims)
224
- elif down:
225
- self.h_upd = Downsample(channels, False, dims)
226
- self.x_upd = Downsample(channels, False, dims)
227
- else:
228
- self.h_upd = self.x_upd = nn.Identity()
229
-
230
- self.emb_layers = nn.Sequential(
231
- nn.SiLU(),
232
- linear(
233
- emb_channels,
234
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
235
- ),
236
- )
237
- self.out_layers = nn.Sequential(
238
- normalization(self.out_channels),
239
- nn.SiLU(),
240
- nn.Dropout(p=dropout),
241
- zero_module(
242
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
243
- ),
244
- )
245
-
246
- if self.out_channels == channels:
247
- self.skip_connection = nn.Identity()
248
- elif use_conv:
249
- self.skip_connection = conv_nd(
250
- dims, channels, self.out_channels, 3, padding=1
251
- )
252
- else:
253
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
254
-
255
- def forward(self, x, emb):
256
- """
257
- Apply the block to a Tensor, conditioned on a timestep embedding.
258
- :param x: an [N x C x ...] Tensor of features.
259
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
260
- :return: an [N x C x ...] Tensor of outputs.
261
- """
262
- return checkpoint(
263
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
264
- )
265
-
266
- def _forward(self, x, emb):
267
- if self.updown:
268
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
269
- h = in_rest(x)
270
- h = self.h_upd(h)
271
- x = self.x_upd(x)
272
- h = in_conv(h)
273
- else:
274
- h = self.in_layers(x)
275
- emb_out = self.emb_layers(emb).type(h.dtype)
276
- while len(emb_out.shape) < len(h.shape):
277
- emb_out = emb_out[..., None]
278
- if self.use_scale_shift_norm:
279
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
280
- scale, shift = th.chunk(emb_out, 2, dim=1)
281
- h = out_norm(h) * (1 + scale) + shift
282
- h = out_rest(h)
283
- else:
284
- h = h + emb_out
285
- h = self.out_layers(h)
286
- return self.skip_connection(x) + h
287
-
288
-
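For reference, a minimal plain-PyTorch sketch (illustrative shapes only, not part of the original file) of the scale-shift, FiLM-like conditioning that ResBlock applies when use_scale_shift_norm is enabled: the timestep embedding is projected to 2*C channels and split into a per-channel scale and shift.

import torch

n, c = 2, 8
feat = torch.randn(n, c, 16, 16)               # feature map, already normalized inside ResBlock
emb_out = torch.randn(n, 2 * c, 1, 1)          # projected timestep embedding, broadcastable over H, W
scale, shift = torch.chunk(emb_out, 2, dim=1)  # each [N, C, 1, 1]
conditioned = feat * (1 + scale) + shift       # mirrors: h = out_norm(h) * (1 + scale) + shift
print(conditioned.shape)                       # torch.Size([2, 8, 16, 16])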
289
- class AttentionBlock(nn.Module):
290
- """
291
- An attention block that allows spatial positions to attend to each other.
292
- Originally ported from here, but adapted to the N-d case.
293
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
294
- """
295
-
296
- def __init__(
297
- self,
298
- channels,
299
- num_heads=1,
300
- num_head_channels=-1,
301
- use_checkpoint=False,
302
- use_new_attention_order=False,
303
- ):
304
- super().__init__()
305
- self.channels = channels
306
- if num_head_channels == -1:
307
- self.num_heads = num_heads
308
- else:
309
- assert (
310
- channels % num_head_channels == 0
311
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
312
- self.num_heads = channels // num_head_channels
313
- self.use_checkpoint = use_checkpoint
314
- self.norm = normalization(channels)
315
- self.qkv = conv_nd(1, channels, channels * 3, 1)
316
- if use_new_attention_order:
317
- # split qkv before split heads
318
- self.attention = QKVAttention(self.num_heads)
319
- else:
320
- # split heads before split qkv
321
- self.attention = QKVAttentionLegacy(self.num_heads)
322
-
323
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
324
-
325
- def forward(self, x):
326
- return checkpoint(
327
- self._forward, (x,), self.parameters(), True
328
- ) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
329
- # return pt_checkpoint(self._forward, x) # pytorch
330
-
331
- def _forward(self, x):
332
- b, c, *spatial = x.shape
333
- x = x.reshape(b, c, -1).contiguous()
334
- qkv = self.qkv(self.norm(x)).contiguous()
335
- h = self.attention(qkv).contiguous()
336
- h = self.proj_out(h).contiguous()
337
- return (x + h).reshape(b, c, *spatial).contiguous()
338
-
339
-
340
- def count_flops_attn(model, _x, y):
341
- """
342
- A counter for the `thop` package to count the operations in an
343
- attention operation.
344
- Meant to be used like:
345
- macs, params = thop.profile(
346
- model,
347
- inputs=(inputs, timestamps),
348
- custom_ops={QKVAttention: QKVAttention.count_flops},
349
- )
350
- """
351
- b, c, *spatial = y[0].shape
352
- num_spatial = int(np.prod(spatial))
353
- # We perform two matmuls with the same number of ops.
354
- # The first computes the weight matrix, the second computes
355
- # the combination of the value vectors.
356
- matmul_ops = 2 * b * (num_spatial**2) * c
357
- model.total_ops += th.DoubleTensor([matmul_ops])
358
-
359
-
360
- class QKVAttentionLegacy(nn.Module):
361
- """
362
- A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
363
- """
364
-
365
- def __init__(self, n_heads):
366
- super().__init__()
367
- self.n_heads = n_heads
368
-
369
- def forward(self, qkv):
370
- """
371
- Apply QKV attention.
372
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
373
- :return: an [N x (H * C) x T] tensor after attention.
374
- """
375
- bs, width, length = qkv.shape
376
- assert width % (3 * self.n_heads) == 0
377
- ch = width // (3 * self.n_heads)
378
- q, k, v = (
379
- qkv.reshape(bs * self.n_heads, ch * 3, length).contiguous().split(ch, dim=1)
380
- )
381
- scale = 1 / math.sqrt(math.sqrt(ch))
382
- weight = th.einsum(
383
- "bct,bcs->bts", q * scale, k * scale
384
- ) # More stable with f16 than dividing afterwards
385
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
386
- a = th.einsum("bts,bcs->bct", weight, v)
387
- return a.reshape(bs, -1, length).contiguous()
388
-
389
- @staticmethod
390
- def count_flops(model, _x, y):
391
- return count_flops_attn(model, _x, y)
392
-
393
-
394
- class QKVAttention(nn.Module):
395
- """
396
- A module which performs QKV attention and splits in a different order.
397
- """
398
-
399
- def __init__(self, n_heads):
400
- super().__init__()
401
- self.n_heads = n_heads
402
-
403
- def forward(self, qkv):
404
- """
405
- Apply QKV attention.
406
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
407
- :return: an [N x (H * C) x T] tensor after attention.
408
- """
409
- bs, width, length = qkv.shape
410
- assert width % (3 * self.n_heads) == 0
411
- ch = width // (3 * self.n_heads)
412
- q, k, v = qkv.chunk(3, dim=1)
413
- scale = 1 / math.sqrt(math.sqrt(ch))
414
- weight = th.einsum(
415
- "bct,bcs->bts",
416
- (q * scale).view(bs * self.n_heads, ch, length),
417
- (k * scale).view(bs * self.n_heads, ch, length),
418
- ) # More stable with f16 than dividing afterwards
419
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
420
- a = th.einsum(
421
- "bts,bcs->bct",
422
- weight,
423
- v.reshape(bs * self.n_heads, ch, length).contiguous(),
424
- )
425
- return a.reshape(bs, -1, length).contiguous()
426
-
427
- @staticmethod
428
- def count_flops(model, _x, y):
429
- return count_flops_attn(model, _x, y)
430
-
431
-
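For reference, a minimal plain-PyTorch sketch (illustrative sizes only) of the packing convention these attention modules use: QKVAttentionLegacy expects [N x (H * 3 * C) x T], QKVAttention expects [N x (3 * H * C) x T], and both return [N x (H * C) x T].

import torch

n, heads, ch, t = 2, 4, 8, 16                  # batch, heads, per-head channels, sequence length
qkv = torch.randn(n, 3 * heads * ch, t)        # QKVAttention-style packing

q, k, v = qkv.chunk(3, dim=1)                  # each [N, H*C, T]
scale = 1 / (ch ** 0.25)                       # same 1/sqrt(sqrt(ch)) scaling as above
w = torch.einsum(
    "bct,bcs->bts",
    (q * scale).reshape(n * heads, ch, t),
    (k * scale).reshape(n * heads, ch, t),
).softmax(dim=-1)
out = torch.einsum("bts,bcs->bct", w, v.reshape(n * heads, ch, t))
print(out.reshape(n, heads * ch, t).shape)     # torch.Size([2, 32, 16])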
432
- class UNetModel(nn.Module):
433
- """
434
- The full UNet model with attention and timestep embedding.
435
- :param in_channels: channels in the input Tensor.
436
- :param model_channels: base channel count for the model.
437
- :param out_channels: channels in the output Tensor.
438
- :param num_res_blocks: number of residual blocks per downsample.
439
- :param attention_resolutions: a collection of downsample rates at which
440
- attention will take place. May be a set, list, or tuple.
441
- For example, if this contains 4, then at 4x downsampling, attention
442
- will be used.
443
- :param dropout: the dropout probability.
444
- :param channel_mult: channel multiplier for each level of the UNet.
445
- :param conv_resample: if True, use learned convolutions for upsampling and
446
- downsampling.
447
- :param dims: determines if the signal is 1D, 2D, or 3D.
448
- :param num_classes: if specified (as an int), then this model will be
449
- class-conditional with `num_classes` classes.
450
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
451
- :param num_heads: the number of attention heads in each attention layer.
452
- :param num_heads_channels: if specified, ignore num_heads and instead use
453
- a fixed channel width per attention head.
454
- :param num_heads_upsample: works with num_heads to set a different number
455
- of heads for upsampling. Deprecated.
456
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
457
- :param resblock_updown: use residual blocks for up/downsampling.
458
- :param use_new_attention_order: use a different attention pattern for potentially
459
- increased efficiency.
460
- """
461
-
462
- def __init__(
463
- self,
464
- image_size,
465
- in_channels,
466
- model_channels,
467
- out_channels,
468
- num_res_blocks,
469
- attention_resolutions,
470
- dropout=0,
471
- channel_mult=(1, 2, 4, 8),
472
- conv_resample=True,
473
- dims=2,
474
- num_classes=None,
475
- extra_film_condition_dim=None,
476
- use_checkpoint=False,
477
- use_fp16=False,
478
- num_heads=-1,
479
- num_head_channels=-1,
480
- num_heads_upsample=-1,
481
- use_scale_shift_norm=False,
482
- extra_film_use_concat=False, # If true, concatenate extrafilm condition with time embedding, else addition
483
- resblock_updown=False,
484
- use_new_attention_order=False,
485
- use_spatial_transformer=False, # custom transformer support
486
- transformer_depth=1, # custom transformer support
487
- context_dim=None, # custom transformer support
488
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
489
- legacy=True,
490
- ):
491
- super().__init__()
492
- if num_heads_upsample == -1:
493
- num_heads_upsample = num_heads
494
-
495
- if num_heads == -1:
496
- assert (
497
- num_head_channels != -1
498
- ), "Either num_heads or num_head_channels has to be set"
499
-
500
- if num_head_channels == -1:
501
- assert (
502
- num_heads != -1
503
- ), "Either num_heads or num_head_channels has to be set"
504
-
505
- self.image_size = image_size
506
- self.in_channels = in_channels
507
- self.model_channels = model_channels
508
- self.out_channels = out_channels
509
- self.num_res_blocks = num_res_blocks
510
- self.attention_resolutions = attention_resolutions
511
- self.dropout = dropout
512
- self.channel_mult = channel_mult
513
- self.conv_resample = conv_resample
514
- self.num_classes = num_classes
515
- self.extra_film_condition_dim = extra_film_condition_dim
516
- self.use_checkpoint = use_checkpoint
517
- self.dtype = th.float16 if use_fp16 else th.float32
518
- self.num_heads = num_heads
519
- self.num_head_channels = num_head_channels
520
- self.num_heads_upsample = num_heads_upsample
521
- self.predict_codebook_ids = n_embed is not None
522
- self.extra_film_use_concat = extra_film_use_concat
523
- time_embed_dim = model_channels * 4
524
- self.time_embed = nn.Sequential(
525
- linear(model_channels, time_embed_dim),
526
- nn.SiLU(),
527
- linear(time_embed_dim, time_embed_dim),
528
- )
529
-
530
- assert not (
531
- self.num_classes is not None and self.extra_film_condition_dim is not None
532
- ), "As for the condition of theh UNet model, you can only set using class label or an extra embedding vector (such as from CLAP). You cannot set both num_classes and extra_film_condition_dim."
533
-
534
- if self.num_classes is not None:
535
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
536
-
537
- self.use_extra_film_by_concat = (
538
- self.extra_film_condition_dim is not None and self.extra_film_use_concat
539
- )
540
- self.use_extra_film_by_addition = (
541
- self.extra_film_condition_dim is not None and not self.extra_film_use_concat
542
- )
543
-
544
- if self.extra_film_condition_dim is not None:
545
- self.film_emb = nn.Linear(self.extra_film_condition_dim, time_embed_dim)
546
- # print("+ Use extra condition on UNet channel using Film. Extra condition dimension is %s. " % self.extra_film_condition_dim)
547
- # if(self.use_extra_film_by_concat):
548
- # print("\t By concatenation with time embedding")
549
- # elif(self.use_extra_film_by_addition):
550
- # print("\t By addition with time embedding")
551
-
552
- if use_spatial_transformer and (
553
- self.use_extra_film_by_concat or self.use_extra_film_by_addition
554
- ):
555
- # print("+ Spatial transformer will only be used as self-attention. Because you have choose to use film as your global condition.")
556
- spatial_transformer_no_context = True
557
- else:
558
- spatial_transformer_no_context = False
559
-
560
- if use_spatial_transformer and not spatial_transformer_no_context:
561
- assert (
562
- context_dim is not None
563
- ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..."
564
-
565
- if context_dim is not None and not spatial_transformer_no_context:
566
- assert (
567
- use_spatial_transformer
568
- ), "Fool!! You forgot to use the spatial transformer for your cross-attention conditioning..."
569
- from omegaconf.listconfig import ListConfig
570
-
571
- if type(context_dim) == ListConfig:
572
- context_dim = list(context_dim)
573
-
574
- self.input_blocks = nn.ModuleList(
575
- [
576
- TimestepEmbedSequential(
577
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
578
- )
579
- ]
580
- )
581
- self._feature_size = model_channels
582
- input_block_chans = [model_channels]
583
- ch = model_channels
584
- ds = 1
585
- for level, mult in enumerate(channel_mult):
586
- for _ in range(num_res_blocks):
587
- layers = [
588
- ResBlock(
589
- ch,
590
- time_embed_dim
591
- if (not self.use_extra_film_by_concat)
592
- else time_embed_dim * 2,
593
- dropout,
594
- out_channels=mult * model_channels,
595
- dims=dims,
596
- use_checkpoint=use_checkpoint,
597
- use_scale_shift_norm=use_scale_shift_norm,
598
- )
599
- ]
600
- ch = mult * model_channels
601
- if ds in attention_resolutions:
602
- if num_head_channels == -1:
603
- dim_head = ch // num_heads
604
- else:
605
- num_heads = ch // num_head_channels
606
- dim_head = num_head_channels
607
- if legacy:
608
- dim_head = (
609
- ch // num_heads
610
- if use_spatial_transformer
611
- else num_head_channels
612
- )
613
- layers.append(
614
- AttentionBlock(
615
- ch,
616
- use_checkpoint=use_checkpoint,
617
- num_heads=num_heads,
618
- num_head_channels=dim_head,
619
- use_new_attention_order=use_new_attention_order,
620
- )
621
- if not use_spatial_transformer
622
- else SpatialTransformer(
623
- ch,
624
- num_heads,
625
- dim_head,
626
- depth=transformer_depth,
627
- context_dim=context_dim,
628
- no_context=spatial_transformer_no_context,
629
- )
630
- )
631
- self.input_blocks.append(TimestepEmbedSequential(*layers))
632
- self._feature_size += ch
633
- input_block_chans.append(ch)
634
- if level != len(channel_mult) - 1:
635
- out_ch = ch
636
- self.input_blocks.append(
637
- TimestepEmbedSequential(
638
- ResBlock(
639
- ch,
640
- time_embed_dim
641
- if (not self.use_extra_film_by_concat)
642
- else time_embed_dim * 2,
643
- dropout,
644
- out_channels=out_ch,
645
- dims=dims,
646
- use_checkpoint=use_checkpoint,
647
- use_scale_shift_norm=use_scale_shift_norm,
648
- down=True,
649
- )
650
- if resblock_updown
651
- else Downsample(
652
- ch, conv_resample, dims=dims, out_channels=out_ch
653
- )
654
- )
655
- )
656
- ch = out_ch
657
- input_block_chans.append(ch)
658
- ds *= 2
659
- self._feature_size += ch
660
-
661
- if num_head_channels == -1:
662
- dim_head = ch // num_heads
663
- else:
664
- num_heads = ch // num_head_channels
665
- dim_head = num_head_channels
666
- if legacy:
667
- # num_heads = 1
668
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
669
- self.middle_block = TimestepEmbedSequential(
670
- ResBlock(
671
- ch,
672
- time_embed_dim
673
- if (not self.use_extra_film_by_concat)
674
- else time_embed_dim * 2,
675
- dropout,
676
- dims=dims,
677
- use_checkpoint=use_checkpoint,
678
- use_scale_shift_norm=use_scale_shift_norm,
679
- ),
680
- AttentionBlock(
681
- ch,
682
- use_checkpoint=use_checkpoint,
683
- num_heads=num_heads,
684
- num_head_channels=dim_head,
685
- use_new_attention_order=use_new_attention_order,
686
- )
687
- if not use_spatial_transformer
688
- else SpatialTransformer(
689
- ch,
690
- num_heads,
691
- dim_head,
692
- depth=transformer_depth,
693
- context_dim=context_dim,
694
- no_context=spatial_transformer_no_context,
695
- ),
696
- ResBlock(
697
- ch,
698
- time_embed_dim
699
- if (not self.use_extra_film_by_concat)
700
- else time_embed_dim * 2,
701
- dropout,
702
- dims=dims,
703
- use_checkpoint=use_checkpoint,
704
- use_scale_shift_norm=use_scale_shift_norm,
705
- ),
706
- )
707
- self._feature_size += ch
708
-
709
- self.output_blocks = nn.ModuleList([])
710
- for level, mult in list(enumerate(channel_mult))[::-1]:
711
- for i in range(num_res_blocks + 1):
712
- ich = input_block_chans.pop()
713
- layers = [
714
- ResBlock(
715
- ch + ich,
716
- time_embed_dim
717
- if (not self.use_extra_film_by_concat)
718
- else time_embed_dim * 2,
719
- dropout,
720
- out_channels=model_channels * mult,
721
- dims=dims,
722
- use_checkpoint=use_checkpoint,
723
- use_scale_shift_norm=use_scale_shift_norm,
724
- )
725
- ]
726
- ch = model_channels * mult
727
- if ds in attention_resolutions:
728
- if num_head_channels == -1:
729
- dim_head = ch // num_heads
730
- else:
731
- num_heads = ch // num_head_channels
732
- dim_head = num_head_channels
733
- if legacy:
734
- # num_heads = 1
735
- dim_head = (
736
- ch // num_heads
737
- if use_spatial_transformer
738
- else num_head_channels
739
- )
740
- layers.append(
741
- AttentionBlock(
742
- ch,
743
- use_checkpoint=use_checkpoint,
744
- num_heads=num_heads_upsample,
745
- num_head_channels=dim_head,
746
- use_new_attention_order=use_new_attention_order,
747
- )
748
- if not use_spatial_transformer
749
- else SpatialTransformer(
750
- ch,
751
- num_heads,
752
- dim_head,
753
- depth=transformer_depth,
754
- context_dim=context_dim,
755
- no_context=spatial_transformer_no_context,
756
- )
757
- )
758
- if level and i == num_res_blocks:
759
- out_ch = ch
760
- layers.append(
761
- ResBlock(
762
- ch,
763
- time_embed_dim
764
- if (not self.use_extra_film_by_concat)
765
- else time_embed_dim * 2,
766
- dropout,
767
- out_channels=out_ch,
768
- dims=dims,
769
- use_checkpoint=use_checkpoint,
770
- use_scale_shift_norm=use_scale_shift_norm,
771
- up=True,
772
- )
773
- if resblock_updown
774
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
775
- )
776
- ds //= 2
777
- self.output_blocks.append(TimestepEmbedSequential(*layers))
778
- self._feature_size += ch
779
-
780
- self.out = nn.Sequential(
781
- normalization(ch),
782
- nn.SiLU(),
783
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
784
- )
785
- if self.predict_codebook_ids:
786
- self.id_predictor = nn.Sequential(
787
- normalization(ch),
788
- conv_nd(dims, model_channels, n_embed, 1),
789
- # nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
790
- )
791
-
792
- self.shape_reported = False
793
-
794
- def convert_to_fp16(self):
795
- """
796
- Convert the torso of the model to float16.
797
- """
798
- self.input_blocks.apply(convert_module_to_f16)
799
- self.middle_block.apply(convert_module_to_f16)
800
- self.output_blocks.apply(convert_module_to_f16)
801
-
802
- def convert_to_fp32(self):
803
- """
804
- Convert the torso of the model to float32.
805
- """
806
- self.input_blocks.apply(convert_module_to_f32)
807
- self.middle_block.apply(convert_module_to_f32)
808
- self.output_blocks.apply(convert_module_to_f32)
809
-
810
- def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
811
- """
812
- Apply the model to an input batch.
813
- :param x: an [N x C x ...] Tensor of inputs.
814
- :param timesteps: a 1-D batch of timesteps.
815
- :param context: conditioning plugged in via crossattn
816
- :param y: an [N] Tensor of labels if class-conditional, or an [N, extra_film_condition_dim] Tensor if film-embedding conditional
817
- :return: an [N x C x ...] Tensor of outputs.
818
- """
819
- if not self.shape_reported:
820
- # print("The shape of UNet input is", x.size())
821
- self.shape_reported = True
822
-
823
- assert (y is not None) == (
824
- self.num_classes is not None or self.extra_film_condition_dim is not None
825
- ), "must specify y if and only if the model is class-conditional or film embedding conditional"
826
- hs = []
827
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
828
- emb = self.time_embed(t_emb)
829
-
830
- if self.num_classes is not None:
831
- assert y.shape == (x.shape[0],)
832
- emb = emb + self.label_emb(y)
833
-
834
- if self.use_extra_film_by_addition:
835
- emb = emb + self.film_emb(y)
836
- elif self.use_extra_film_by_concat:
837
- emb = th.cat([emb, self.film_emb(y)], dim=-1)
838
-
839
- h = x.type(self.dtype)
840
- for module in self.input_blocks:
841
- h = module(h, emb, context)
842
- hs.append(h)
843
- h = self.middle_block(h, emb, context)
844
- for module in self.output_blocks:
845
- h = th.cat([h, hs.pop()], dim=1)
846
- h = module(h, emb, context)
847
- h = h.type(x.dtype)
848
- if self.predict_codebook_ids:
849
- return self.id_predictor(h)
850
- else:
851
- return self.out(h)
852
-
853
-
854
- class EncoderUNetModel(nn.Module):
855
- """
856
- The half UNet model with attention and timestep embedding.
857
- For usage, see UNet.
858
- """
859
-
860
- def __init__(
861
- self,
862
- image_size,
863
- in_channels,
864
- model_channels,
865
- out_channels,
866
- num_res_blocks,
867
- attention_resolutions,
868
- dropout=0,
869
- channel_mult=(1, 2, 4, 8),
870
- conv_resample=True,
871
- dims=2,
872
- use_checkpoint=False,
873
- use_fp16=False,
874
- num_heads=1,
875
- num_head_channels=-1,
876
- num_heads_upsample=-1,
877
- use_scale_shift_norm=False,
878
- resblock_updown=False,
879
- use_new_attention_order=False,
880
- pool="adaptive",
881
- *args,
882
- **kwargs,
883
- ):
884
- super().__init__()
885
-
886
- if num_heads_upsample == -1:
887
- num_heads_upsample = num_heads
888
-
889
- self.in_channels = in_channels
890
- self.model_channels = model_channels
891
- self.out_channels = out_channels
892
- self.num_res_blocks = num_res_blocks
893
- self.attention_resolutions = attention_resolutions
894
- self.dropout = dropout
895
- self.channel_mult = channel_mult
896
- self.conv_resample = conv_resample
897
- self.use_checkpoint = use_checkpoint
898
- self.dtype = th.float16 if use_fp16 else th.float32
899
- self.num_heads = num_heads
900
- self.num_head_channels = num_head_channels
901
- self.num_heads_upsample = num_heads_upsample
902
-
903
- time_embed_dim = model_channels * 4
904
- self.time_embed = nn.Sequential(
905
- linear(model_channels, time_embed_dim),
906
- nn.SiLU(),
907
- linear(time_embed_dim, time_embed_dim),
908
- )
909
-
910
- self.input_blocks = nn.ModuleList(
911
- [
912
- TimestepEmbedSequential(
913
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
914
- )
915
- ]
916
- )
917
- self._feature_size = model_channels
918
- input_block_chans = [model_channels]
919
- ch = model_channels
920
- ds = 1
921
- for level, mult in enumerate(channel_mult):
922
- for _ in range(num_res_blocks):
923
- layers = [
924
- ResBlock(
925
- ch,
926
- time_embed_dim,
927
- dropout,
928
- out_channels=mult * model_channels,
929
- dims=dims,
930
- use_checkpoint=use_checkpoint,
931
- use_scale_shift_norm=use_scale_shift_norm,
932
- )
933
- ]
934
- ch = mult * model_channels
935
- if ds in attention_resolutions:
936
- layers.append(
937
- AttentionBlock(
938
- ch,
939
- use_checkpoint=use_checkpoint,
940
- num_heads=num_heads,
941
- num_head_channels=num_head_channels,
942
- use_new_attention_order=use_new_attention_order,
943
- )
944
- )
945
- self.input_blocks.append(TimestepEmbedSequential(*layers))
946
- self._feature_size += ch
947
- input_block_chans.append(ch)
948
- if level != len(channel_mult) - 1:
949
- out_ch = ch
950
- self.input_blocks.append(
951
- TimestepEmbedSequential(
952
- ResBlock(
953
- ch,
954
- time_embed_dim,
955
- dropout,
956
- out_channels=out_ch,
957
- dims=dims,
958
- use_checkpoint=use_checkpoint,
959
- use_scale_shift_norm=use_scale_shift_norm,
960
- down=True,
961
- )
962
- if resblock_updown
963
- else Downsample(
964
- ch, conv_resample, dims=dims, out_channels=out_ch
965
- )
966
- )
967
- )
968
- ch = out_ch
969
- input_block_chans.append(ch)
970
- ds *= 2
971
- self._feature_size += ch
972
-
973
- self.middle_block = TimestepEmbedSequential(
974
- ResBlock(
975
- ch,
976
- time_embed_dim,
977
- dropout,
978
- dims=dims,
979
- use_checkpoint=use_checkpoint,
980
- use_scale_shift_norm=use_scale_shift_norm,
981
- ),
982
- AttentionBlock(
983
- ch,
984
- use_checkpoint=use_checkpoint,
985
- num_heads=num_heads,
986
- num_head_channels=num_head_channels,
987
- use_new_attention_order=use_new_attention_order,
988
- ),
989
- ResBlock(
990
- ch,
991
- time_embed_dim,
992
- dropout,
993
- dims=dims,
994
- use_checkpoint=use_checkpoint,
995
- use_scale_shift_norm=use_scale_shift_norm,
996
- ),
997
- )
998
- self._feature_size += ch
999
- self.pool = pool
1000
- if pool == "adaptive":
1001
- self.out = nn.Sequential(
1002
- normalization(ch),
1003
- nn.SiLU(),
1004
- nn.AdaptiveAvgPool2d((1, 1)),
1005
- zero_module(conv_nd(dims, ch, out_channels, 1)),
1006
- nn.Flatten(),
1007
- )
1008
- elif pool == "attention":
1009
- assert num_head_channels != -1
1010
- self.out = nn.Sequential(
1011
- normalization(ch),
1012
- nn.SiLU(),
1013
- AttentionPool2d(
1014
- (image_size // ds), ch, num_head_channels, out_channels
1015
- ),
1016
- )
1017
- elif pool == "spatial":
1018
- self.out = nn.Sequential(
1019
- nn.Linear(self._feature_size, 2048),
1020
- nn.ReLU(),
1021
- nn.Linear(2048, self.out_channels),
1022
- )
1023
- elif pool == "spatial_v2":
1024
- self.out = nn.Sequential(
1025
- nn.Linear(self._feature_size, 2048),
1026
- normalization(2048),
1027
- nn.SiLU(),
1028
- nn.Linear(2048, self.out_channels),
1029
- )
1030
- else:
1031
- raise NotImplementedError(f"Unexpected {pool} pooling")
1032
-
1033
- def convert_to_fp16(self):
1034
- """
1035
- Convert the torso of the model to float16.
1036
- """
1037
- self.input_blocks.apply(convert_module_to_f16)
1038
- self.middle_block.apply(convert_module_to_f16)
1039
-
1040
- def convert_to_fp32(self):
1041
- """
1042
- Convert the torso of the model to float32.
1043
- """
1044
- self.input_blocks.apply(convert_module_to_f32)
1045
- self.middle_block.apply(convert_module_to_f32)
1046
-
1047
- def forward(self, x, timesteps):
1048
- """
1049
- Apply the model to an input batch.
1050
- :param x: an [N x C x ...] Tensor of inputs.
1051
- :param timesteps: a 1-D batch of timesteps.
1052
- :return: an [N x K] Tensor of outputs.
1053
- """
1054
- emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
1055
-
1056
- results = []
1057
- h = x.type(self.dtype)
1058
- for module in self.input_blocks:
1059
- h = module(h, emb)
1060
- if self.pool.startswith("spatial"):
1061
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
1062
- h = self.middle_block(h, emb)
1063
- if self.pool.startswith("spatial"):
1064
- results.append(h.type(x.dtype).mean(dim=(2, 3)))
1065
- h = th.cat(results, axis=-1)
1066
- return self.out(h)
1067
- else:
1068
- h = h.type(x.dtype)
1069
- return self.out(h)
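A minimal usage sketch for the `EncoderUNetModel` half-UNet defined above (illustration only, not part of the original file; the import path is hypothetical and assumes the module and its building blocks are importable):

```python
import torch as th
from openaimodel import EncoderUNetModel  # hypothetical import path

# Small classifier-style half-UNet: 3-channel 64x64 inputs, 10 output logits.
model = EncoderUNetModel(
    image_size=64, in_channels=3, model_channels=32, out_channels=10,
    num_res_blocks=1, attention_resolutions=(4,), channel_mult=(1, 2, 4),
    pool="adaptive",
)
x = th.randn(2, 3, 64, 64)       # [N x C x H x W] input batch
t = th.randint(0, 1000, (2,))    # one diffusion timestep per sample
logits = model(x, t)             # [N x K] == [2, 10]; all zeros at init because
                                 # the pooling head's final conv is zero-initialized
```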
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/loss.py DELETED
@@ -1,307 +0,0 @@
1
- from multiprocessing.sharedctypes import Value
2
- import torch
3
- import torch.distributed.nn
4
- from torch import distributed as dist, nn as nn
5
- from torch.nn import functional as F
6
- import numpy as np
7
- from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
8
-
9
- try:
10
- import horovod.torch as hvd
11
- except ImportError:
12
- hvd = None
13
-
14
-
15
- def gather_features(
16
- audio_features,
17
- text_features,
18
- audio_features_mlp=None,
19
- text_features_mlp=None,
20
- local_loss=False,
21
- gather_with_grad=False,
22
- rank=0,
23
- world_size=1,
24
- use_horovod=False,
25
- mlp_loss=False
26
- ):
27
- if use_horovod:
28
- assert hvd is not None, 'Please install horovod'
29
- if gather_with_grad:
30
- all_audio_features = hvd.allgather(audio_features)
31
- all_text_features = hvd.allgather(text_features)
32
- if mlp_loss:
33
- all_audio_features_mlp = hvd.allgather(audio_features_mlp)
34
- all_text_features_mlp = hvd.allgather(text_features_mlp)
35
- else:
36
- with torch.no_grad():
37
- all_audio_features = hvd.allgather(audio_features)
38
- all_text_features = hvd.allgather(text_features)
39
- if mlp_loss:
40
- all_audio_features_mlp = hvd.allgather(audio_features_mlp)
41
- all_text_features_mlp = hvd.allgather(text_features_mlp)
42
- if not local_loss:
43
- # ensure grads for local rank when all_* features don't have a gradient
44
- gathered_audio_features = list(all_audio_features.chunk(world_size, dim=0))
45
- gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
46
- gathered_audio_features[rank] = audio_features
47
- gathered_text_features[rank] = text_features
48
- all_audio_features = torch.cat(gathered_audio_features, dim=0)
49
- all_text_features = torch.cat(gathered_text_features, dim=0)
50
- if mlp_loss:
51
- gathered_audio_features_mlp = list(all_audio_features_mlp.chunk(world_size, dim=0))
52
- gathered_text_features_mlp = list(all_text_features_mlp.chunk(world_size, dim=0))
53
- gathered_audio_features_mlp[rank] = audio_features_mlp
54
- gathered_text_features_mlp[rank] = text_features_mlp
55
- all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
56
- all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
57
- else:
58
- # We gather tensors from all gpus
59
- if gather_with_grad:
60
- all_audio_features = torch.cat(torch.distributed.nn.all_gather(audio_features), dim=0)
61
- all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
62
- if mlp_loss:
63
- all_audio_features_mlp = torch.cat(torch.distributed.nn.all_gather(audio_features_mlp), dim=0)
64
- all_text_features_mlp = torch.cat(torch.distributed.nn.all_gather(text_features_mlp), dim=0)
65
- else:
66
- gathered_audio_features = [torch.zeros_like(audio_features) for _ in range(world_size)]
67
- gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
68
- dist.all_gather(gathered_audio_features, audio_features)
69
- dist.all_gather(gathered_text_features, text_features)
70
- if mlp_loss:
71
- gathered_audio_features_mlp = [torch.zeros_like(audio_features_mlp) for _ in range(world_size)]
72
- gathered_text_features_mlp = [torch.zeros_like(text_features_mlp) for _ in range(world_size)]
73
- dist.all_gather(gathered_audio_features_mlp, audio_features_mlp)
74
- dist.all_gather(gathered_text_features_mlp, text_features_mlp)
75
- if not local_loss:
76
- # ensure grads for local rank when all_* features don't have a gradient
77
- gathered_audio_features[rank] = audio_features
78
- gathered_text_features[rank] = text_features
79
- if mlp_loss:
80
- gathered_audio_features_mlp[rank] = audio_features_mlp
81
- gathered_text_features_mlp[rank] = text_features_mlp
82
-
83
- all_audio_features = torch.cat(gathered_audio_features, dim=0)
84
- all_text_features = torch.cat(gathered_text_features, dim=0)
85
- if mlp_loss:
86
- all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
87
- all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
88
- if mlp_loss:
89
- return all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp
90
- else:
91
- return all_audio_features, all_text_features
92
-
93
- class ClipLoss(nn.Module):
94
-
95
- def __init__(
96
- self,
97
- local_loss=False,
98
- gather_with_grad=False,
99
- cache_labels=False,
100
- rank=0,
101
- world_size=1,
102
- use_horovod=False,
103
- mlp_loss=False,
104
- weight_loss_kappa=0,
105
- ):
106
- super().__init__()
107
- self.local_loss = local_loss
108
- self.gather_with_grad = gather_with_grad
109
- self.cache_labels = cache_labels
110
- self.rank = rank
111
- self.world_size = world_size
112
- self.use_horovod = use_horovod
113
- self.mlp_loss = mlp_loss
114
- self.weighted_loss = bool(weight_loss_kappa!=0)
115
- self.weight_loss_kappa = weight_loss_kappa
116
- # cache state
117
- self.prev_num_logits = 0
118
- self.labels = {}
119
-
120
- def forward(self, audio_features, text_features, logit_scale_a, logit_scale_t=None, audio_features_mlp=None, text_features_mlp=None):
121
- device = audio_features.device
122
- if self.mlp_loss:
123
- if self.world_size > 1:
124
- all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp = gather_features(
125
- audio_features=audio_features,text_features=text_features,
126
- audio_features_mlp=audio_features_mlp,text_features_mlp=text_features_mlp,
127
- local_loss=self.local_loss,gather_with_grad=self.gather_with_grad,
128
- rank=self.rank,world_size=self.world_size,use_horovod=self.use_horovod,
129
- mlp_loss=self.mlp_loss
130
- )
131
- if self.local_loss:
132
- a_logits_per_audio = logit_scale_a * audio_features @ all_text_features_mlp.T
133
- a_logits_per_text = logit_scale_a * text_features_mlp @ all_audio_features.T
134
- t_logits_per_audio = logit_scale_t * audio_features_mlp @ all_text_features.T
135
- t_logits_per_text = logit_scale_t * text_features @ all_audio_features_mlp.T
136
- else:
137
- a_logits_per_audio = logit_scale_a * all_audio_features @ all_text_features_mlp.T
138
- a_logits_per_text = a_logits_per_audio.T
139
- t_logits_per_audio = logit_scale_t * all_audio_features_mlp @ all_text_features.T
140
- t_logits_per_text = t_logits_per_audio.T
141
- else:
142
- a_logits_per_audio = logit_scale_a * audio_features @ text_features_mlp.T
143
- a_logits_per_text = logit_scale_a * text_features_mlp @ audio_features.T
144
- t_logits_per_audio = logit_scale_t * audio_features_mlp @ text_features.T
145
- t_logits_per_text = logit_scale_t * text_features @ audio_features_mlp.T
146
-
147
- # calculated ground-truth and cache if enabled
148
- num_logits = a_logits_per_audio.shape[0]
149
- if self.prev_num_logits != num_logits or device not in self.labels:
150
- labels = torch.arange(num_logits, device=device, dtype=torch.long)
151
- if self.world_size > 1 and self.local_loss:
152
- labels = labels + num_logits * self.rank
153
- if self.cache_labels:
154
- self.labels[device] = labels
155
- self.prev_num_logits = num_logits
156
- else:
157
- labels = self.labels[device]
158
-
159
- if not self.weighted_loss:
160
- total_loss = (
161
- F.cross_entropy(a_logits_per_audio, labels) +
162
- F.cross_entropy(a_logits_per_text, labels) +
163
- F.cross_entropy(t_logits_per_audio, labels) +
164
- F.cross_entropy(t_logits_per_text, labels)
165
- ) / 4
166
- else:
167
- audio_weight = (audio_features@audio_features.T).detach()
168
- audio_weight = (torch.exp(torch.sum(audio_weight, axis=1)/(self.weight_loss_kappa*len(audio_weight)))).detach()
169
- text_weight = (text_features@text_features.T).detach()
170
- text_weight = (torch.exp(torch.sum(text_weight, axis=1)/(self.weight_loss_kappa*len(text_features)))).detach()
171
- total_loss = (
172
- F.cross_entropy(a_logits_per_audio, labels, weight=audio_weight) +
173
- F.cross_entropy(a_logits_per_text, labels, weight=audio_weight) +
174
- F.cross_entropy(t_logits_per_audio, labels, weight=text_weight) +
175
- F.cross_entropy(t_logits_per_text, labels, weight=text_weight)
176
- ) / 4
177
- else:
178
- if self.world_size > 1:
179
- all_audio_features, all_text_features = gather_features(
180
- audio_features=audio_features,text_features=text_features,
181
- local_loss=self.local_loss,gather_with_grad=self.gather_with_grad,
182
- rank=self.rank,world_size=self.world_size,use_horovod=self.use_horovod,
183
- mlp_loss=self.mlp_loss
184
- )
185
-
186
- if self.local_loss:
187
- logits_per_audio = logit_scale_a * audio_features @ all_text_features.T
188
- logits_per_text = logit_scale_a * text_features @ all_audio_features.T
189
- else:
190
- logits_per_audio = logit_scale_a * all_audio_features @ all_text_features.T
191
- logits_per_text = logits_per_audio.T
192
- else:
193
- logits_per_audio = logit_scale_a * audio_features @ text_features.T
194
- logits_per_text = logit_scale_a * text_features @ audio_features.T
195
-
196
- # calculated ground-truth and cache if enabled
197
- num_logits = logits_per_audio.shape[0]
198
- if self.prev_num_logits != num_logits or device not in self.labels:
199
- labels = torch.arange(num_logits, device=device, dtype=torch.long)
200
- if self.world_size > 1 and self.local_loss:
201
- labels = labels + num_logits * self.rank
202
- if self.cache_labels:
203
- self.labels[device] = labels
204
- self.prev_num_logits = num_logits
205
- else:
206
- labels = self.labels[device]
207
- if not self.weighted_loss:
208
- total_loss = (
209
- F.cross_entropy(logits_per_audio, labels) +
210
- F.cross_entropy(logits_per_text, labels)
211
- ) / 2
212
- else:
213
- audio_weight = (all_audio_features@all_audio_features.T).detach()
214
- audio_weight = (torch.exp(torch.sum(audio_weight, axis=1)/(self.weight_loss_kappa*len(all_audio_features)))).detach()
215
- text_weight = (all_text_features@all_text_features.T).detach()
216
- text_weight = (torch.exp(torch.sum(text_weight, axis=1)/(self.weight_loss_kappa*len(all_text_features)))).detach()
217
- total_loss = (
218
- F.cross_entropy(logits_per_audio, labels, weight=text_weight) +
219
- F.cross_entropy(logits_per_text, labels, weight=audio_weight)
220
- ) / 2
221
- return total_loss
222
-
223
- def lp_gather_features(
224
- pred,
225
- target,
226
- world_size=1,
227
- use_horovod=False
228
- ):
229
- if use_horovod:
230
- assert hvd is not None, 'Please install horovod'
231
- with torch.no_grad():
232
- all_preds = hvd.allgather(pred)
233
- all_targets = hvd.allgather(target)
234
- else:
235
- gathered_preds = [torch.zeros_like(pred) for _ in range(world_size)]
236
- gathered_targets = [torch.zeros_like(target) for _ in range(world_size)]
237
-
238
- dist.all_gather(gathered_preds, pred)
239
- dist.all_gather(gathered_targets, target)
240
- all_preds = torch.cat(gathered_preds, dim=0)
241
- all_targets = torch.cat(gathered_targets, dim=0)
242
-
243
- return all_preds, all_targets
244
-
245
-
246
- def get_map(pred, target):
247
- pred = torch.sigmoid(pred).numpy()
248
- target = target.numpy()
249
- return np.mean(average_precision_score(target, pred, average=None))
250
-
251
- def get_acc(pred, target):
252
- pred = torch.argmax(pred,1).numpy()
253
- target = torch.argmax(target,1).numpy()
254
- return accuracy_score(target, pred)
255
-
256
- def get_mauc(pred, target):
257
- pred = torch.sigmoid(pred).numpy()
258
- target = target.numpy()
259
- return np.mean(roc_auc_score(target, pred, average=None))
260
-
261
-
262
- class LPMetrics(object):
263
- def __init__(self, metric_names = ['map','acc','mauc']):
264
- self.metrics = []
265
- for name in metric_names:
266
- self.metrics.append(self.get_metric(name))
267
- self.metric_names = metric_names
268
-
269
- def get_metric(self,name):
270
- if name == 'map':
271
- return get_map
272
- elif name == 'acc':
273
- return get_acc
274
- elif name == 'mauc':
275
- return get_mauc
276
- else:
277
- raise ValueError(f'the metric should be one of [map, acc, mauc], got {name}')
278
-
279
- def evaluate_mertics(self, pred, target):
280
- metric_dict = {}
281
- for i in range(len(self.metric_names)):
282
- metric_dict[self.metric_names[i]] = self.metrics[i](pred, target)
283
- return metric_dict
284
-
285
-
286
- def calc_celoss(pred, target):
287
- target = torch.argmax(target, 1).long()
288
- return nn.CrossEntropyLoss()(pred, target)
289
-
290
-
291
- class LPLoss(nn.Module):
292
-
293
- def __init__(self, loss_name):
294
- super().__init__()
295
- if loss_name == 'bce':
296
- self.loss_func = nn.BCEWithLogitsLoss()
297
- elif loss_name == 'ce':
298
- self.loss_func = calc_celoss
299
- elif loss_name == 'mse':
300
- self.loss_func = nn.MSELoss()
301
- else:
302
- raise ValueError(f'the loss func should be one of [bce, ce, mse], got {loss_name}')
303
-
304
- def forward(self, pred, target):
305
- loss = self.loss_func(pred, target)
306
- return loss
307
-
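For orientation, a minimal sketch of the symmetric contrastive objective that `ClipLoss` above computes in the single-process, non-MLP, unweighted case (toy tensors only, not taken from the original repository):

```python
import torch
import torch.nn.functional as F

# Toy, already L2-normalized audio/text embeddings for a batch of 4 pairs.
audio = F.normalize(torch.randn(4, 512), dim=-1)
text = F.normalize(torch.randn(4, 512), dim=-1)
logit_scale = torch.tensor(100.0)                 # plays the role of logit_scale_a

logits_per_audio = logit_scale * audio @ text.T   # [4, 4] similarity matrix
logits_per_text = logits_per_audio.T
labels = torch.arange(4)                          # the i-th audio matches the i-th text

loss = (F.cross_entropy(logits_per_audio, labels) +
        F.cross_entropy(logits_per_text, labels)) / 2
```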
 
spaces/ASJMO/freegpt/g4f/Provider/Providers/ChatFree.py DELETED
@@ -1,48 +0,0 @@
1
- import os, requests
2
- from ...typing import sha256, Dict, get_type_hints
3
- import json
4
-
5
- url = "https://v.chatfree.cc"
6
- model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k']
7
- supports_stream = False
8
- needs_auth = False
9
-
10
-
11
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
12
- headers = {
13
- 'authority': 'chat.dfehub.com',
14
- 'accept': '*/*',
15
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
16
- 'content-type': 'application/json',
17
- 'origin': 'https://v.chatfree.cc',
18
- 'referer': 'https://v.chatfree.cc/',
19
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
20
- 'sec-ch-ua-mobile': '?0',
21
- 'sec-ch-ua-platform': '"macOS"',
22
- 'sec-fetch-dest': 'empty',
23
- 'sec-fetch-mode': 'cors',
24
- 'sec-fetch-site': 'same-origin',
25
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
26
- 'x-requested-with': 'XMLHttpRequest',
27
- }
28
-
29
- json_data = {
30
- 'messages': messages,
31
- 'stream': True,
32
- 'model': model,
33
- 'temperature': 0.5,
34
- 'presence_penalty': 0,
35
- 'frequency_penalty': 0,
36
- 'top_p': 1,
37
- }
38
-
39
- response = requests.post('https://v.chatfree.cc/api/openai/v1/chat/completions',
40
- headers=headers, json=json_data)
41
-
42
- for chunk in response.iter_lines():
43
- if b'content' in chunk:
44
- data = json.loads(chunk.decode().split('data: ')[1])
45
- yield (data['choices'][0]['delta']['content'])
46
-
47
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
48
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
spaces/Abhilashvj/planogram-compliance/inference.py DELETED
@@ -1,226 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
4
-
5
- Usage - sources:
6
- $ python detect.py --weights yolov5s.pt --source 0 # webcam
7
- img.jpg # image
8
- vid.mp4 # video
9
- screen # screenshot
10
- path/ # directory
11
- list.txt # list of images
12
- list.streams # list of streams
13
- 'path/*.jpg' # glob
14
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
15
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
16
-
17
- Usage - formats:
18
- $ python detect.py --weights yolov5s.pt # PyTorch
19
- yolov5s.torchscript # TorchScript
20
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
21
- yolov5s_openvino_model # OpenVINO
22
- yolov5s.engine # TensorRT
23
- yolov5s.mlmodel # CoreML (macOS-only)
24
- yolov5s_saved_model # TensorFlow SavedModel
25
- yolov5s.pb # TensorFlow GraphDef
26
- yolov5s.tflite # TensorFlow Lite
27
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
28
- yolov5s_paddle_model # PaddlePaddle
29
- """
30
-
31
- import argparse
32
- import os
33
- import platform
34
- import sys
35
- from pathlib import Path
36
-
37
- import torch
38
-
39
- FILE = Path(__file__).resolve()
40
- ROOT = FILE.parents[0] # YOLOv5 root directory
41
- if str(ROOT) not in sys.path:
42
- sys.path.append(str(ROOT)) # add ROOT to PATH
43
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
44
-
45
- from models.common import DetectMultiBackend
46
- from utils.dataloaders import (
47
- IMG_FORMATS,
48
- VID_FORMATS,
49
- LoadImages,
50
- LoadScreenshots,
51
- LoadStreams,
52
- )
53
- from utils.general import (
54
- LOGGER,
55
- Profile,
56
- check_file,
57
- check_img_size,
58
- check_imshow,
59
- check_requirements,
60
- colorstr,
61
- cv2,
62
- increment_path,
63
- non_max_suppression,
64
- print_args,
65
- scale_boxes,
66
- strip_optimizer,
67
- xyxy2xywh,
68
- )
69
- from utils.plots import Annotator, colors, save_one_box
70
- from utils.torch_utils import select_device, smart_inference_mode
71
-
72
-
73
- @smart_inference_mode()
74
- def run(
75
- weights=ROOT / "yolov5s.pt", # model path or triton URL
76
- source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam)
77
- data=ROOT / "data/coco128.yaml", # dataset.yaml path
78
- imgsz=(640, 640), # inference size (height, width)
79
- conf_thres=0.25, # confidence threshold
80
- iou_thres=0.45, # NMS IOU threshold
81
- max_det=1000, # maximum detections per image
82
- device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
83
- view_img=False, # show results
84
- save_txt=False, # save results to *.txt
85
- save_conf=False, # save confidences in --save-txt labels
86
- save_crop=False, # save cropped prediction boxes
87
- nosave=False, # do not save images/videos
88
- classes=None, # filter by class: --class 0, or --class 0 2 3
89
- agnostic_nms=False, # class-agnostic NMS
90
- augment=False, # augmented inference
91
- visualize=False, # visualize features
92
- update=False, # update all models
93
- project=ROOT / "runs/detect", # save results to project/name
94
- name="exp", # save results to project/name
95
- exist_ok=False, # existing project/name ok, do not increment
96
- line_thickness=3, # bounding box thickness (pixels)
97
- hide_labels=False, # hide labels
98
- hide_conf=False, # hide confidences
99
- half=False, # use FP16 half-precision inference
100
- dnn=False, # use OpenCV DNN for ONNX inference
101
- vid_stride=1, # video frame-rate stride
102
- ):
103
- source = str(source)
104
- save_img = not nosave and not source.endswith(
105
- ".txt"
106
- ) # save inference images
107
- is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
108
- is_url = source.lower().startswith(
109
- ("rtsp://", "rtmp://", "http://", "https://")
110
- )
111
- webcam = (
112
- source.isnumeric()
113
- or source.endswith(".streams")
114
- or (is_url and not is_file)
115
- )
116
- screenshot = source.lower().startswith("screen")
117
- if is_url and is_file:
118
- source = check_file(source) # download
119
-
120
- # Directories
121
- save_dir = increment_path(
122
- Path(project) / name, exist_ok=exist_ok
123
- ) # increment run
124
- (save_dir / "labels" if save_txt else save_dir).mkdir(
125
- parents=True, exist_ok=True
126
- ) # make dir
127
-
128
- # Load model
129
- device = select_device(device)
130
- model = DetectMultiBackend(
131
- weights, device=device, dnn=dnn, data=data, fp16=half
132
- )
133
- stride, names, pt = model.stride, model.names, model.pt
134
- imgsz = check_img_size(imgsz, s=stride) # check image size
135
-
136
- # Dataloader
137
- bs = 1 # batch_size
138
- if webcam:
139
- view_img = check_imshow(warn=True)
140
- dataset = LoadStreams(
141
- source,
142
- img_size=imgsz,
143
- stride=stride,
144
- auto=pt,
145
- vid_stride=vid_stride,
146
- )
147
- bs = len(dataset)
148
- elif screenshot:
149
- dataset = LoadScreenshots(
150
- source, img_size=imgsz, stride=stride, auto=pt
151
- )
152
- else:
153
- dataset = LoadImages(
154
- source,
155
- img_size=imgsz,
156
- stride=stride,
157
- auto=pt,
158
- vid_stride=vid_stride,
159
- )
160
- vid_path, vid_writer = [None] * bs, [None] * bs
161
-
162
- # Run inference
163
- model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup
164
- seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
165
- for path, im, im0s, vid_cap, s in dataset:
166
- with dt[0]:
167
- im = torch.from_numpy(im).to(model.device)
168
- im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
169
- im /= 255 # 0 - 255 to 0.0 - 1.0
170
- if len(im.shape) == 3:
171
- im = im[None] # expand for batch dim
172
-
173
- # Inference
174
- with dt[1]:
175
- visualize = (
176
- increment_path(save_dir / Path(path).stem, mkdir=True)
177
- if visualize
178
- else False
179
- )
180
- pred = model(im, augment=augment, visualize=visualize)
181
-
182
- # NMS
183
- with dt[2]:
184
- pred = non_max_suppression(
185
- pred,
186
- conf_thres,
187
- iou_thres,
188
- classes,
189
- agnostic_nms,
190
- max_det=max_det,
191
- )
192
-
193
- # Second-stage classifier (optional)
194
- # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
195
-
196
- # Process predictions
197
- for i, det in enumerate(pred): # per image
198
- seen += 1
199
- if webcam: # batch_size >= 1
200
- p, im0, frame = path[i], im0s[i].copy(), dataset.count
201
- s += f"{i}: "
202
- else:
203
- p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
204
-
205
- p = Path(p) # to Path
206
- save_path = str(save_dir / p.name) # im.jpg
207
- txt_path = str(save_dir / "labels" / p.stem) + (
208
- "" if dataset.mode == "image" else f"_{frame}"
209
- ) # im.txt
210
- s += "%gx%g " % im.shape[2:] # print string
211
- gn = torch.tensor(im0.shape)[
212
- [1, 0, 1, 0]
213
- ] # normalization gain whwh
214
- imc = im0.copy() if save_crop else im0 # for save_crop
215
- annotator = Annotator(
216
- im0, line_width=line_thickness, example=str(names)
217
- )
218
- results = []
219
- if len(det):
220
- # Rescale boxes from img_size to im0 size
221
- det[:, :4] = scale_boxes(
222
- im.shape[2:], det[:, :4], im0.shape
223
- ).round()
224
- results.append((path, det))
225
-
226
- return results
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/__init__.py DELETED
@@ -1,9 +0,0 @@
1
- from agentverse.registry import Registry
2
-
3
- describer_registry = Registry(name="DescriberRegistry")
4
-
5
- from .base import BaseDescriber
6
- from .basic import BasicDescriber
7
- from .classroom import ClassroomDescriber
8
- from .pokemon import PokemonDescriber
9
- from .prisoner import PrisonerDescriber
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/TextObjectMethods.js DELETED
@@ -1,36 +0,0 @@
1
- var SetTextFormatCallback = function (callback, scope) {
2
- this.textFormatCallback = callback;
3
- this.textFormatCallbackScope = scope;
4
- return this;
5
- }
6
-
7
- var GetFormatText = function (value) {
8
- if (value === undefined) {
9
- value = this.value;
10
- }
11
-
12
- var text;
13
- if (this.textFormatCallbackScope) {
14
- text = this.textFormatCallback.call(this.textFormatCallbackScope, value);
15
- } else {
16
- text = this.textFormatCallback(value);
17
- }
18
- return text;
19
- }
20
-
21
- var UpdateText = function (value) {
22
- var textObject = this.sizerChildren.text;
23
- if (textObject && this.textFormatCallback) {
24
- textObject.setText(GetFormatText.call(this, value));
25
- if (textObject.layout) {
26
- textObject.layout();
27
- }
28
- }
29
- return this;
30
- }
31
-
32
- export default {
33
- setTextFormatCallback: SetTextFormatCallback,
34
- getFormatText: GetFormatText,
35
- updateText: UpdateText
36
- }
 
spaces/AlexWang/lama/bin/paper_runfiles/update_test_data_stats.sh DELETED
@@ -1,30 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- # paths to data are valid for mml7
4
-
5
- source "$(dirname $0)/env.sh"
6
-
7
- #INDIR="/data/inpainting/paper_data/Places365_val_test/test_large_30k"
8
- #
9
- #for dataset in random_medium_256 random_medium_512 random_thick_256 random_thick_512 random_thin_256 random_thin_512
10
- #do
11
- # "$BINDIR/calc_dataset_stats.py" "$INDIR/$dataset" "$INDIR/${dataset}_stats2"
12
- #done
13
- #
14
- #"$BINDIR/calc_dataset_stats.py" "/data/inpainting/evalset2" "/data/inpainting/evalset2_stats2"
15
-
16
-
17
- INDIR="/data/inpainting/paper_data/CelebA-HQ_val_test/test"
18
-
19
- for dataset in random_medium_256 random_thick_256 random_thin_256
20
- do
21
- "$BINDIR/calc_dataset_stats.py" "$INDIR/$dataset" "$INDIR/${dataset}_stats2"
22
- done
23
-
24
-
25
- INDIR="/data/inpainting/paper_data/Paris_StreetView_Dataset_val_256/paris_eval_gt"
26
-
27
- for dataset in random_medium_256 random_thick_256 random_thin_256
28
- do
29
- "$BINDIR/calc_dataset_stats.py" "$INDIR/$dataset" "$INDIR/${dataset}_stats2"
30
- done
 
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py DELETED
@@ -1,26 +0,0 @@
1
- from easydict import EasyDict as edict
2
-
3
- # make training faster
4
- # our RAM is 256G
5
- # mount -t tmpfs -o size=140G tmpfs /train_tmp
6
-
7
- config = edict()
8
- config.loss = "arcface"
9
- config.network = "r34"
10
- config.resume = False
11
- config.output = None
12
- config.embedding_size = 512
13
- config.sample_rate = 1.0
14
- config.fp16 = True
15
- config.momentum = 0.9
16
- config.weight_decay = 5e-4
17
- config.batch_size = 128
18
- config.lr = 0.1 # batch size is 512
19
-
20
- config.rec = "/train_tmp/ms1m-retinaface-t1"
21
- config.num_classes = 93431
22
- config.num_image = 5179510
23
- config.num_epoch = 25
24
- config.warmup_epoch = -1
25
- config.decay_epoch = [10, 16, 22]
26
- config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
 
spaces/Alpaca233/SadTalker/src/face3d/options/test_options.py DELETED
@@ -1,21 +0,0 @@
1
- """This script contains the test options for Deep3DFaceRecon_pytorch
2
- """
3
-
4
- from .base_options import BaseOptions
5
-
6
-
7
- class TestOptions(BaseOptions):
8
- """This class includes test options.
9
-
10
- It also includes shared options defined in BaseOptions.
11
- """
12
-
13
- def initialize(self, parser):
14
- parser = BaseOptions.initialize(self, parser) # define shared options
15
- parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
16
- parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]')
17
- parser.add_argument('--img_folder', type=str, default='examples', help='folder for test images.')
18
-
19
- # Dropout and Batchnorm have different behavior during training and testing.
20
- self.isTrain = False
21
- return parser
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/lora/README.md DELETED
@@ -1,83 +0,0 @@
1
- # Stable Diffusion text-to-image fine-tuning
2
- This extended LoRA training script was authored by [haofanwang](https://github.com/haofanwang).
3
- This is an experimental LoRA extension of [this example](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py). We further support adding LoRA layers to the text encoder.
4
-
5
- ## Training with LoRA
6
-
7
- Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.
8
-
9
- In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:
10
-
11
- - Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
12
- - Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
13
- - LoRA attention layers allow controlling the extent to which the model is adapted to new training images via a `scale` parameter.
14
-
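To make the rank-decomposition idea above concrete, here is a minimal, self-contained sketch of a LoRA-wrapped linear layer (illustration only; it is not the implementation used by `train_text_to_image_lora.py`):

```python
import torch
import torch.nn as nn

class LoRALinear(nn.Module):
    """Frozen base layer plus a trainable low-rank update: y = W x + (alpha / r) * B A x."""
    def __init__(self, base: nn.Linear, r: int = 4, alpha: int = 32):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad_(False)            # pretrained weights stay frozen
        self.lora_A = nn.Linear(base.in_features, r, bias=False)
        self.lora_B = nn.Linear(r, base.out_features, bias=False)
        nn.init.zeros_(self.lora_B.weight)     # no change to the base model at step 0
        self.scale = alpha / r                 # the `scale` knob mentioned above

    def forward(self, x):
        return self.base(x) + self.scale * self.lora_B(self.lora_A(x))

layer = LoRALinear(nn.Linear(768, 768))
out = layer(torch.randn(1, 768))               # only lora_A / lora_B receive gradients
```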
15
- [cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.
16
-
17
- With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset
18
- on consumer GPUs like Tesla T4, Tesla V100.
19
-
20
- ### Training
21
-
22
- First, you need to set up your development environment as is explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables. Here, we will use [Stable Diffusion v1-4](https://hf.co/CompVis/stable-diffusion-v1-4) and the [Pokemons dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions).
23
-
24
- **___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
25
-
26
- **___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see generating images during training. All you need to do is to run `pip install wandb` before training to automatically log images.___**
27
-
28
- ```bash
29
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
30
- export DATASET_NAME="lambdalabs/pokemon-blip-captions"
31
- ```
32
-
33
- For this example we want to directly store the trained LoRA embeddings on the Hub, so
34
- we need to be logged in and add the `--push_to_hub` flag.
35
-
36
- ```bash
37
- huggingface-cli login
38
- ```
39
-
40
- Now we can start training!
41
-
42
- ```bash
43
- accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
44
- --pretrained_model_name_or_path=$MODEL_NAME \
45
- --dataset_name=$DATASET_NAME --caption_column="text" \
46
- --resolution=512 --random_flip \
47
- --train_batch_size=1 \
48
- --num_train_epochs=100 --checkpointing_steps=5000 \
49
- --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
50
- --seed=42 \
51
- --output_dir="sd-pokemon-model-lora" \
52
- --validation_prompt="cute dragon creature" --report_to="wandb" \
53
- --use_peft \
54
- --lora_r=4 --lora_alpha=32 \
55
- --lora_text_encoder_r=4 --lora_text_encoder_alpha=32
56
- ```
57
-
58
- The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases.
59
-
60
- **___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run `train_text_to_image_lora.py` in consumer GPUs like T4 or V100.___**
61
-
62
- The final LoRA embedding weights have been uploaded to [sayakpaul/sd-model-finetuned-lora-t4](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4). **___Note: [The final weights](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/pytorch_lora_weights.bin) are only 3 MB in size, which is orders of magnitudes smaller than the original model.___**
63
-
64
- You can check some inference samples that were logged during the course of the fine-tuning process [here](https://wandb.ai/sayakpaul/text2image-fine-tune/runs/q4lc0xsw).
65
-
66
- ### Inference
67
-
68
- Once you have trained a model using the above command, the inference can be done simply using the `StableDiffusionPipeline` after loading the trained LoRA weights. You
69
- need to pass the `output_dir` for loading the LoRA weights which, in this case, is `sd-pokemon-model-lora`.
70
-
71
- ```python
72
- from diffusers import StableDiffusionPipeline
73
- import torch
74
-
75
- model_path = "sayakpaul/sd-model-finetuned-lora-t4"
76
- pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
77
- pipe.unet.load_attn_procs(model_path)
78
- pipe.to("cuda")
79
-
80
- prompt = "A pokemon with green eyes and red legs."
81
- image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
82
- image.save("pokemon.png")
83
- ```
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py DELETED
@@ -1,417 +0,0 @@
1
- # Copyright 2023 Kakao Brain and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import List, Optional, Union
17
-
18
- import PIL
19
- import torch
20
- from torch.nn import functional as F
21
- from transformers import (
22
- CLIPImageProcessor,
23
- CLIPTextModelWithProjection,
24
- CLIPTokenizer,
25
- CLIPVisionModelWithProjection,
26
- )
27
-
28
- from ...models import UNet2DConditionModel, UNet2DModel
29
- from ...schedulers import UnCLIPScheduler
30
- from ...utils import logging, randn_tensor
31
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
32
- from .text_proj import UnCLIPTextProjModel
33
-
34
-
35
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
-
37
-
38
- class UnCLIPImageVariationPipeline(DiffusionPipeline):
39
- """
40
- Pipeline to generate image variations from an input image using UnCLIP.
41
-
42
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
43
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
44
-
45
- Args:
46
- text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
47
- Frozen text-encoder.
48
- tokenizer ([`~transformers.CLIPTokenizer`]):
49
- A `CLIPTokenizer` to tokenize text.
50
- feature_extractor ([`~transformers.CLIPImageProcessor`]):
51
- Model that extracts features from generated images to be used as inputs for the `image_encoder`.
52
- image_encoder ([`~transformers.CLIPVisionModelWithProjection`]):
53
- Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
54
- text_proj ([`UnCLIPTextProjModel`]):
55
- Utility class to prepare and combine the embeddings before they are passed to the decoder.
56
- decoder ([`UNet2DConditionModel`]):
57
- The decoder to invert the image embedding into an image.
58
- super_res_first ([`UNet2DModel`]):
59
- Super resolution UNet. Used in all but the last step of the super resolution diffusion process.
60
- super_res_last ([`UNet2DModel`]):
61
- Super resolution UNet. Used in the last step of the super resolution diffusion process.
62
- decoder_scheduler ([`UnCLIPScheduler`]):
63
- Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]).
64
- super_res_scheduler ([`UnCLIPScheduler`]):
65
- Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]).
66
- """
67
-
68
- decoder: UNet2DConditionModel
69
- text_proj: UnCLIPTextProjModel
70
- text_encoder: CLIPTextModelWithProjection
71
- tokenizer: CLIPTokenizer
72
- feature_extractor: CLIPImageProcessor
73
- image_encoder: CLIPVisionModelWithProjection
74
- super_res_first: UNet2DModel
75
- super_res_last: UNet2DModel
76
-
77
- decoder_scheduler: UnCLIPScheduler
78
- super_res_scheduler: UnCLIPScheduler
79
-
80
- def __init__(
81
- self,
82
- decoder: UNet2DConditionModel,
83
- text_encoder: CLIPTextModelWithProjection,
84
- tokenizer: CLIPTokenizer,
85
- text_proj: UnCLIPTextProjModel,
86
- feature_extractor: CLIPImageProcessor,
87
- image_encoder: CLIPVisionModelWithProjection,
88
- super_res_first: UNet2DModel,
89
- super_res_last: UNet2DModel,
90
- decoder_scheduler: UnCLIPScheduler,
91
- super_res_scheduler: UnCLIPScheduler,
92
- ):
93
- super().__init__()
94
-
95
- self.register_modules(
96
- decoder=decoder,
97
- text_encoder=text_encoder,
98
- tokenizer=tokenizer,
99
- text_proj=text_proj,
100
- feature_extractor=feature_extractor,
101
- image_encoder=image_encoder,
102
- super_res_first=super_res_first,
103
- super_res_last=super_res_last,
104
- decoder_scheduler=decoder_scheduler,
105
- super_res_scheduler=super_res_scheduler,
106
- )
107
-
108
- # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
109
- def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
110
- if latents is None:
111
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
112
- else:
113
- if latents.shape != shape:
114
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
115
- latents = latents.to(device)
116
-
117
- latents = latents * scheduler.init_noise_sigma
118
- return latents
119
-
120
- def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance):
121
- batch_size = len(prompt) if isinstance(prompt, list) else 1
122
-
123
- # get prompt text embeddings
124
- text_inputs = self.tokenizer(
125
- prompt,
126
- padding="max_length",
127
- max_length=self.tokenizer.model_max_length,
128
- return_tensors="pt",
129
- )
130
- text_input_ids = text_inputs.input_ids
131
- text_mask = text_inputs.attention_mask.bool().to(device)
132
- text_encoder_output = self.text_encoder(text_input_ids.to(device))
133
-
134
- prompt_embeds = text_encoder_output.text_embeds
135
- text_encoder_hidden_states = text_encoder_output.last_hidden_state
136
-
137
- prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
138
- text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
139
- text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
140
-
141
- if do_classifier_free_guidance:
142
- uncond_tokens = [""] * batch_size
143
-
144
- max_length = text_input_ids.shape[-1]
145
- uncond_input = self.tokenizer(
146
- uncond_tokens,
147
- padding="max_length",
148
- max_length=max_length,
149
- truncation=True,
150
- return_tensors="pt",
151
- )
152
- uncond_text_mask = uncond_input.attention_mask.bool().to(device)
153
- negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
154
-
155
- negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds
156
- uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state
157
-
158
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
159
-
160
- seq_len = negative_prompt_embeds.shape[1]
161
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
162
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
163
-
164
- seq_len = uncond_text_encoder_hidden_states.shape[1]
165
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
166
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
167
- batch_size * num_images_per_prompt, seq_len, -1
168
- )
169
- uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
170
-
171
- # done duplicates
172
-
173
- # For classifier free guidance, we need to do two forward passes.
174
- # Here we concatenate the unconditional and text embeddings into a single batch
175
- # to avoid doing two forward passes
176
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
177
- text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
178
-
179
- text_mask = torch.cat([uncond_text_mask, text_mask])
180
-
181
- return prompt_embeds, text_encoder_hidden_states, text_mask
182
-
183
- def _encode_image(self, image, device, num_images_per_prompt, image_embeddings: Optional[torch.Tensor] = None):
184
- dtype = next(self.image_encoder.parameters()).dtype
185
-
186
- if image_embeddings is None:
187
- if not isinstance(image, torch.Tensor):
188
- image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
189
-
190
- image = image.to(device=device, dtype=dtype)
191
- image_embeddings = self.image_encoder(image).image_embeds
192
-
193
- image_embeddings = image_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
194
-
195
- return image_embeddings
196
-
197
- @torch.no_grad()
198
- def __call__(
199
- self,
200
- image: Optional[Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor]] = None,
201
- num_images_per_prompt: int = 1,
202
- decoder_num_inference_steps: int = 25,
203
- super_res_num_inference_steps: int = 7,
204
- generator: Optional[torch.Generator] = None,
205
- decoder_latents: Optional[torch.FloatTensor] = None,
206
- super_res_latents: Optional[torch.FloatTensor] = None,
207
- image_embeddings: Optional[torch.Tensor] = None,
208
- decoder_guidance_scale: float = 8.0,
209
- output_type: Optional[str] = "pil",
210
- return_dict: bool = True,
211
- ):
212
- """
213
- The call function to the pipeline for generation.
214
-
215
- Args:
216
- image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`):
217
- `Image` or tensor representing an image batch to be used as the starting point. If you provide a
218
- tensor, it needs to be compatible with the [`CLIPImageProcessor`]
219
- [configuration](https://huggingface.co/fusing/karlo-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json).
- Can be left as `None` only when `image_embeddings` are passed.
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- decoder_num_inference_steps (`int`, *optional*, defaults to 25):
- The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality
- image at the expense of slower inference.
- super_res_num_inference_steps (`int`, *optional*, defaults to 7):
- The number of denoising steps for super resolution. More denoising steps usually lead to a higher
- quality image at the expense of slower inference.
- generator (`torch.Generator`, *optional*):
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
- generation deterministic.
- decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*):
- Pre-generated noisy latents to be used as inputs for the decoder.
- super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*):
- Pre-generated noisy latents to be used as inputs for the decoder.
- decoder_guidance_scale (`float`, *optional*, defaults to 4.0):
- A higher guidance scale value encourages the model to generate images closely linked to the text
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- image_embeddings (`torch.Tensor`, *optional*):
- Pre-defined image embeddings that can be derived from the image encoder. Pre-defined image embeddings
- can be passed for tasks like image interpolations. `image` can be left as `None`.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
-
- Returns:
- [`~pipelines.ImagePipelineOutput`] or `tuple`:
- If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
- returned where the first element is a list with the generated images.
- """
- if image is not None:
- if isinstance(image, PIL.Image.Image):
- batch_size = 1
- elif isinstance(image, list):
- batch_size = len(image)
- else:
- batch_size = image.shape[0]
- else:
- batch_size = image_embeddings.shape[0]
-
- prompt = [""] * batch_size
-
- device = self._execution_device
-
- batch_size = batch_size * num_images_per_prompt
-
- do_classifier_free_guidance = decoder_guidance_scale > 1.0
-
- prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt(
- prompt, device, num_images_per_prompt, do_classifier_free_guidance
- )
-
- image_embeddings = self._encode_image(image, device, num_images_per_prompt, image_embeddings)
-
- # decoder
- text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj(
- image_embeddings=image_embeddings,
- prompt_embeds=prompt_embeds,
- text_encoder_hidden_states=text_encoder_hidden_states,
- do_classifier_free_guidance=do_classifier_free_guidance,
- )
-
- if device.type == "mps":
- # HACK: MPS: There is a panic when padding bool tensors,
- # so cast to int tensor for the pad and back to bool afterwards
- text_mask = text_mask.type(torch.int)
- decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1)
- decoder_text_mask = decoder_text_mask.type(torch.bool)
- else:
- decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True)
-
- self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device)
- decoder_timesteps_tensor = self.decoder_scheduler.timesteps
-
- num_channels_latents = self.decoder.config.in_channels
- height = self.decoder.config.sample_size
- width = self.decoder.config.sample_size
-
- if decoder_latents is None:
- decoder_latents = self.prepare_latents(
- (batch_size, num_channels_latents, height, width),
- text_encoder_hidden_states.dtype,
- device,
- generator,
- decoder_latents,
- self.decoder_scheduler,
- )
-
- for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents
-
- noise_pred = self.decoder(
- sample=latent_model_input,
- timestep=t,
- encoder_hidden_states=text_encoder_hidden_states,
- class_labels=additive_clip_time_embeddings,
- attention_mask=decoder_text_mask,
- ).sample
-
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1)
- noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1)
- noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond)
- noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
-
- if i + 1 == decoder_timesteps_tensor.shape[0]:
- prev_timestep = None
- else:
- prev_timestep = decoder_timesteps_tensor[i + 1]
-
- # compute the previous noisy sample x_t -> x_t-1
- decoder_latents = self.decoder_scheduler.step(
- noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator
- ).prev_sample
-
- decoder_latents = decoder_latents.clamp(-1, 1)
-
- image_small = decoder_latents
-
- # done decoder
-
- # super res
-
- self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device)
- super_res_timesteps_tensor = self.super_res_scheduler.timesteps
-
- channels = self.super_res_first.config.in_channels // 2
- height = self.super_res_first.config.sample_size
- width = self.super_res_first.config.sample_size
-
- if super_res_latents is None:
- super_res_latents = self.prepare_latents(
- (batch_size, channels, height, width),
- image_small.dtype,
- device,
- generator,
- super_res_latents,
- self.super_res_scheduler,
- )
-
- if device.type == "mps":
- # MPS does not support many interpolations
- image_upscaled = F.interpolate(image_small, size=[height, width])
- else:
- interpolate_antialias = {}
- if "antialias" in inspect.signature(F.interpolate).parameters:
- interpolate_antialias["antialias"] = True
-
- image_upscaled = F.interpolate(
- image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
- )
-
- for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)):
- # no classifier free guidance
-
- if i == super_res_timesteps_tensor.shape[0] - 1:
- unet = self.super_res_last
- else:
- unet = self.super_res_first
-
- latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1)
-
- noise_pred = unet(
- sample=latent_model_input,
- timestep=t,
- ).sample
-
- if i + 1 == super_res_timesteps_tensor.shape[0]:
- prev_timestep = None
- else:
- prev_timestep = super_res_timesteps_tensor[i + 1]
-
- # compute the previous noisy sample x_t -> x_t-1
- super_res_latents = self.super_res_scheduler.step(
- noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator
- ).prev_sample
-
- image = super_res_latents
-
- # done super res
-
- # post processing
-
- image = image * 0.5 + 0.5
- image = image.clamp(0, 1)
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
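
Editor's note: for context, here is a minimal usage sketch of the image-variation pipeline whose `__call__` tail is shown above. It is a sketch, not part of the removed file; the `UnCLIPImageVariationPipeline` class name and the `kakaobrain/karlo-v1-alpha-image-variations` checkpoint id are assumptions, while the keyword arguments mirror the docstring above.

```python
# Sketch only: drive the UnCLIP image-variation pipeline deleted in this diff.
# Assumptions: the UnCLIPImageVariationPipeline export in diffusers and the
# kakaobrain/karlo-v1-alpha-image-variations checkpoint name.
import torch
from PIL import Image
from diffusers import UnCLIPImageVariationPipeline

pipe = UnCLIPImageVariationPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("input.png").convert("RGB")  # placeholder input image
generator = torch.Generator("cuda").manual_seed(0)

# Arguments map onto the docstring above: more decoder steps trade speed for
# quality, and decoder_guidance_scale > 1 enables classifier-free guidance.
out = pipe(
    image=init_image,
    num_images_per_prompt=2,
    decoder_num_inference_steps=25,
    super_res_num_inference_steps=7,
    decoder_guidance_scale=4.0,
    generator=generator,
)
out.images[0].save("variation.png")
```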
 
spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py DELETED
@@ -1,105 +0,0 @@
- _base_ = [
- '../_base_/datasets/coco_detection.py',
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
- ]
- # model settings
- model = dict(
- type='FCOS',
- pretrained='open-mmlab://detectron/resnet50_caffe',
- backbone=dict(
- type='ResNet',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=False),
- norm_eval=True,
- style='caffe'),
- neck=dict(
- type='FPN',
- in_channels=[256, 512, 1024, 2048],
- out_channels=256,
- start_level=1,
- add_extra_convs=True,
- extra_convs_on_inputs=False, # use P5
- num_outs=5,
- relu_before_extra_convs=True),
- bbox_head=dict(
- type='FCOSHead',
- num_classes=80,
- in_channels=256,
- stacked_convs=4,
- feat_channels=256,
- strides=[8, 16, 32, 64, 128],
- loss_cls=dict(
- type='FocalLoss',
- use_sigmoid=True,
- gamma=2.0,
- alpha=0.25,
- loss_weight=1.0),
- loss_bbox=dict(type='IoULoss', loss_weight=1.0),
- loss_centerness=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
- # training and testing settings
- train_cfg=dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.5,
- neg_iou_thr=0.4,
- min_pos_iou=0,
- ignore_iof_thr=-1),
- allowed_border=-1,
- pos_weight=-1,
- debug=False),
- test_cfg=dict(
- nms_pre=1000,
- min_bbox_size=0,
- score_thr=0.05,
- nms=dict(type='nms', iou_threshold=0.5),
- max_per_img=100))
- img_norm_cfg = dict(
- mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
- ]
- data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
- # optimizer
- optimizer = dict(
- lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
- optimizer_config = dict(
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
- # learning policy
- lr_config = dict(
- policy='step',
- warmup='constant',
- warmup_iters=500,
- warmup_ratio=1.0 / 3,
- step=[8, 11])
- runner = dict(type='EpochBasedRunner', max_epochs=12)
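
Editor's note: a hedged sketch of how a config like the deleted FCOS file above is typically consumed with the MMDetection 2.x API. The API calls (`init_detector`, `inference_detector`) are standard in that version; the checkpoint path is a placeholder.

```python
# Sketch only: run inference with the FCOS config removed above.
# Assumes an MMDetection 2.x environment; the checkpoint path is hypothetical.
from mmdet.apis import init_detector, inference_detector

config_file = "configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py"
checkpoint_file = "checkpoints/fcos_r50_caffe_fpn_gn-head_1x_coco.pth"  # placeholder

model = init_detector(config_file, checkpoint_file, device="cuda:0")
result = inference_detector(model, "demo.jpg")  # per-class bbox arrays
model.show_result("demo.jpg", result, score_thr=0.3, out_file="demo_out.jpg")
```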
 
spaces/Andy1621/uniformer_image_detection/configs/reppoints/README.md DELETED
@@ -1,54 +0,0 @@
- # RepPoints: Point Set Representation for Object Detection
-
- By [Ze Yang](https://yangze.tech/), [Shaohui Liu](http://b1ueber2y.me/), and [Han Hu](https://ancientmooner.github.io/).
-
- We provide code support and configuration files to reproduce the results in the paper for
- ["RepPoints: Point Set Representation for Object Detection"](https://arxiv.org/abs/1904.11490) on COCO object detection.
-
- ## Introduction
-
- [ALGORITHM]
-
- **RepPoints**, initially described in [arXiv](https://arxiv.org/abs/1904.11490), is a new representation method for visual objects, on which visual understanding tasks are typically centered. Visual object representation, aiming at both geometric description and appearance feature extraction, is conventionally achieved by `bounding box + RoIPool (RoIAlign)`. The bounding box representation is convenient to use; however, it provides only a rectangular localization of objects that lacks geometric precision and may consequently degrade feature quality. Our new representation, RepPoints, models objects by a `point set` instead of a `bounding box`, which learns to adaptively position themselves over an object in a manner that circumscribes the object’s `spatial extent` and enables `semantically aligned feature extraction`. This richer and more flexible representation maintains the convenience of bounding boxes while facilitating various visual understanding applications. This repo demonstrated the effectiveness of RepPoints for COCO object detection.
-
- Another feature of this repo is the demonstration of an `anchor-free detector`, which can be as effective as state-of-the-art anchor-based detection methods. The anchor-free detector can utilize either `bounding box` or `RepPoints` as the basic object representation.
-
- <div align="center">
- <img src="reppoints.png" width="400px" />
- <p>Learning RepPoints in Object Detection.</p>
- </div>
-
- ## Citing RepPoints
-
- ```
- @inproceedings{yang2019reppoints,
- title={RepPoints: Point Set Representation for Object Detection},
- author={Yang, Ze and Liu, Shaohui and Hu, Han and Wang, Liwei and Lin, Stephen},
- booktitle={The IEEE International Conference on Computer Vision (ICCV)},
- month={Oct},
- year={2019}
- }
- ```
-
- ## Results and models
-
- The results on COCO 2017val are shown in the table below.
-
- | Method | Backbone | GN | Anchor | convert func | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
- |:---------:|:-------------:|:---:|:------:|:------------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
- | BBox | R-50-FPN | Y | single | - | 1x | 3.9 | 15.9 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329-c98bfa96.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) |
- | BBox | R-50-FPN | Y | none | - | 1x | 3.9 | 15.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+Bhead_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco_20200330-00f73d58.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_center_fpn_gn-neck%2Bhead_1x_coco_20200330_233609.log.json) |
- | RepPoints | R-50-FPN | N | none | moment | 1x | 3.3 | 18.5 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330_233609.log.json) |
- | RepPoints | R-50-FPN | Y | none | moment | 1x | 3.9 | 17.5 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329-4b38409a.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952.log.json) |
- | RepPoints | R-50-FPN | Y | none | moment | 2x | 3.9 | - | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329_150020.log.json) |
- | RepPoints | R-101-FPN | Y | none | moment | 2x | 5.8 | 13.7 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329_132205.log.json) |
- | RepPoints | R-101-FPN-DCN | Y | none | moment | 2x | 5.9 | 12.1 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132134.log.json) |
- | RepPoints | X-101-FPN-DCN | Y | none | moment | 2x | 7.1 | 9.3 | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132201.log.json) |
-
- **Notes:**
-
- - `R-xx`, `X-xx` denote the ResNet and ResNeXt architectures, respectively.
- - `DCN` denotes replacing 3x3 conv with the 3x3 deformable convolution in `c3-c5` stages of backbone.
- - `none` in the `anchor` column means 2-d `center point` (x,y) is used to represent the initial object hypothesis. `single` denotes one 4-d anchor box (x,y,w,h) with IoU based label assign criterion is adopted.
- - `moment`, `partial MinMax`, `MinMax` in the `convert func` column are three functions to convert a point set to a pseudo box.
- - Note the results here are slightly different from those reported in the paper, due to framework change. While the original paper uses an [MXNet](https://mxnet.apache.org/) implementation, we re-implement the method in [PyTorch](https://pytorch.org/) based on mmdetection.
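
Editor's note: the `moment` convert function listed in the table above turns each learned point set into a pseudo box from the points' mean and standard deviation. Below is a simplified, hedged sketch of that idea; the real RepPoints head uses a learnable log-scale (`moment_transfer`), which is folded into a fixed scalar here for clarity.

```python
# Simplified illustration of the "moment" point-set -> pseudo-box conversion.
# Not the mmdetection implementation: the learnable moment_transfer parameter
# is treated as a constant.
import torch

def moment_points2bbox(pts: torch.Tensor, moment_transfer: float = 0.0) -> torch.Tensor:
    """pts: (N, num_points, 2) xy coordinates -> (N, 4) boxes (x1, y1, x2, y2)."""
    mean = pts.mean(dim=1)                                   # (N, 2) box centre
    std = pts.std(dim=1)                                     # (N, 2) spread of the set
    half_wh = std * torch.exp(torch.tensor(moment_transfer)) # scaled half extents
    return torch.cat([mean - half_wh, mean + half_wh], dim=1)

boxes = moment_points2bbox(torch.rand(8, 9, 2) * 100)  # 8 sets of 9 points
print(boxes.shape)  # torch.Size([8, 4])
```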
 
spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,37 +0,0 @@
- _base_ = './rpn_r50_fpn_1x_coco.py'
- model = dict(
- pretrained='open-mmlab://detectron2/resnet50_caffe',
- backbone=dict(
- norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))
- # use caffe img_norm
- img_norm_cfg = dict(
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_label=False),
- dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes']),
- ]
- test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
- ]
- data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
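
Editor's note: the caffe-style `img_norm_cfg` above subtracts per-channel means, leaves the standard deviation at 1, and keeps BGR channel order (`to_rgb=False`). A small sketch of the equivalent operation, for reference only:

```python
# Sketch of what the Normalize step does with the caffe-style settings above.
import numpy as np

mean = np.array([103.530, 116.280, 123.675], dtype=np.float32)  # BGR means
std = np.array([1.0, 1.0, 1.0], dtype=np.float32)               # std left at 1

def caffe_normalize(img_bgr: np.ndarray) -> np.ndarray:
    """img_bgr: HxWx3 uint8 image already in BGR order (to_rgb=False)."""
    return (img_bgr.astype(np.float32) - mean) / std

dummy = np.random.randint(0, 256, size=(800, 1333, 3), dtype=np.uint8)
print(caffe_normalize(dummy).mean(axis=(0, 1)))  # roughly centred per channel
```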
 
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
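
Editor's note: the two-line config above relies on MMCV's `_base_` inheritance, reusing the R-50 config and overriding only the backbone depth and pretrained weights. A hedged sketch of inspecting the merged result (assumes an MMSegmentation checkout with the referenced base configs on disk):

```python
# Sketch: load the merged config to confirm the override takes effect.
# Assumes mmcv and the referenced base config files are available locally.
from mmcv import Config

cfg = Config.fromfile(
    "configs/deeplabv3plus/deeplabv3plus_r101-d8_480x480_40k_pascal_context_59.py"
)
print(cfg.model.backbone.depth)  # 101, overriding the base file's 50
print(cfg.model.pretrained)      # open-mmlab://resnet101_v1c
```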
 
spaces/AnimalEquality/chatbot/_proc/_docs/app.html DELETED
@@ -1,660 +0,0 @@
1
- <!DOCTYPE html>
2
- <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"><head>
3
-
4
- <meta charset="utf-8">
5
- <meta name="generator" content="quarto-1.3.361">
6
-
7
- <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
8
-
9
- <meta name="description" content="Gradio app.py">
10
-
11
- <title>lv-recipe-chatbot - app</title>
12
- <style>
13
- code{white-space: pre-wrap;}
14
- span.smallcaps{font-variant: small-caps;}
15
- div.columns{display: flex; gap: min(4vw, 1.5em);}
16
- div.column{flex: auto; overflow-x: auto;}
17
- div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
18
- ul.task-list{list-style: none;}
19
- ul.task-list li input[type="checkbox"] {
20
- width: 0.8em;
21
- margin: 0 0.8em 0.2em -1em; /* quarto-specific, see https://github.com/quarto-dev/quarto-cli/issues/4556 */
22
- vertical-align: middle;
23
- }
24
- /* CSS for syntax highlighting */
25
- pre > code.sourceCode { white-space: pre; position: relative; }
26
- pre > code.sourceCode > span { display: inline-block; line-height: 1.25; }
27
- pre > code.sourceCode > span:empty { height: 1.2em; }
28
- .sourceCode { overflow: visible; }
29
- code.sourceCode > span { color: inherit; text-decoration: inherit; }
30
- div.sourceCode { margin: 1em 0; }
31
- pre.sourceCode { margin: 0; }
32
- @media screen {
33
- div.sourceCode { overflow: auto; }
34
- }
35
- @media print {
36
- pre > code.sourceCode { white-space: pre-wrap; }
37
- pre > code.sourceCode > span { text-indent: -5em; padding-left: 5em; }
38
- }
39
- pre.numberSource code
40
- { counter-reset: source-line 0; }
41
- pre.numberSource code > span
42
- { position: relative; left: -4em; counter-increment: source-line; }
43
- pre.numberSource code > span > a:first-child::before
44
- { content: counter(source-line);
45
- position: relative; left: -1em; text-align: right; vertical-align: baseline;
46
- border: none; display: inline-block;
47
- -webkit-touch-callout: none; -webkit-user-select: none;
48
- -khtml-user-select: none; -moz-user-select: none;
49
- -ms-user-select: none; user-select: none;
50
- padding: 0 4px; width: 4em;
51
- }
52
- pre.numberSource { margin-left: 3em; padding-left: 4px; }
53
- div.sourceCode
54
- { }
55
- @media screen {
56
- pre > code.sourceCode > span > a:first-child::before { text-decoration: underline; }
57
- }
58
- </style>
59
-
60
-
61
- <script src="site_libs/quarto-nav/quarto-nav.js"></script>
62
- <script src="site_libs/quarto-nav/headroom.min.js"></script>
63
- <script src="site_libs/clipboard/clipboard.min.js"></script>
64
- <script src="site_libs/quarto-search/autocomplete.umd.js"></script>
65
- <script src="site_libs/quarto-search/fuse.min.js"></script>
66
- <script src="site_libs/quarto-search/quarto-search.js"></script>
67
- <meta name="quarto:offset" content="./">
68
- <script src="site_libs/quarto-html/quarto.js"></script>
69
- <script src="site_libs/quarto-html/popper.min.js"></script>
70
- <script src="site_libs/quarto-html/tippy.umd.min.js"></script>
71
- <script src="site_libs/quarto-html/anchor.min.js"></script>
72
- <link href="site_libs/quarto-html/tippy.css" rel="stylesheet">
73
- <link href="site_libs/quarto-html/quarto-syntax-highlighting.css" rel="stylesheet" id="quarto-text-highlighting-styles">
74
- <script src="site_libs/bootstrap/bootstrap.min.js"></script>
75
- <link href="site_libs/bootstrap/bootstrap-icons.css" rel="stylesheet">
76
- <link href="site_libs/bootstrap/bootstrap.min.css" rel="stylesheet" id="quarto-bootstrap" data-mode="light">
77
- <script id="quarto-search-options" type="application/json">{
78
- "location": "navbar",
79
- "copy-button": false,
80
- "collapse-after": 3,
81
- "panel-placement": "end",
82
- "type": "overlay",
83
- "limit": 20,
84
- "language": {
85
- "search-no-results-text": "No results",
86
- "search-matching-documents-text": "matching documents",
87
- "search-copy-link-title": "Copy link to search",
88
- "search-hide-matches-text": "Hide additional matches",
89
- "search-more-match-text": "more match in this document",
90
- "search-more-matches-text": "more matches in this document",
91
- "search-clear-button-title": "Clear",
92
- "search-detached-cancel-button-title": "Cancel",
93
- "search-submit-button-title": "Submit",
94
- "search-label": "Search"
95
- }
96
- }</script>
97
- <script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.6/require.min.js" integrity="sha512-c3Nl8+7g4LMSTdrm621y7kf9v3SDPnhxLNhcjFJbKECVnmZHTdo+IRO05sNLTH/D3vA6u1X32ehoLC7WFVdheg==" crossorigin="anonymous"></script>
98
- <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.min.js" integrity="sha512-bLT0Qm9VnAYZDflyKcBaQ2gg0hSYNQrJ8RilYldYQ1FxQYoCLtUjuuRuZo+fjqhx/qtq/1itJ0C2ejDxltZVFg==" crossorigin="anonymous"></script>
99
- <script type="application/javascript">define('jquery', [],function() {return window.jQuery;})</script>
100
-
101
-
102
- <link rel="stylesheet" href="styles.css">
103
- <meta property="og:title" content="lv-recipe-chatbot - app">
104
- <meta property="og:description" content="Gradio app.py">
105
- <meta property="og:site-name" content="lv-recipe-chatbot">
106
- <meta name="twitter:title" content="lv-recipe-chatbot - app">
107
- <meta name="twitter:description" content="Gradio app.py">
108
- <meta name="twitter:card" content="summary">
109
- </head>
110
-
111
- <body class="nav-sidebar floating nav-fixed">
112
-
113
- <div id="quarto-search-results"></div>
114
- <header id="quarto-header" class="headroom fixed-top">
115
- <nav class="navbar navbar-expand-lg navbar-dark ">
116
- <div class="navbar-container container-fluid">
117
- <div class="navbar-brand-container">
118
- <a class="navbar-brand" href="./index.html">
119
- <span class="navbar-title">lv-recipe-chatbot</span>
120
- </a>
121
- </div>
122
- <div class="quarto-navbar-tools ms-auto">
123
- </div>
124
- <div id="quarto-search" class="" title="Search"></div>
125
- </div> <!-- /container-fluid -->
126
- </nav>
127
- <nav class="quarto-secondary-nav">
128
- <div class="container-fluid d-flex">
129
- <button type="button" class="quarto-btn-toggle btn" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass" aria-controls="quarto-sidebar" aria-expanded="false" aria-label="Toggle sidebar navigation" onclick="if (window.quartoToggleHeadroom) { window.quartoToggleHeadroom(); }">
130
- <i class="bi bi-layout-text-sidebar-reverse"></i>
131
- </button>
132
- <nav class="quarto-page-breadcrumbs" aria-label="breadcrumb"><ol class="breadcrumb"><li class="breadcrumb-item"><a href="./app.html">app</a></li></ol></nav>
133
- <a class="flex-grow-1" role="button" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass" aria-controls="quarto-sidebar" aria-expanded="false" aria-label="Toggle sidebar navigation" onclick="if (window.quartoToggleHeadroom) { window.quartoToggleHeadroom(); }">
134
- </a>
135
- </div>
136
- </nav>
137
- </header>
138
- <!-- content -->
139
- <div id="quarto-content" class="quarto-container page-columns page-rows-contents page-layout-article page-navbar">
140
- <!-- sidebar -->
141
- <nav id="quarto-sidebar" class="sidebar collapse collapse-horizontal sidebar-navigation floating overflow-auto">
142
- <div class="sidebar-menu-container">
143
- <ul class="list-unstyled mt-1">
144
- <li class="sidebar-item">
145
- <div class="sidebar-item-container">
146
- <a href="./index.html" class="sidebar-item-text sidebar-link">
147
- <span class="menu-text">lv-recipe-chatbot</span></a>
148
- </div>
149
- </li>
150
- <li class="sidebar-item">
151
- <div class="sidebar-item-container">
152
- <a href="./engineer_prompt.html" class="sidebar-item-text sidebar-link">
153
- <span class="menu-text">engineer_prompt</span></a>
154
- </div>
155
- </li>
156
- <li class="sidebar-item">
157
- <div class="sidebar-item-container">
158
- <a href="./app.html" class="sidebar-item-text sidebar-link active">
159
- <span class="menu-text">app</span></a>
160
- </div>
161
- </li>
162
- <li class="sidebar-item">
163
- <div class="sidebar-item-container">
164
- <a href="./vegan_recipe_tools.html" class="sidebar-item-text sidebar-link">
165
- <span class="menu-text">vegan_recipe_tools</span></a>
166
- </div>
167
- </li>
168
- <li class="sidebar-item">
169
- <div class="sidebar-item-container">
170
- <a href="./ingredient_vision.html" class="sidebar-item-text sidebar-link">
171
- <span class="menu-text">ingredient_vision</span></a>
172
- </div>
173
- </li>
174
- </ul>
175
- </div>
176
- </nav>
177
- <div id="quarto-sidebar-glass" data-bs-toggle="collapse" data-bs-target="#quarto-sidebar,#quarto-sidebar-glass"></div>
178
- <!-- margin-sidebar -->
179
- <div id="quarto-margin-sidebar" class="sidebar margin-sidebar">
180
- <nav id="TOC" role="doc-toc" class="toc-active">
181
- <h2 id="toc-title">On this page</h2>
182
-
183
- <ul>
184
- <li><a href="#put-the-chat-backend-pieces-together" id="toc-put-the-chat-backend-pieces-together" class="nav-link active" data-scroll-target="#put-the-chat-backend-pieces-together">Put the chat backend pieces together</a>
185
- <ul class="collapse">
186
- <li><a href="#conversationbuffermemory" id="toc-conversationbuffermemory" class="nav-link" data-scroll-target="#conversationbuffermemory">ConversationBufferMemory</a></li>
187
- <li><a href="#chatmessagehistory" id="toc-chatmessagehistory" class="nav-link" data-scroll-target="#chatmessagehistory">ChatMessageHistory</a></li>
188
- <li><a href="#chatopenai" id="toc-chatopenai" class="nav-link" data-scroll-target="#chatopenai">ChatOpenAI</a></li>
189
- <li><a href="#conversationbot" id="toc-conversationbot" class="nav-link" data-scroll-target="#conversationbot">ConversationBot</a></li>
190
- <li><a href="#create_demo" id="toc-create_demo" class="nav-link" data-scroll-target="#create_demo">create_demo</a></li>
191
- </ul></li>
192
- </ul>
193
- <div class="toc-actions"><div><i class="bi bi-git"></i></div><div class="action-links"><p><a href="https://gitlab.com/animalequality/lv-recipe-chatbot/issues/new" class="toc-action">Report an issue</a></p></div></div></nav>
194
- </div>
195
- <!-- main -->
196
- <main class="content" id="quarto-document-content">
197
-
198
- <header id="title-block-header" class="quarto-title-block default">
199
- <div class="quarto-title">
200
- <h1 class="title">app</h1>
201
- </div>
202
-
203
- <div>
204
- <div class="description">
205
- Gradio app.py
206
- </div>
207
- </div>
208
-
209
-
210
- <div class="quarto-title-meta">
211
-
212
-
213
-
214
-
215
- </div>
216
-
217
-
218
- </header>
219
-
220
- <!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
221
- <div class="cell">
222
- <div class="sourceCode cell-code" id="cb1"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true" tabindex="-1"></a><span class="im">from</span> dotenv <span class="im">import</span> load_dotenv</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
223
- </div>
224
- <div class="cell">
225
- <div class="sourceCode cell-code" id="cb2"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb2-1"><a href="#cb2-1" aria-hidden="true" tabindex="-1"></a><span class="co">#: eval: false</span></span>
226
- <span id="cb2-2"><a href="#cb2-2" aria-hidden="true" tabindex="-1"></a>load_dotenv()</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
227
- <div class="cell-output cell-output-display">
228
- <pre><code>True</code></pre>
229
- </div>
230
- </div>
231
- <section id="put-the-chat-backend-pieces-together" class="level2">
232
- <h2 class="anchored" data-anchor-id="put-the-chat-backend-pieces-together">Put the chat backend pieces together</h2>
233
- <hr>
234
- <section id="conversationbuffermemory" class="level3">
235
- <h3 class="anchored" data-anchor-id="conversationbuffermemory">ConversationBufferMemory</h3>
236
- <blockquote class="blockquote">
237
- <pre><code> ConversationBufferMemory
238
- (chat_memory:langchain.schema.memory.BaseChatMe
239
- ssageHistory=None,
240
- output_key:Optional[str]=None,
241
- input_key:Optional[str]=None,
242
- return_messages:bool=False,
243
- human_prefix:str='Human', ai_prefix:str='AI',
244
- memory_key:str='history')</code></pre>
245
- </blockquote>
246
- <p>Buffer for storing conversation memory.</p>
247
- <hr>
248
- </section>
249
- <section id="chatmessagehistory" class="level3">
250
- <h3 class="anchored" data-anchor-id="chatmessagehistory">ChatMessageHistory</h3>
251
- <blockquote class="blockquote">
252
- <pre><code> ChatMessageHistory
253
- (messages:List[langchain.schema.messages.BaseMessage]
254
- =[])</code></pre>
255
- </blockquote>
256
- <p>In memory implementation of chat message history.</p>
257
- <p>Stores messages in an in memory list.</p>
258
- <hr>
259
- </section>
260
- <section id="chatopenai" class="level3">
261
- <h3 class="anchored" data-anchor-id="chatopenai">ChatOpenAI</h3>
262
- <blockquote class="blockquote">
263
- <pre><code> ChatOpenAI (cache:Optional[bool]=None, verbose:bool=None, callbacks:Union
264
- [List[langchain.callbacks.base.BaseCallbackHandler],langchain
265
- .callbacks.base.BaseCallbackManager,NoneType]=None, callback_
266
- manager:Optional[langchain.callbacks.base.BaseCallbackManager
267
- ]=None, tags:Optional[List[str]]=None,
268
- metadata:Optional[Dict[str,Any]]=None, client:Any=None,
269
- model:str='gpt-3.5-turbo', temperature:float=0.7,
270
- model_kwargs:Dict[str,Any]=None,
271
- openai_api_key:Optional[str]=None,
272
- openai_api_base:Optional[str]=None,
273
- openai_organization:Optional[str]=None,
274
- openai_proxy:Optional[str]=None, request_timeout:Union[float,
275
- Tuple[float,float],NoneType]=None, max_retries:int=6,
276
- streaming:bool=False, n:int=1, max_tokens:Optional[int]=None,
277
- tiktoken_model_name:Optional[str]=None)</code></pre>
278
- </blockquote>
279
- <p>Wrapper around OpenAI Chat large language models.</p>
280
- <p>To use, you should have the <code>openai</code> python package installed, and the environment variable <code>OPENAI_API_KEY</code> set with your API key.</p>
281
- <p>Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class.</p>
282
- <p>Example: .. code-block:: python</p>
283
- <pre><code> from langchain.chat_models import ChatOpenAI
284
- openai = ChatOpenAI(model_name="gpt-3.5-turbo")</code></pre>
285
- <div class="cell">
286
- <div class="sourceCode cell-code" id="cb8"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb8-1"><a href="#cb8-1" aria-hidden="true" tabindex="-1"></a>llm <span class="op">=</span> ChatOpenAI(temperature<span class="op">=</span><span class="dv">1</span>)</span>
287
- <span id="cb8-2"><a href="#cb8-2" aria-hidden="true" tabindex="-1"></a>MEMORY_KEY <span class="op">=</span> <span class="st">"chat_history"</span></span>
288
- <span id="cb8-3"><a href="#cb8-3" aria-hidden="true" tabindex="-1"></a>chat_msgs <span class="op">=</span> INIT_PROMPT.format_prompt(</span>
289
- <span id="cb8-4"><a href="#cb8-4" aria-hidden="true" tabindex="-1"></a> ingredients<span class="op">=</span><span class="st">"tofu, pickles, mustard, olives, tomatoes, lettuce, bell peppers, carrots, bread"</span>,</span>
290
- <span id="cb8-5"><a href="#cb8-5" aria-hidden="true" tabindex="-1"></a> allergies<span class="op">=</span><span class="st">""</span>,</span>
291
- <span id="cb8-6"><a href="#cb8-6" aria-hidden="true" tabindex="-1"></a> recipe_freeform_input<span class="op">=</span><span class="st">"The preparation time shVegan spaghetti aglio e olio ould be less than 30 minutes. I really love Thai food!"</span>,</span>
292
- <span id="cb8-7"><a href="#cb8-7" aria-hidden="true" tabindex="-1"></a>)</span>
293
- <span id="cb8-8"><a href="#cb8-8" aria-hidden="true" tabindex="-1"></a>chat_msgs <span class="op">=</span> chat_msgs.to_messages()</span>
294
- <span id="cb8-9"><a href="#cb8-9" aria-hidden="true" tabindex="-1"></a>results <span class="op">=</span> llm.generate([chat_msgs])</span>
295
- <span id="cb8-10"><a href="#cb8-10" aria-hidden="true" tabindex="-1"></a></span>
296
- <span id="cb8-11"><a href="#cb8-11" aria-hidden="true" tabindex="-1"></a>chat_msgs.append(results.generations[<span class="dv">0</span>][<span class="dv">0</span>].message)</span>
297
- <span id="cb8-12"><a href="#cb8-12" aria-hidden="true" tabindex="-1"></a>tools <span class="op">=</span> [vegan_recipe_edamam_search]</span>
298
- <span id="cb8-13"><a href="#cb8-13" aria-hidden="true" tabindex="-1"></a>prompt <span class="op">=</span> OpenAIFunctionsAgent.create_prompt(</span>
299
- <span id="cb8-14"><a href="#cb8-14" aria-hidden="true" tabindex="-1"></a> system_message<span class="op">=</span>INIT_PROMPT.messages[<span class="dv">0</span>],</span>
300
- <span id="cb8-15"><a href="#cb8-15" aria-hidden="true" tabindex="-1"></a> extra_prompt_messages<span class="op">=</span>chat_msgs <span class="op">+</span> [MessagesPlaceholder(variable_name<span class="op">=</span>MEMORY_KEY)],</span>
301
- <span id="cb8-16"><a href="#cb8-16" aria-hidden="true" tabindex="-1"></a>)</span>
302
- <span id="cb8-17"><a href="#cb8-17" aria-hidden="true" tabindex="-1"></a>memory <span class="op">=</span> ConversationBufferMemory(</span>
303
- <span id="cb8-18"><a href="#cb8-18" aria-hidden="true" tabindex="-1"></a> chat_memory<span class="op">=</span>ChatMessageHistory(messages<span class="op">=</span>chat_msgs),</span>
304
- <span id="cb8-19"><a href="#cb8-19" aria-hidden="true" tabindex="-1"></a> return_messages<span class="op">=</span><span class="va">True</span>,</span>
305
- <span id="cb8-20"><a href="#cb8-20" aria-hidden="true" tabindex="-1"></a> memory_key<span class="op">=</span>MEMORY_KEY,</span>
306
- <span id="cb8-21"><a href="#cb8-21" aria-hidden="true" tabindex="-1"></a>)</span>
307
- <span id="cb8-22"><a href="#cb8-22" aria-hidden="true" tabindex="-1"></a>agent_executor <span class="op">=</span> AgentExecutor(</span>
308
- <span id="cb8-23"><a href="#cb8-23" aria-hidden="true" tabindex="-1"></a> agent<span class="op">=</span>OpenAIFunctionsAgent(llm<span class="op">=</span>llm, tools<span class="op">=</span>tools, prompt<span class="op">=</span>prompt),</span>
309
- <span id="cb8-24"><a href="#cb8-24" aria-hidden="true" tabindex="-1"></a> tools<span class="op">=</span>tools,</span>
310
- <span id="cb8-25"><a href="#cb8-25" aria-hidden="true" tabindex="-1"></a> memory<span class="op">=</span>memory,</span>
311
- <span id="cb8-26"><a href="#cb8-26" aria-hidden="true" tabindex="-1"></a> verbose<span class="op">=</span><span class="va">True</span>,</span>
312
- <span id="cb8-27"><a href="#cb8-27" aria-hidden="true" tabindex="-1"></a>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
313
- </div>
314
- <div class="cell">
315
- <div class="sourceCode cell-code" id="cb9"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb9-1"><a href="#cb9-1" aria-hidden="true" tabindex="-1"></a>memory.chat_memory.messages</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
316
- <div class="cell-output cell-output-display">
317
- <pre><code>[SystemMessage(content='The following is a conversation between a human and a friendly vegan AI. \nThe AI is compassionate to animals and therefore serves to promote veganism.\nKnowledge: A vegan diet implies a plant-based diet avoiding all animal foods such as meat (including fish, shellfish and insects), dairy, eggs and honey.\nIf the human messages are not aligned with veganism, remind them of your purpose.\nThe AI never generates vegan recipes itself but instead uses a tool.', additional_kwargs={}),
318
- AIMessage(content='What ingredients do you wish to cook with?', additional_kwargs={}, example=False),
319
- HumanMessage(content='Ingredients: tofu, pickles, mustard, olives, tomatoes, lettuce, bell peppers, carrots, bread', additional_kwargs={}, example=False),
320
- AIMessage(content='Do you have any allergies I should be aware of?', additional_kwargs={}, example=False),
321
- HumanMessage(content='Allergies: ', additional_kwargs={}, example=False),
322
- AIMessage(content='Do you have any preferences I should consider for the recipe such as preparation time, difficulty, or cuisine region?', additional_kwargs={}, example=False),
323
- HumanMessage(content="Preferences: `The preparation time shVegan spaghetti aglio e olio ould be less than 30 minutes. I really love Thai food!`\nYour task is compose a concise, 6 word max vegan recipe keyword query to use in an API search.\nThink step by step.\n\n1. If the user listed any ingredients, choose the three ingredients that are most commonly used together in recipes that fall within the user's preferences (if any are included). \n2. If the user provided any allergies, include them in the query.\nFormat your response as message with the allergy and diet preferences first and then the ingredients.\nExamples:\n'Vegan gluten-free chicken peppers' or 'Vegan tofu, brocolli, and miso'", additional_kwargs={}, example=False),
324
- AIMessage(content='Vegan, quick, Thai tofu, bell peppers', additional_kwargs={}, example=False)]</code></pre>
325
- </div>
326
- </div>
327
- <div class="cell">
328
- <div class="sourceCode cell-code" id="cb11"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb11-1"><a href="#cb11-1" aria-hidden="true" tabindex="-1"></a>agent_executor.run(<span class="st">"Search for vegan recipe"</span>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
329
- <div class="cell-output cell-output-stdout">
330
- <pre><code>
331
-
332
- &gt; Entering new AgentExecutor chain...
333
-
334
- Invoking: `vegan_recipe_edamam_search` with `{'query': 'Tofu pickle sandwich with Thai-inspired flavors'}`
335
-
336
-
337
- []I apologize, but I couldn't find any vegan recipes matching your query. Can I help you with anything else?
338
-
339
- &gt; Finished chain.</code></pre>
340
- </div>
341
- <div class="cell-output cell-output-display">
342
- <pre><code>"I apologize, but I couldn't find any vegan recipes matching your query. Can I help you with anything else?"</code></pre>
343
- </div>
344
- </div>
345
- <div class="cell">
346
- <div class="sourceCode cell-code" id="cb14"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb14-1"><a href="#cb14-1" aria-hidden="true" tabindex="-1"></a>agent_executor.run(<span class="st">"Which ingredients that I provided go the best together in dishes?"</span>)</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
347
- <div class="cell-output cell-output-error">
348
- <pre><code>NameError: name 'agent_executor' is not defined</code></pre>
349
- </div>
350
- </div>
351
- <hr>
352
- <p><a href="https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L42" target="_blank" style="float:right; font-size:smaller">source</a></p>
353
- </section>
354
- <section id="conversationbot" class="level3">
355
- <h3 class="anchored" data-anchor-id="conversationbot">ConversationBot</h3>
356
- <blockquote class="blockquote">
357
- <pre><code> ConversationBot (verbose=True)</code></pre>
358
- </blockquote>
359
- <p>Initialize self. See help(type(self)) for accurate signature.</p>
360
- <div class="cell">
361
- <div class="sourceCode cell-code" id="cb17"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb17-1"><a href="#cb17-1" aria-hidden="true" tabindex="-1"></a>os.listdir(SAMPLE_IMG_DIR)</span>
362
- <span id="cb17-2"><a href="#cb17-2" aria-hidden="true" tabindex="-1"></a>SAMPLE_IMG_DIR</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
363
- <div class="cell-output cell-output-display">
364
- <pre><code>Path('/home/evylz/AnimalEquality/lv-recipe-chatbot/assets/images/vegan_ingredients')</code></pre>
365
- </div>
366
- </div>
367
- <div class="cell">
368
- <div class="sourceCode cell-code" id="cb19"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
369
- <div class="cell-output cell-output-stdout">
370
- <pre><code>CPU times: user 6.19 s, sys: 1.47 s, total: 7.66 s
371
- Wall time: 4.68 s</code></pre>
372
- </div>
373
- </div>
374
- <div class="cell">
375
- <div class="sourceCode cell-code" id="cb21"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
376
- <div class="cell-output cell-output-stdout">
377
- <pre><code>I uploaded an image that may contain vegan ingredients.
378
- The description of the image is: `a refrigerator with food inside`.
379
- The extracted ingredients are:
380
- ```
381
- cabbage lettuce onion
382
- apples
383
- rice
384
- plant-based milk
385
- ```
386
-
387
- CPU times: user 56.7 s, sys: 63.6 ms, total: 56.8 s
388
- Wall time: 5.95 s</code></pre>
389
- </div>
390
- </div>
391
- <hr>
392
- <p><a href="https://gitlab.com/animalequality/lv-recipe-chatbot/blob/main/lv_recipe_chatbot/app.py#L126" target="_blank" style="float:right; font-size:smaller">source</a></p>
393
- </section>
394
- <section id="create_demo" class="level3">
395
- <h3 class="anchored" data-anchor-id="create_demo">create_demo</h3>
396
- <blockquote class="blockquote">
397
- <pre><code> create_demo (bot=&lt;class '__main__.ConversationBot'&gt;)</code></pre>
398
- </blockquote>
399
- <div class="cell">
400
- <div class="sourceCode cell-code" id="cb24"><pre class="sourceCode python code-with-copy"><code class="sourceCode python"><span id="cb24-1"><a href="#cb24-1" aria-hidden="true" tabindex="-1"></a><span class="cf">if</span> <span class="st">"demo"</span> <span class="kw">in</span> <span class="bu">globals</span>():</span>
401
- <span id="cb24-2"><a href="#cb24-2" aria-hidden="true" tabindex="-1"></a> demo.close()</span>
402
- <span id="cb24-3"><a href="#cb24-3" aria-hidden="true" tabindex="-1"></a>demo <span class="op">=</span> create_demo(bot)</span>
403
- <span id="cb24-4"><a href="#cb24-4" aria-hidden="true" tabindex="-1"></a>demo.launch()</span></code><button title="Copy to Clipboard" class="code-copy-button"><i class="bi"></i></button></pre></div>
404
- <div class="cell-output cell-output-stdout">
405
- <pre><code>Closing server running on port: 7860
406
- Running on local URL: http://127.0.0.1:7860
407
-
408
- To create a public link, set `share=True` in `launch()`.</code></pre>
409
- </div>
410
- <div class="cell-output cell-output-display">
411
- <div><iframe src="http://127.0.0.1:7860/" width="100%" height="500" allow="autoplay; camera; microphone; clipboard-read; clipboard-write;" frameborder="0" allowfullscreen=""></iframe></div>
412
- </div>
413
- <div class="cell-output cell-output-display">
414
- <pre><code></code></pre>
415
- </div>
416
- </div>
417
-
418
-
419
- </section>
420
- </section>
421
-
422
- </main> <!-- /main -->
423
- <script id="quarto-html-after-body" type="application/javascript">
424
- window.document.addEventListener("DOMContentLoaded", function (event) {
425
- const toggleBodyColorMode = (bsSheetEl) => {
426
- const mode = bsSheetEl.getAttribute("data-mode");
427
- const bodyEl = window.document.querySelector("body");
428
- if (mode === "dark") {
429
- bodyEl.classList.add("quarto-dark");
430
- bodyEl.classList.remove("quarto-light");
431
- } else {
432
- bodyEl.classList.add("quarto-light");
433
- bodyEl.classList.remove("quarto-dark");
434
- }
435
- }
436
- const toggleBodyColorPrimary = () => {
437
- const bsSheetEl = window.document.querySelector("link#quarto-bootstrap");
438
- if (bsSheetEl) {
439
- toggleBodyColorMode(bsSheetEl);
440
- }
441
- }
442
- toggleBodyColorPrimary();
443
- const icon = "";
444
- const anchorJS = new window.AnchorJS();
445
- anchorJS.options = {
446
- placement: 'right',
447
- icon: icon
448
- };
449
- anchorJS.add('.anchored');
450
- const isCodeAnnotation = (el) => {
451
- for (const clz of el.classList) {
452
- if (clz.startsWith('code-annotation-')) {
453
- return true;
454
- }
455
- }
456
- return false;
457
- }
458
- const clipboard = new window.ClipboardJS('.code-copy-button', {
459
- text: function(trigger) {
460
- const codeEl = trigger.previousElementSibling.cloneNode(true);
461
- for (const childEl of codeEl.children) {
462
- if (isCodeAnnotation(childEl)) {
463
- childEl.remove();
464
- }
465
- }
466
- return codeEl.innerText;
467
- }
468
- });
469
- clipboard.on('success', function(e) {
470
- // button target
471
- const button = e.trigger;
472
- // don't keep focus
473
- button.blur();
474
- // flash "checked"
475
- button.classList.add('code-copy-button-checked');
476
- var currentTitle = button.getAttribute("title");
477
- button.setAttribute("title", "Copied!");
478
- let tooltip;
479
- if (window.bootstrap) {
480
- button.setAttribute("data-bs-toggle", "tooltip");
481
- button.setAttribute("data-bs-placement", "left");
482
- button.setAttribute("data-bs-title", "Copied!");
483
- tooltip = new bootstrap.Tooltip(button,
484
- { trigger: "manual",
485
- customClass: "code-copy-button-tooltip",
486
- offset: [0, -8]});
487
- tooltip.show();
488
- }
489
- setTimeout(function() {
490
- if (tooltip) {
491
- tooltip.hide();
492
- button.removeAttribute("data-bs-title");
493
- button.removeAttribute("data-bs-toggle");
494
- button.removeAttribute("data-bs-placement");
495
- }
496
- button.setAttribute("title", currentTitle);
497
- button.classList.remove('code-copy-button-checked');
498
- }, 1000);
499
- // clear code selection
500
- e.clearSelection();
501
- });
502
- function tippyHover(el, contentFn) {
503
- const config = {
504
- allowHTML: true,
505
- content: contentFn,
506
- maxWidth: 500,
507
- delay: 100,
508
- arrow: false,
509
- appendTo: function(el) {
510
- return el.parentElement;
511
- },
512
- interactive: true,
513
- interactiveBorder: 10,
514
- theme: 'quarto',
515
- placement: 'bottom-start'
516
- };
517
- window.tippy(el, config);
518
- }
519
- const noterefs = window.document.querySelectorAll('a[role="doc-noteref"]');
520
- for (var i=0; i<noterefs.length; i++) {
521
- const ref = noterefs[i];
522
- tippyHover(ref, function() {
523
- // use id or data attribute instead here
524
- let href = ref.getAttribute('data-footnote-href') || ref.getAttribute('href');
525
- try { href = new URL(href).hash; } catch {}
526
- const id = href.replace(/^#\/?/, "");
527
- const note = window.document.getElementById(id);
528
- return note.innerHTML;
529
- });
530
- }
531
- let selectedAnnoteEl;
532
- const selectorForAnnotation = ( cell, annotation) => {
533
- let cellAttr = 'data-code-cell="' + cell + '"';
534
- let lineAttr = 'data-code-annotation="' + annotation + '"';
535
- const selector = 'span[' + cellAttr + '][' + lineAttr + ']';
536
- return selector;
537
- }
538
- const selectCodeLines = (annoteEl) => {
539
- const doc = window.document;
540
- const targetCell = annoteEl.getAttribute("data-target-cell");
541
- const targetAnnotation = annoteEl.getAttribute("data-target-annotation");
542
- const annoteSpan = window.document.querySelector(selectorForAnnotation(targetCell, targetAnnotation));
543
- const lines = annoteSpan.getAttribute("data-code-lines").split(",");
544
- const lineIds = lines.map((line) => {
545
- return targetCell + "-" + line;
546
- })
547
- let top = null;
548
- let height = null;
549
- let parent = null;
550
- if (lineIds.length > 0) {
551
- //compute the position of the single el (top and bottom and make a div)
552
- const el = window.document.getElementById(lineIds[0]);
553
- top = el.offsetTop;
554
- height = el.offsetHeight;
555
- parent = el.parentElement.parentElement;
556
- if (lineIds.length > 1) {
557
- const lastEl = window.document.getElementById(lineIds[lineIds.length - 1]);
558
- const bottom = lastEl.offsetTop + lastEl.offsetHeight;
559
- height = bottom - top;
560
- }
561
- if (top !== null && height !== null && parent !== null) {
562
- // cook up a div (if necessary) and position it
563
- let div = window.document.getElementById("code-annotation-line-highlight");
564
- if (div === null) {
565
- div = window.document.createElement("div");
566
- div.setAttribute("id", "code-annotation-line-highlight");
567
- div.style.position = 'absolute';
568
- parent.appendChild(div);
569
- }
570
- div.style.top = top - 2 + "px";
571
- div.style.height = height + 4 + "px";
572
- let gutterDiv = window.document.getElementById("code-annotation-line-highlight-gutter");
573
- if (gutterDiv === null) {
574
- gutterDiv = window.document.createElement("div");
575
- gutterDiv.setAttribute("id", "code-annotation-line-highlight-gutter");
576
- gutterDiv.style.position = 'absolute';
577
- const codeCell = window.document.getElementById(targetCell);
578
- const gutter = codeCell.querySelector('.code-annotation-gutter');
579
- gutter.appendChild(gutterDiv);
580
- }
581
- gutterDiv.style.top = top - 2 + "px";
582
- gutterDiv.style.height = height + 4 + "px";
583
- }
584
- selectedAnnoteEl = annoteEl;
585
- }
586
- };
587
- const unselectCodeLines = () => {
588
- const elementsIds = ["code-annotation-line-highlight", "code-annotation-line-highlight-gutter"];
589
- elementsIds.forEach((elId) => {
590
- const div = window.document.getElementById(elId);
591
- if (div) {
592
- div.remove();
593
- }
594
- });
595
- selectedAnnoteEl = undefined;
596
- };
597
- // Attach click handler to the DT
598
- const annoteDls = window.document.querySelectorAll('dt[data-target-cell]');
599
- for (const annoteDlNode of annoteDls) {
600
- annoteDlNode.addEventListener('click', (event) => {
601
- const clickedEl = event.target;
602
- if (clickedEl !== selectedAnnoteEl) {
603
- unselectCodeLines();
604
- const activeEl = window.document.querySelector('dt[data-target-cell].code-annotation-active');
605
- if (activeEl) {
606
- activeEl.classList.remove('code-annotation-active');
607
- }
608
- selectCodeLines(clickedEl);
609
- clickedEl.classList.add('code-annotation-active');
610
- } else {
611
- // Unselect the line
612
- unselectCodeLines();
613
- clickedEl.classList.remove('code-annotation-active');
614
- }
615
- });
616
- }
617
- const findCites = (el) => {
618
- const parentEl = el.parentElement;
619
- if (parentEl) {
620
- const cites = parentEl.dataset.cites;
621
- if (cites) {
622
- return {
623
- el,
624
- cites: cites.split(' ')
625
- };
626
- } else {
627
- return findCites(el.parentElement)
628
- }
629
- } else {
630
- return undefined;
631
- }
632
- };
633
- var bibliorefs = window.document.querySelectorAll('a[role="doc-biblioref"]');
634
- for (var i=0; i<bibliorefs.length; i++) {
635
- const ref = bibliorefs[i];
636
- const citeInfo = findCites(ref);
637
- if (citeInfo) {
638
- tippyHover(citeInfo.el, function() {
639
- var popup = window.document.createElement('div');
640
- citeInfo.cites.forEach(function(cite) {
641
- var citeDiv = window.document.createElement('div');
642
- citeDiv.classList.add('hanging-indent');
643
- citeDiv.classList.add('csl-entry');
644
- var biblioDiv = window.document.getElementById('ref-' + cite);
645
- if (biblioDiv) {
646
- citeDiv.innerHTML = biblioDiv.innerHTML;
647
- }
648
- popup.appendChild(citeDiv);
649
- });
650
- return popup.innerHTML;
651
- });
652
- }
653
- }
654
- });
655
- </script>
656
- </div> <!-- /content -->
657
-
658
-
659
-
660
- </body></html>
 
spaces/Aniquel/WizApp/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("spaces/eugenesiow/remove-bg").launch()
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/engine/test.py DELETED
@@ -1,202 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import os.path as osp
- import pickle
- import shutil
- import tempfile
- import time
-
- import torch
- import torch.distributed as dist
-
- import annotator.uniformer.mmcv as mmcv
- from annotator.uniformer.mmcv.runner import get_dist_info
-
-
- def single_gpu_test(model, data_loader):
- """Test model with a single gpu.
-
- This method tests model with a single gpu and displays test progress bar.
-
- Args:
- model (nn.Module): Model to be tested.
- data_loader (nn.Dataloader): Pytorch data loader.
-
- Returns:
- list: The prediction results.
- """
- model.eval()
- results = []
- dataset = data_loader.dataset
- prog_bar = mmcv.ProgressBar(len(dataset))
- for data in data_loader:
- with torch.no_grad():
- result = model(return_loss=False, **data)
- results.extend(result)
-
- # Assume result has the same length of batch_size
- # refer to https://github.com/open-mmlab/mmcv/issues/985
- batch_size = len(result)
- for _ in range(batch_size):
- prog_bar.update()
- return results
-
-
- def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
- """Test model with multiple gpus.
-
- This method tests model with multiple gpus and collects the results
- under two different modes: gpu and cpu modes. By setting
- ``gpu_collect=True``, it encodes results to gpu tensors and use gpu
- communication for results collection. On cpu mode it saves the results on
- different gpus to ``tmpdir`` and collects them by the rank 0 worker.
-
- Args:
- model (nn.Module): Model to be tested.
- data_loader (nn.Dataloader): Pytorch data loader.
- tmpdir (str): Path of directory to save the temporary results from
- different gpus under cpu mode.
- gpu_collect (bool): Option to use either gpu or cpu to collect results.
-
- Returns:
- list: The prediction results.
- """
- model.eval()
- results = []
- dataset = data_loader.dataset
- rank, world_size = get_dist_info()
- if rank == 0:
- prog_bar = mmcv.ProgressBar(len(dataset))
- time.sleep(2) # This line can prevent deadlock problem in some cases.
- for i, data in enumerate(data_loader):
- with torch.no_grad():
- result = model(return_loss=False, **data)
- results.extend(result)
-
- if rank == 0:
- batch_size = len(result)
- batch_size_all = batch_size * world_size
- if batch_size_all + prog_bar.completed > len(dataset):
- batch_size_all = len(dataset) - prog_bar.completed
- for _ in range(batch_size_all):
- prog_bar.update()
-
- # collect results from all ranks
- if gpu_collect:
- results = collect_results_gpu(results, len(dataset))
- else:
- results = collect_results_cpu(results, len(dataset), tmpdir)
- return results
-
-
- def collect_results_cpu(result_part, size, tmpdir=None):
- """Collect results under cpu mode.
-
- On cpu mode, this function will save the results on different gpus to
- ``tmpdir`` and collect them by the rank 0 worker.
-
- Args:
- result_part (list): Result list containing result parts
- to be collected.
- size (int): Size of the results, commonly equal to length of
- the results.
- tmpdir (str | None): temporal directory for collected results to
- store. If set to None, it will create a random temporal directory
- for it.
-
- Returns:
- list: The collected results.
- """
- rank, world_size = get_dist_info()
- # create a tmp dir if it is not specified
111
- if tmpdir is None:
112
- MAX_LEN = 512
113
- # 32 is whitespace
114
- dir_tensor = torch.full((MAX_LEN, ),
115
- 32,
116
- dtype=torch.uint8,
117
- device='cuda')
118
- if rank == 0:
119
- mmcv.mkdir_or_exist('.dist_test')
120
- tmpdir = tempfile.mkdtemp(dir='.dist_test')
121
- tmpdir = torch.tensor(
122
- bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
123
- dir_tensor[:len(tmpdir)] = tmpdir
124
- dist.broadcast(dir_tensor, 0)
125
- tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
126
- else:
127
- mmcv.mkdir_or_exist(tmpdir)
128
- # dump the part result to the dir
129
- mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
130
- dist.barrier()
131
- # collect all parts
132
- if rank != 0:
133
- return None
134
- else:
135
- # load results of all parts from tmp dir
136
- part_list = []
137
- for i in range(world_size):
138
- part_file = osp.join(tmpdir, f'part_{i}.pkl')
139
- part_result = mmcv.load(part_file)
140
- # When data is severely insufficient, an empty part_result
141
- # on a certain gpu could makes the overall outputs empty.
142
- if part_result:
143
- part_list.append(part_result)
144
- # sort the results
145
- ordered_results = []
146
- for res in zip(*part_list):
147
- ordered_results.extend(list(res))
148
- # the dataloader may pad some samples
149
- ordered_results = ordered_results[:size]
150
- # remove tmp dir
151
- shutil.rmtree(tmpdir)
152
- return ordered_results
153
-
154
-
155
- def collect_results_gpu(result_part, size):
156
- """Collect results under gpu mode.
157
-
158
- On gpu mode, this function will encode results to gpu tensors and use gpu
159
- communication for results collection.
160
-
161
- Args:
162
- result_part (list): Result list containing result parts
163
- to be collected.
164
- size (int): Size of the results, commonly equal to length of
165
- the results.
166
-
167
- Returns:
168
- list: The collected results.
169
- """
170
- rank, world_size = get_dist_info()
171
- # dump result part to tensor with pickle
172
- part_tensor = torch.tensor(
173
- bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
174
- # gather all result part tensor shape
175
- shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
176
- shape_list = [shape_tensor.clone() for _ in range(world_size)]
177
- dist.all_gather(shape_list, shape_tensor)
178
- # padding result part tensor to max length
179
- shape_max = torch.tensor(shape_list).max()
180
- part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
181
- part_send[:shape_tensor[0]] = part_tensor
182
- part_recv_list = [
183
- part_tensor.new_zeros(shape_max) for _ in range(world_size)
184
- ]
185
- # gather all result part
186
- dist.all_gather(part_recv_list, part_send)
187
-
188
- if rank == 0:
189
- part_list = []
190
- for recv, shape in zip(part_recv_list, shape_list):
191
- part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
192
- # When data is severely insufficient, an empty part_result
193
- # on a certain gpu could makes the overall outputs empty.
194
- if part_result:
195
- part_list.append(part_result)
196
- # sort the results
197
- ordered_results = []
198
- for res in zip(*part_list):
199
- ordered_results.extend(list(res))
200
- # the dataloader may pad some samples
201
- ordered_results = ordered_results[:size]
202
- return ordered_results
 
spaces/Artgor/digit-draw-detect/.github/README.md DELETED
@@ -1,13 +0,0 @@
- ![visitors](https://visitor-badge.glitch.me/badge?page_id=wissamantoun.arabicnlpapp)
- [![DeepSource](https://static.deepsource.io/deepsource-badge-light-mini.svg)](https://deepsource.io/gh/Erlemar/digit-draw-detect/?ref=repository-badge )
-
- This is a repo of my "Handwritten digit detector" pet-project. It uses a YOLOv3 model trained from scratch and Streamlit for frontent. You can see the live version of the app [here](https://huggingface.co/spaces/Artgor/digit-draw-detect).
-
- If you are interested in reading more about this project, here are some links:
- * [Project page on my personal website](https://andlukyane.com/project/drawn-digits-prediction)
- * [A dataset with the digits and bounding boxes on Kaggle](https://www.kaggle.com/datasets/artgor/handwritten-digits-and-bounding-boxes)
- * [Training code](https://github.com/Erlemar/pytorch_tempest_pet_)
- * [Blogpost on my personal website](https://andlukyane.com/blog/a-third-life-of-a-personal-project)
- * [Blogpost on medium](https://towardsdatascience.com/the-third-life-of-a-personal-pet-project-for-handwritten-digit-recognition-fd908dc8e7a1)
- * [Russian blogpost on habr](https://habr.com/ru/company/ods/blog/707046/)
- * [W&B report](https://wandb.ai/al-3002-w/pet_project_object_detection/reports/Training-a-model-for-Handwritten-Object-Detection---VmlldzozMTgwMzA2?accessToken=yi6t4sz6iwr1yp78nfpvw71qao5wibak30np9tfft885tdj26g3tk91h1sie3h5m)
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/proxy.py DELETED
@@ -1,57 +0,0 @@
- from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
-
-
- def connection_requires_http_tunnel(
-     proxy_url=None, proxy_config=None, destination_scheme=None
- ):
-     """
-     Returns True if the connection requires an HTTP CONNECT through the proxy.
-
-     :param URL proxy_url:
-         URL of the proxy.
-     :param ProxyConfig proxy_config:
-         Proxy configuration from poolmanager.py
-     :param str destination_scheme:
-         The scheme of the destination. (i.e https, http, etc)
-     """
-     # If we're not using a proxy, no way to use a tunnel.
-     if proxy_url is None:
-         return False
-
-     # HTTP destinations never require tunneling, we always forward.
-     if destination_scheme == "http":
-         return False
-
-     # Support for forwarding with HTTPS proxies and HTTPS destinations.
-     if (
-         proxy_url.scheme == "https"
-         and proxy_config
-         and proxy_config.use_forwarding_for_https
-     ):
-         return False
-
-     # Otherwise always use a tunnel.
-     return True
-
-
- def create_proxy_ssl_context(
-     ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
- ):
-     """
-     Generates a default proxy ssl context if one hasn't been provided by the
-     user.
-     """
-     ssl_context = create_urllib3_context(
-         ssl_version=resolve_ssl_version(ssl_version),
-         cert_reqs=resolve_cert_reqs(cert_reqs),
-     )
-
-     if (
-         not ca_certs
-         and not ca_cert_dir
-         and not ca_cert_data
-         and hasattr(ssl_context, "load_default_certs")
-     ):
-         ssl_context.load_default_certs()
-
-     return ssl_context
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__init__.py DELETED
@@ -1,331 +0,0 @@
1
- # module pyparsing.py
2
- #
3
- # Copyright (c) 2003-2022 Paul T. McGuire
4
- #
5
- # Permission is hereby granted, free of charge, to any person obtaining
6
- # a copy of this software and associated documentation files (the
7
- # "Software"), to deal in the Software without restriction, including
8
- # without limitation the rights to use, copy, modify, merge, publish,
9
- # distribute, sublicense, and/or sell copies of the Software, and to
10
- # permit persons to whom the Software is furnished to do so, subject to
11
- # the following conditions:
12
- #
13
- # The above copyright notice and this permission notice shall be
14
- # included in all copies or substantial portions of the Software.
15
- #
16
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
- # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
- # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19
- # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20
- # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21
- # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22
- # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23
- #
24
-
25
- __doc__ = """
26
- pyparsing module - Classes and methods to define and execute parsing grammars
27
- =============================================================================
28
-
29
- The pyparsing module is an alternative approach to creating and
30
- executing simple grammars, vs. the traditional lex/yacc approach, or the
31
- use of regular expressions. With pyparsing, you don't need to learn
32
- a new syntax for defining grammars or matching expressions - the parsing
33
- module provides a library of classes that you use to construct the
34
- grammar directly in Python.
35
-
36
- Here is a program to parse "Hello, World!" (or any greeting of the form
37
- ``"<salutation>, <addressee>!"``), built up using :class:`Word`,
38
- :class:`Literal`, and :class:`And` elements
39
- (the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
40
- and the strings are auto-converted to :class:`Literal` expressions)::
41
-
42
- from pyparsing import Word, alphas
43
-
44
- # define grammar of a greeting
45
- greet = Word(alphas) + "," + Word(alphas) + "!"
46
-
47
- hello = "Hello, World!"
48
- print(hello, "->", greet.parse_string(hello))
49
-
50
- The program outputs the following::
51
-
52
- Hello, World! -> ['Hello', ',', 'World', '!']
53
-
54
- The Python representation of the grammar is quite readable, owing to the
55
- self-explanatory class names, and the use of :class:`'+'<And>`,
56
- :class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
57
-
58
- The :class:`ParseResults` object returned from
59
- :class:`ParserElement.parseString` can be
60
- accessed as a nested list, a dictionary, or an object with named
61
- attributes.
62
-
63
- The pyparsing module handles some of the problems that are typically
64
- vexing when writing text parsers:
65
-
66
- - extra or missing whitespace (the above program will also handle
67
- "Hello,World!", "Hello , World !", etc.)
68
- - quoted strings
69
- - embedded comments
70
-
71
-
72
- Getting Started -
73
- -----------------
74
- Visit the classes :class:`ParserElement` and :class:`ParseResults` to
75
- see the base classes that most other pyparsing
76
- classes inherit from. Use the docstrings for examples of how to:
77
-
78
- - construct literal match expressions from :class:`Literal` and
79
- :class:`CaselessLiteral` classes
80
- - construct character word-group expressions using the :class:`Word`
81
- class
82
- - see how to create repetitive expressions using :class:`ZeroOrMore`
83
- and :class:`OneOrMore` classes
84
- - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
85
- and :class:`'&'<Each>` operators to combine simple expressions into
86
- more complex ones
87
- - associate names with your parsed results using
88
- :class:`ParserElement.setResultsName`
89
- - access the parsed data, which is returned as a :class:`ParseResults`
90
- object
91
- - find some helpful expression short-cuts like :class:`delimitedList`
92
- and :class:`oneOf`
93
- - find more useful common expressions in the :class:`pyparsing_common`
94
- namespace class
95
- """
96
- from typing import NamedTuple
97
-
98
-
99
- class version_info(NamedTuple):
100
- major: int
101
- minor: int
102
- micro: int
103
- releaselevel: str
104
- serial: int
105
-
106
- @property
107
- def __version__(self):
108
- return (
109
- "{}.{}.{}".format(self.major, self.minor, self.micro)
110
- + (
111
- "{}{}{}".format(
112
- "r" if self.releaselevel[0] == "c" else "",
113
- self.releaselevel[0],
114
- self.serial,
115
- ),
116
- "",
117
- )[self.releaselevel == "final"]
118
- )
119
-
120
- def __str__(self):
121
- return "{} {} / {}".format(__name__, self.__version__, __version_time__)
122
-
123
- def __repr__(self):
124
- return "{}.{}({})".format(
125
- __name__,
126
- type(self).__name__,
127
- ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
128
- )
129
-
130
-
131
- __version_info__ = version_info(3, 0, 9, "final", 0)
132
- __version_time__ = "05 May 2022 07:02 UTC"
133
- __version__ = __version_info__.__version__
134
- __versionTime__ = __version_time__
135
- __author__ = "Paul McGuire <[email protected]>"
136
-
137
- from .util import *
138
- from .exceptions import *
139
- from .actions import *
140
- from .core import __diag__, __compat__
141
- from .results import *
142
- from .core import *
143
- from .core import _builtin_exprs as core_builtin_exprs
144
- from .helpers import *
145
- from .helpers import _builtin_exprs as helper_builtin_exprs
146
-
147
- from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
148
- from .testing import pyparsing_test as testing
149
- from .common import (
150
- pyparsing_common as common,
151
- _builtin_exprs as common_builtin_exprs,
152
- )
153
-
154
- # define backward compat synonyms
155
- if "pyparsing_unicode" not in globals():
156
- pyparsing_unicode = unicode
157
- if "pyparsing_common" not in globals():
158
- pyparsing_common = common
159
- if "pyparsing_test" not in globals():
160
- pyparsing_test = testing
161
-
162
- core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
163
-
164
-
165
- __all__ = [
166
- "__version__",
167
- "__version_time__",
168
- "__author__",
169
- "__compat__",
170
- "__diag__",
171
- "And",
172
- "AtLineStart",
173
- "AtStringStart",
174
- "CaselessKeyword",
175
- "CaselessLiteral",
176
- "CharsNotIn",
177
- "Combine",
178
- "Dict",
179
- "Each",
180
- "Empty",
181
- "FollowedBy",
182
- "Forward",
183
- "GoToColumn",
184
- "Group",
185
- "IndentedBlock",
186
- "Keyword",
187
- "LineEnd",
188
- "LineStart",
189
- "Literal",
190
- "Located",
191
- "PrecededBy",
192
- "MatchFirst",
193
- "NoMatch",
194
- "NotAny",
195
- "OneOrMore",
196
- "OnlyOnce",
197
- "OpAssoc",
198
- "Opt",
199
- "Optional",
200
- "Or",
201
- "ParseBaseException",
202
- "ParseElementEnhance",
203
- "ParseException",
204
- "ParseExpression",
205
- "ParseFatalException",
206
- "ParseResults",
207
- "ParseSyntaxException",
208
- "ParserElement",
209
- "PositionToken",
210
- "QuotedString",
211
- "RecursiveGrammarException",
212
- "Regex",
213
- "SkipTo",
214
- "StringEnd",
215
- "StringStart",
216
- "Suppress",
217
- "Token",
218
- "TokenConverter",
219
- "White",
220
- "Word",
221
- "WordEnd",
222
- "WordStart",
223
- "ZeroOrMore",
224
- "Char",
225
- "alphanums",
226
- "alphas",
227
- "alphas8bit",
228
- "any_close_tag",
229
- "any_open_tag",
230
- "c_style_comment",
231
- "col",
232
- "common_html_entity",
233
- "counted_array",
234
- "cpp_style_comment",
235
- "dbl_quoted_string",
236
- "dbl_slash_comment",
237
- "delimited_list",
238
- "dict_of",
239
- "empty",
240
- "hexnums",
241
- "html_comment",
242
- "identchars",
243
- "identbodychars",
244
- "java_style_comment",
245
- "line",
246
- "line_end",
247
- "line_start",
248
- "lineno",
249
- "make_html_tags",
250
- "make_xml_tags",
251
- "match_only_at_col",
252
- "match_previous_expr",
253
- "match_previous_literal",
254
- "nested_expr",
255
- "null_debug_action",
256
- "nums",
257
- "one_of",
258
- "printables",
259
- "punc8bit",
260
- "python_style_comment",
261
- "quoted_string",
262
- "remove_quotes",
263
- "replace_with",
264
- "replace_html_entity",
265
- "rest_of_line",
266
- "sgl_quoted_string",
267
- "srange",
268
- "string_end",
269
- "string_start",
270
- "trace_parse_action",
271
- "unicode_string",
272
- "with_attribute",
273
- "indentedBlock",
274
- "original_text_for",
275
- "ungroup",
276
- "infix_notation",
277
- "locatedExpr",
278
- "with_class",
279
- "CloseMatch",
280
- "token_map",
281
- "pyparsing_common",
282
- "pyparsing_unicode",
283
- "unicode_set",
284
- "condition_as_parse_action",
285
- "pyparsing_test",
286
- # pre-PEP8 compatibility names
287
- "__versionTime__",
288
- "anyCloseTag",
289
- "anyOpenTag",
290
- "cStyleComment",
291
- "commonHTMLEntity",
292
- "countedArray",
293
- "cppStyleComment",
294
- "dblQuotedString",
295
- "dblSlashComment",
296
- "delimitedList",
297
- "dictOf",
298
- "htmlComment",
299
- "javaStyleComment",
300
- "lineEnd",
301
- "lineStart",
302
- "makeHTMLTags",
303
- "makeXMLTags",
304
- "matchOnlyAtCol",
305
- "matchPreviousExpr",
306
- "matchPreviousLiteral",
307
- "nestedExpr",
308
- "nullDebugAction",
309
- "oneOf",
310
- "opAssoc",
311
- "pythonStyleComment",
312
- "quotedString",
313
- "removeQuotes",
314
- "replaceHTMLEntity",
315
- "replaceWith",
316
- "restOfLine",
317
- "sglQuotedString",
318
- "stringEnd",
319
- "stringStart",
320
- "traceParseAction",
321
- "unicodeString",
322
- "withAttribute",
323
- "indentedBlock",
324
- "originalTextFor",
325
- "infixNotation",
326
- "locatedExpr",
327
- "withClass",
328
- "tokenMap",
329
- "conditionAsParseAction",
330
- "autoname_elements",
331
- ]
 
spaces/Benson/text-generation/Examples/Arrow Fest Apk.md DELETED
@@ -1,47 +0,0 @@
1
-
2
- <h1>Arrow Fest APK: Un juego de acción divertido y adictivo para Android</h1>
3
- <p>Si usted está buscando un nuevo y emocionante juego de acción para jugar en su dispositivo Android, es posible que desee echa un vistazo a Arrow Fest APK. Este es un juego donde tienes que controlar tus flechas, elegir las mejores puertas, y destruir a todos en su camino. Usted puede recoger un montón de monedas y actualizar sus flechas y los ingresos, así como hacer frente a diferentes enemigos y gigantes. En este artículo, le diremos más sobre lo que es Arrow Fest APK, cómo jugarlo, qué características tiene, y cómo descargar e instalar en su dispositivo. </p>
4
- <h2>arrow fest apk</h2><br /><p><b><b>Download File</b> &#10026;&#10026;&#10026; <a href="https://bltlly.com/2v6JxZ">https://bltlly.com/2v6JxZ</a></b></p><br /><br />
5
- <h2>¿Qué es Arrow Fest APK? </h2>
6
- <p>Arrow Fest APK es un juego de acción desarrollado por Rollic Games, un popular estudio de juegos que ha creado muchos otros juegos de éxito como Go Knots 3D, Tangle Master 3D, High Heels! , y más. Arrow Fest APK es uno de sus últimos juegos, que fue lanzado en mayo de 2023. Ya ha ganado más de 10 millones de descargas y una calificación de 3,6 estrellas en Google Play Store. También está disponible en otras plataformas como APKCombo . </p>
7
- <h3>El juego de Arrow Fest APK</h3>
8
- <p>El juego de Arrow Fest APK es simple y adictivo. Tienes que deslizar en la pantalla para controlar las flechas, que se multiplican a medida que pasas por las puertas. Tienes que elegir las mejores puertas que te darán más flechas, evitando las que las reducirán. También tienes que apuntar y disparar a los enemigos y gigantes que intentarán detenerte. Puedes matarlos con un solo golpe si tienes suficientes flechas, pero si te quedas sin flechas, perderás el juego. También puedes recoger monedas en el camino, que puedes usar para actualizar tus flechas e ingresos. </p>
9
- <h3>Las características de Arrow Fest APK</h3>
10
- <p>Arrow Fest APK tiene muchas características que lo hacen divertido y agradable de jugar. Aquí están algunos de ellos:</p>
11
- <h4>Controles simples e intuitivos</h4>
12
-
13
- <h4>Muchos niveles únicos para jugar</h4>
14
- <p>Arrow Fest APK tiene un montón de niveles únicos que desafiará sus habilidades y reflejos. Cada nivel tiene diferentes diseños, puertas, enemigos y gigantes. Nunca te aburrirás mientras avanzas en el juego. Algunos niveles son fáciles y relajantes, mientras que otros son duros e intensos. Tendrás que usar tu estrategia y lógica para elegir las mejores puertas y evitar las trampas. </p>
15
- <h4>Muchos enemigos y gigantes para destruir</h4>
16
- <p>Arrow Fest APK tiene un montón de enemigos y gigantes que tratará de evitar que llegue al final del nivel. Vienen en diferentes formas, tamaños, colores y comportamientos. Algunos de ellos son rápidos y ágiles, mientras que otros son lentos y voluminosos. Algunos de ellos son inofensivos y pasivos, mientras que otros son agresivos y peligrosos. Tendrás que ser cuidadoso y alerta al enfrentarlos. </p>
17
- <p></p>
18
- <h4>Muchas puertas para decidir</h4>
19
- <p>Arrow Fest APK tiene un montón de puertas que afectarán a sus flechas de diferentes maneras. Algunas puertas multiplicarán tus flechas, mientras que otras las dividirán. Algunas puertas cambiarán el color o la forma de sus flechas, mientras que otras cambiarán su dirección o velocidad. Algunas puertas te darán bonificaciones o potenciadores, mientras que otras te darán penalizaciones o obstáculos <p>. Tendrás que tomar decisiones rápidas e inteligentes al pasar por las puertas. </p>
20
- <h4> Un montón de monedas para recoger y actualizar sus flechas y los ingresos</h4>
21
- <p>Arrow Fest APK tiene un montón de monedas que usted puede recoger a medida que juega el juego. Puede utilizar las monedas para actualizar sus flechas y los ingresos. Puedes aumentar el número, tamaño, velocidad y potencia de tus flechas, así como la cantidad de monedas que ganes por nivel. También puedes desbloquear nuevos tipos de flechas, como flechas de fuego, flechas de hielo, flechas de relámpago y más. Actualizar tus flechas e ingresos te ayudará a superar los niveles y enemigos más difíciles. </p>
22
- <h2>Cómo descargar e instalar Arrow Fest APK? </h2>
23
-
24
- <h3>Descargar el archivo APK de una fuente de confianza</h3>
25
- <p>El primer paso es descargar el archivo APK de Arrow Fest APK de una fuente de confianza. Puede utilizar los enlaces que se proporcionan a continuación para descargar la última versión del juego de APKCombo o Google Play Store. Asegúrate de tener suficiente espacio de almacenamiento en tu dispositivo antes de descargar el archivo. </p>
26
- <h3>Habilitar fuentes desconocidas en su dispositivo</h3>
27
- <p>El siguiente paso es habilitar fuentes desconocidas en su dispositivo. Esto le permitirá instalar aplicaciones que no son de la tienda de aplicaciones oficial. Para hacer esto, vaya a la configuración del dispositivo, luego a la seguridad y luego a fuentes desconocidas. Active la opción para permitir la instalación de aplicaciones desde fuentes desconocidas. Puede ver un mensaje de advertencia, pero puede ignorarlo y proceder. </p>
28
- <h3>Instalar el archivo APK y lanzar el juego</h3>
29
- <p>El paso final es instalar el archivo APK y lanzar el juego. Busque el archivo APK descargado en su dispositivo, luego toque en él para iniciar el proceso de instalación. Siga las instrucciones de la pantalla para completar la instalación. Una vez hecho esto, puede encontrar el icono del juego en la pantalla de inicio o en el cajón de la aplicación. Toque en él para iniciar el juego y disfrutar de jugar Arrow Fest APK.</p>
30
- <h2>Conclusión</h2>
31
- <p>Arrow Fest APK es un juego de acción divertido y adictivo para dispositivos Android. Tiene controles simples e intuitivos, muchos niveles únicos, muchos enemigos y gigantes, muchas puertas y muchas monedas. Es un juego que pondrá a prueba tus habilidades y reflejos, así como entretenerte durante horas. Si desea probar este juego, se puede descargar e instalar utilizando los enlaces de abajo. Diviértete jugando Arrow Fest APK! </p>
32
- <h2>Preguntas frecuentes</h2>
33
- <p>Aquí hay algunas preguntas frecuentes sobre Arrow Fest APK:</p>
34
- <ul>
35
- <li><b>Es Arrow Fest APK libre para jugar? </b></li>
36
- <p>Sí, Arrow Fest APK es libre de jugar. Sin embargo, puede contener anuncios y compras en la aplicación que requieren dinero real. </p>
37
- <li><b>¿Es seguro descargar e instalar Arrow Fest APK? </b></li>
38
-
39
- <li><b>¿Cuáles son los requisitos mínimos para jugar Arrow Fest APK? </b></li>
40
- <p>Los requisitos mínimos para jugar Arrow Fest APK son Android 5.0 o superior, 100 MB de espacio de almacenamiento gratuito, y una conexión a Internet estable. </p>
41
- <li><b>¿Cómo puedo contactar con el desarrollador de Arrow Fest APK? </b></li>
42
- <p>Puede ponerse en contacto con el desarrollador de Arrow Fest APK enviando un correo electrónico a [email protected] o visitando su sitio web en https://www.rollicgames.com/.</p>
43
- <li><b>¿Puedo jugar Arrow Fest APK offline? </b></li>
44
- <p>No, no se puede jugar Arrow Fest APK offline. Necesita una conexión a Internet para jugar el juego. </p>
45
- </ul></p> 64aa2da5cf<br />
46
- <br />
47
- <br />
 
spaces/BridgeEight/internlm-20B-chat-w4-turbomind/install_lmdeploy.sh DELETED
@@ -1,27 +0,0 @@
- #!/bin/bash
-
- # Install lmdeploy
- # Get the path of the lib folder under the installed lmdeploy location
- lmdeploy_dir=$(pip show lmdeploy | grep Location | cut -d' ' -f2)
- lib_dir="${lmdeploy_dir}/lmdeploy/lib"
-
- # Check whether the lib directory exists
- if [ ! -d "$lib_dir" ]
- then
-     echo "Lib directory does not exist at ${lib_dir}"
-     exit 1
- fi
-
- # Clone the lmdeploy repository
- git clone https://github.com/InternLM/lmdeploy.git || exit 1
-
- # Copy the lib folder into the freshly cloned lmdeploy
- cp -r "$lib_dir" "lmdeploy/lmdeploy/" || exit 1
-
- pip uninstall -y lmdeploy
-
- cd lmdeploy && git checkout v0.0.10 && cd ..
- mv lmdeploy lmdeploy-backup
- mv lmdeploy-backup/lmdeploy lmdeploy
-
- echo "Script executed successfully"
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/notes/contributing.md DELETED
@@ -1 +0,0 @@
- ../../.github/CONTRIBUTING.md
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mmnasnet/nasnet.py DELETED
@@ -1,218 +0,0 @@
1
- # --------------------------------------------------------
2
- # OpenVQA
3
- # Written by Zhenwei Shao https://github.com/ParadoxZW
4
- # --------------------------------------------------------
5
-
6
- from openvqa.ops.fc import FC, MLP
7
- from openvqa.ops.layer_norm import LayerNorm
8
-
9
- import torch.nn as nn
10
- import torch.nn.functional as F
11
- import torch
12
- import math
13
-
14
-
15
- # ------------------------------
16
- # --- Operations and Modules ---
17
- # ------------------------------
18
-
19
- class RelMHAtt(nn.Module):
20
- def __init__(self, __C):
21
- super(RelMHAtt, self).__init__()
22
- self.__C = __C
23
- self.HBASE = __C.REL_HBASE
24
- self.HHEAD = int(__C.HIDDEN_SIZE / __C.REL_HBASE)
25
-
26
- self.linear_v = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
27
- self.linear_k = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
28
- self.linear_q = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
29
- self.linear_merge = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
30
- self.linear_r = nn.Linear(__C.REL_SIZE, self.HHEAD, bias=True)
31
-
32
- self.dropout = nn.Dropout(__C.DROPOUT_R)
33
- self.relu = nn.ReLU(inplace=True)
34
-
35
- def forward(self, v, k, q, mask=None, rel_embed=None):
36
- assert rel_embed is not None
37
- n_batches = q.size(0)
38
-
39
- v = self.linear_v(v).view(n_batches, -1, self.HHEAD,
40
- self.HBASE).transpose(1, 2)
41
- k = self.linear_k(k).view(n_batches, -1, self.HHEAD,
42
- self.HBASE).transpose(1, 2)
43
- q = self.linear_q(q).view(n_batches, -1, self.HHEAD,
44
- self.HBASE).transpose(1, 2)
45
- r = self.relu(self.linear_r(rel_embed)).permute(0, 3, 1, 2)
46
-
47
- d_k = q.size(-1)
48
- scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
49
- scores = torch.log(torch.clamp(r, min=1e-6)) + scores
50
- if mask is not None:
51
- scores = scores.masked_fill(mask, -1e9)
52
- att_map = F.softmax(scores, dim=-1)
53
- att_map = self.dropout(att_map)
54
- atted = torch.matmul(att_map, v)
55
-
56
- atted = atted.transpose(1, 2).contiguous().view(
57
- n_batches, -1, self.__C.HIDDEN_SIZE)
58
- atted = self.linear_merge(atted)
59
-
60
- return atted
61
-
62
-
63
- class MHAtt(nn.Module):
64
- def __init__(self, __C):
65
- super(MHAtt, self).__init__()
66
- self.__C = __C
67
-
68
- self.linear_v = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
69
- self.linear_k = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
70
- self.linear_q = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
71
- self.linear_merge = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)
72
-
73
- self.dropout = nn.Dropout(__C.DROPOUT_R)
74
-
75
- def forward(self, v, k, q, mask):
76
- n_batches = q.size(0)
77
-
78
- v = self.linear_v(v).view(
79
- n_batches,
80
- -1,
81
- self.__C.MULTI_HEAD,
82
- int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)
83
- ).transpose(1, 2)
84
-
85
- k = self.linear_k(k).view(
86
- n_batches,
87
- -1,
88
- self.__C.MULTI_HEAD,
89
- int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)
90
- ).transpose(1, 2)
91
-
92
- q = self.linear_q(q).view(
93
- n_batches,
94
- -1,
95
- self.__C.MULTI_HEAD,
96
- int(self.__C.HIDDEN_SIZE / self.__C.MULTI_HEAD)
97
- ).transpose(1, 2)
98
-
99
- atted = self.att(v, k, q, mask)
100
- atted = atted.transpose(1, 2).contiguous().view(
101
- n_batches,
102
- -1,
103
- self.__C.HIDDEN_SIZE
104
- )
105
-
106
- atted = self.linear_merge(atted)
107
-
108
- return atted
109
-
110
- def att(self, value, key, query, mask):
111
- d_k = query.size(-1)
112
-
113
- scores = torch.matmul(
114
- query, key.transpose(-2, -1)
115
- ) / math.sqrt(d_k)
116
-
117
- if mask is not None:
118
- scores = scores.masked_fill(mask, -1e9)
119
-
120
- att_map = F.softmax(scores, dim=-1)
121
- att_map = self.dropout(att_map)
122
-
123
- return torch.matmul(att_map, value)
124
-
125
-
126
- class FFN(nn.Module):
127
- def __init__(self, __C):
128
- super(FFN, self).__init__()
129
-
130
- self.mlp = MLP(
131
- in_size=__C.HIDDEN_SIZE,
132
- mid_size=__C.HIDDEN_SIZE * 4,
133
- out_size=__C.HIDDEN_SIZE,
134
- dropout_r=__C.DROPOUT_R,
135
- use_relu=True
136
- )
137
-
138
- self.dropout = nn.Dropout(__C.DROPOUT_R)
139
- self.norm = LayerNorm(__C.HIDDEN_SIZE)
140
-
141
- def forward(self, x, arg1, arg2, arg3, arg4):
142
- x = self.norm(x + self.dropout(
143
- self.mlp(x)
144
- ))
145
- return x
146
-
147
-
148
- class SA(nn.Module):
149
- def __init__(self, __C, size=1024):
150
- super(SA, self).__init__()
151
-
152
- self.mhatt = MHAtt(__C)
153
-
154
- self.dropout = nn.Dropout(__C.DROPOUT_R)
155
- self.norm = LayerNorm(__C.HIDDEN_SIZE)
156
-
157
- def forward(self, y, arg1, y_mask, arg2, arg3):
158
- y = self.norm(y + self.dropout(
159
- self.mhatt(y, y, y, y_mask)
160
- ))
161
-
162
- return y
163
-
164
-
165
- class RSA(nn.Module):
166
- def __init__(self, __C, size=1024):
167
- super(RSA, self).__init__()
168
-
169
- self.mhatt = RelMHAtt(__C)
170
-
171
- self.dropout = nn.Dropout(__C.DROPOUT_R)
172
- self.norm = LayerNorm(__C.HIDDEN_SIZE)
173
-
174
- def forward(self, x, arg1, x_mask, arg2, rela):
175
- x = self.norm(x + self.dropout(
176
- self.mhatt(x, x, x, x_mask, rela)
177
- ))
178
-
179
- return x
180
-
181
-
182
- class GA(nn.Module):
183
- def __init__(self, __C):
184
- super(GA, self).__init__()
185
-
186
- self.mhatt = MHAtt(__C)
187
-
188
- self.dropout = nn.Dropout(__C.DROPOUT_R)
189
- self.norm = LayerNorm(__C.HIDDEN_SIZE)
190
-
191
- def forward(self, x, y, x_mask, y_mask, rela):
192
- x = self.norm(x + self.dropout(
193
- self.mhatt(v=y, k=y, q=x, mask=y_mask)
194
- ))
195
-
196
- return x
197
-
198
-
199
- # ------------------------------------------------
200
- # --- Encoder-Decoder Architecture of MMNasNet ---
201
- # ------------------------------------------------
202
-
203
- class NAS_ED(nn.Module):
204
- def __init__(self, __C):
205
- super(NAS_ED, self).__init__()
206
- enc = __C.ARCH['enc']
207
- dec = __C.ARCH['dec']
208
- self.enc_list = nn.ModuleList([eval(layer)(__C) for layer in enc])
209
- self.dec_list = nn.ModuleList([eval(layer)(__C) for layer in dec])
210
-
211
- def forward(self, y, x, y_mask, x_mask, rela):
212
- for enc in self.enc_list:
213
- y = enc(y, None, y_mask, None, None)
214
-
215
- for dec in self.dec_list:
216
- x = dec(x, y, x_mask, y_mask, rela)
217
-
218
- return y, x
 
spaces/CVPR/LIVE/thrust/thrust/detail/complex/cexp.h DELETED
@@ -1,183 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- * Copyright 2013 Filipe RNC Maia
4
- *
5
- * Licensed under the Apache License, Version 2.0 (the "License");
6
- * you may not use this file except in compliance with the License.
7
- * You may obtain a copy of the License at
8
- *
9
- * http://www.apache.org/licenses/LICENSE-2.0
10
- *
11
- * Unless required by applicable law or agreed to in writing, software
12
- * distributed under the License is distributed on an "AS IS" BASIS,
13
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- * See the License for the specific language governing permissions and
15
- * limitations under the License.
16
- */
17
-
18
- /*-
19
- * Copyright (c) 2011 David Schultz <[email protected]>
20
- * All rights reserved.
21
- *
22
- * Redistribution and use in source and binary forms, with or without
23
- * modification, are permitted provided that the following conditions
24
- * are met:
25
- * 1. Redistributions of source code must retain the above copyright
26
- * notice, this list of conditions and the following disclaimer.
27
- * 2. Redistributions in binary form must reproduce the above copyright
28
- * notice, this list of conditions and the following disclaimer in the
29
- * documentation and/or other materials provided with the distribution.
30
- *
31
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
32
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
35
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41
- * SUCH DAMAGE.
42
- */
43
-
44
- /* adapted from FreeBSD:
45
- * lib/msun/src/s_cexp.c
46
- * lib/msun/src/k_exp.c
47
- *
48
- */
49
-
50
- #pragma once
51
-
52
- #include <thrust/complex.h>
53
- #include <thrust/detail/complex/math_private.h>
54
-
55
- namespace thrust{
56
- namespace detail{
57
- namespace complex{
58
- /*
59
- * Compute exp(x), scaled to avoid spurious overflow. An exponent is
60
- * returned separately in 'expt'.
61
- *
62
- * Input: ln(DBL_MAX) <= x < ln(2 * DBL_MAX / DBL_MIN_DENORM) ~= 1454.91
63
- * Output: 2**1023 <= y < 2**1024
64
- */
65
- __host__ __device__ inline
66
- double frexp_exp(double x, int *expt){
67
- const uint32_t k = 1799; /* constant for reduction */
68
- const double kln2 = 1246.97177782734161156; /* k * ln2 */
69
-
70
- double exp_x;
71
- uint32_t hx;
72
-
73
- /*
74
- * We use exp(x) = exp(x - kln2) * 2**k, carefully chosen to
75
- * minimize |exp(kln2) - 2**k|. We also scale the exponent of
76
- * exp_x to MAX_EXP so that the result can be multiplied by
77
- * a tiny number without losing accuracy due to denormalization.
78
- */
79
- exp_x = exp(x - kln2);
80
- get_high_word(hx, exp_x);
81
- *expt = (hx >> 20) - (0x3ff + 1023) + k;
82
- set_high_word(exp_x, (hx & 0xfffff) | ((0x3ff + 1023) << 20));
83
- return (exp_x);
84
- }
85
-
86
-
87
- __host__ __device__ inline
88
- complex<double> ldexp_cexp(complex<double> z, int expt){
89
- double x, y, exp_x, scale1, scale2;
90
- int ex_expt, half_expt;
91
-
92
- x = z.real();
93
- y = z.imag();
94
- exp_x = frexp_exp(x, &ex_expt);
95
- expt += ex_expt;
96
-
97
- /*
98
- * Arrange so that scale1 * scale2 == 2**expt. We use this to
99
- * compensate for scalbn being horrendously slow.
100
- */
101
- half_expt = expt / 2;
102
- insert_words(scale1, (0x3ff + half_expt) << 20, 0);
103
- half_expt = expt - half_expt;
104
- insert_words(scale2, (0x3ff + half_expt) << 20, 0);
105
-
106
- return (complex<double>(cos(y) * exp_x * scale1 * scale2,
107
- sin(y) * exp_x * scale1 * scale2));
108
- }
109
-
110
-
111
- __host__ __device__ inline
112
- complex<double> cexp(const complex<double>& z){
113
- double x, y, exp_x;
114
- uint32_t hx, hy, lx, ly;
115
-
116
- const uint32_t
117
- exp_ovfl = 0x40862e42, /* high bits of MAX_EXP * ln2 ~= 710 */
118
- cexp_ovfl = 0x4096b8e4; /* (MAX_EXP - MIN_DENORM_EXP) * ln2 */
119
-
120
-
121
- x = z.real();
122
- y = z.imag();
123
-
124
- extract_words(hy, ly, y);
125
- hy &= 0x7fffffff;
126
-
127
- /* cexp(x + I 0) = exp(x) + I 0 */
128
- if ((hy | ly) == 0)
129
- return (complex<double>(exp(x), y));
130
- extract_words(hx, lx, x);
131
- /* cexp(0 + I y) = cos(y) + I sin(y) */
132
- if (((hx & 0x7fffffff) | lx) == 0)
133
- return (complex<double>(cos(y), sin(y)));
134
-
135
- if (hy >= 0x7ff00000) {
136
- if (lx != 0 || (hx & 0x7fffffff) != 0x7ff00000) {
137
- /* cexp(finite|NaN +- I Inf|NaN) = NaN + I NaN */
138
- return (complex<double>(y - y, y - y));
139
- } else if (hx & 0x80000000) {
140
- /* cexp(-Inf +- I Inf|NaN) = 0 + I 0 */
141
- return (complex<double>(0.0, 0.0));
142
- } else {
143
- /* cexp(+Inf +- I Inf|NaN) = Inf + I NaN */
144
- return (complex<double>(x, y - y));
145
- }
146
- }
147
-
148
- if (hx >= exp_ovfl && hx <= cexp_ovfl) {
149
- /*
150
- * x is between 709.7 and 1454.3, so we must scale to avoid
151
- * overflow in exp(x).
152
- */
153
- return (ldexp_cexp(z, 0));
154
- } else {
155
- /*
156
- * Cases covered here:
157
- * - x < exp_ovfl and exp(x) won't overflow (common case)
158
- * - x > cexp_ovfl, so exp(x) * s overflows for all s > 0
159
- * - x = +-Inf (generated by exp())
160
- * - x = NaN (spurious inexact exception from y)
161
- */
162
- exp_x = std::exp(x);
163
- return (complex<double>(exp_x * cos(y), exp_x * sin(y)));
164
- }
165
- }
166
-
167
- } // namespace complex
168
-
169
- } // namespace detail
170
-
171
- template <typename ValueType>
172
- __host__ __device__
173
- inline complex<ValueType> exp(const complex<ValueType>& z){
174
- return polar(std::exp(z.real()),z.imag());
175
- }
176
-
177
- template <>
178
- __host__ __device__
179
- inline complex<double> exp(const complex<double>& z){
180
- return detail::complex::cexp(z);
181
- }
182
-
183
- } // namespace thrust
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/internal/copy_device_to_device.h DELETED
@@ -1,64 +0,0 @@
-
- /******************************************************************************
-  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions are met:
-  *     * Redistributions of source code must retain the above copyright
-  *       notice, this list of conditions and the following disclaimer.
-  *     * Redistributions in binary form must reproduce the above copyright
-  *       notice, this list of conditions and the following disclaimer in the
-  *       documentation and/or other materials provided with the distribution.
-  *     * Neither the name of the NVIDIA CORPORATION nor the
-  *       names of its contributors may be used to endorse or promote products
-  *       derived from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
-  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  *
-  ******************************************************************************/
- #pragma once
-
-
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
- #include <thrust/system/cuda/config.h>
- #include <thrust/system/cuda/detail/execution_policy.h>
- #include <thrust/system/cuda/detail/transform.h>
- #include <thrust/functional.h>
-
- namespace thrust
- {
- namespace cuda_cub {
-
- namespace __copy {
-
-   template <class Derived,
-             class InputIt,
-             class OutputIt>
-   OutputIt THRUST_RUNTIME_FUNCTION
-   device_to_device(execution_policy<Derived>& policy,
-                    InputIt first,
-                    InputIt last,
-                    OutputIt result)
-   {
-     typedef typename thrust::iterator_traits<InputIt>::value_type InputTy;
-     return cuda_cub::transform(policy,
-                                first,
-                                last,
-                                result,
-                                thrust::identity<InputTy>());
-   }
-
- } // namespace __copy
-
- } // namespace cuda_cub
- } // end namespace thrust
- #endif
 
spaces/CVPR/LIVE/thrust/thrust/system/error_code.h DELETED
@@ -1,523 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file error_code.h
19
- * \brief An object used to hold error values, such as those originating from the
20
- * operating system or other low-level application program interfaces.
21
- */
22
-
23
- #pragma once
24
-
25
- #include <thrust/detail/config.h>
26
- #include <thrust/detail/type_traits.h>
27
- #include <thrust/system/detail/errno.h>
28
- #include <iostream>
29
-
30
- namespace thrust
31
- {
32
-
33
- namespace system
34
- {
35
-
36
-
37
- /*! \addtogroup system_diagnostics
38
- * \{
39
- */
40
-
41
- class error_condition;
42
- class error_code;
43
-
44
- /*! A metafunction returning whether or not the parameter is an \p error_code enum.
45
- */
46
- template<typename T> struct is_error_code_enum : public thrust::detail::false_type {};
47
-
48
- /*! A metafunction returning whether or not the parameter is an \p error_condition enum.
49
- */
50
- template<typename T> struct is_error_condition_enum : public thrust::detail::false_type {};
51
-
52
-
53
- // XXX N3092 prefers enum class errc { ... }
54
- namespace errc
55
- {
56
-
57
- /*! An enum containing common error codes.
58
- */
59
- enum errc_t
60
- {
61
- address_family_not_supported = detail::eafnosupport,
62
- address_in_use = detail::eaddrinuse,
63
- address_not_available = detail::eaddrnotavail,
64
- already_connected = detail::eisconn,
65
- argument_list_too_long = detail::e2big,
66
- argument_out_of_domain = detail::edom,
67
- bad_address = detail::efault,
68
- bad_file_descriptor = detail::ebadf,
69
- bad_message = detail::ebadmsg,
70
- broken_pipe = detail::epipe,
71
- connection_aborted = detail::econnaborted,
72
- connection_already_in_progress = detail::ealready,
73
- connection_refused = detail::econnrefused,
74
- connection_reset = detail::econnreset,
75
- cross_device_link = detail::exdev,
76
- destination_address_required = detail::edestaddrreq,
77
- device_or_resource_busy = detail::ebusy,
78
- directory_not_empty = detail::enotempty,
79
- executable_format_error = detail::enoexec,
80
- file_exists = detail::eexist,
81
- file_too_large = detail::efbig,
82
- filename_too_long = detail::enametoolong,
83
- function_not_supported = detail::enosys,
84
- host_unreachable = detail::ehostunreach,
85
- identifier_removed = detail::eidrm,
86
- illegal_byte_sequence = detail::eilseq,
87
- inappropriate_io_control_operation = detail::enotty,
88
- interrupted = detail::eintr,
89
- invalid_argument = detail::einval,
90
- invalid_seek = detail::espipe,
91
- io_error = detail::eio,
92
- is_a_directory = detail::eisdir,
93
- message_size = detail::emsgsize,
94
- network_down = detail::enetdown,
95
- network_reset = detail::enetreset,
96
- network_unreachable = detail::enetunreach,
97
- no_buffer_space = detail::enobufs,
98
- no_child_process = detail::echild,
99
- no_link = detail::enolink,
100
- no_lock_available = detail::enolck,
101
- no_message_available = detail::enodata,
102
- no_message = detail::enomsg,
103
- no_protocol_option = detail::enoprotoopt,
104
- no_space_on_device = detail::enospc,
105
- no_stream_resources = detail::enosr,
106
- no_such_device_or_address = detail::enxio,
107
- no_such_device = detail::enodev,
108
- no_such_file_or_directory = detail::enoent,
109
- no_such_process = detail::esrch,
110
- not_a_directory = detail::enotdir,
111
- not_a_socket = detail::enotsock,
112
- not_a_stream = detail::enostr,
113
- not_connected = detail::enotconn,
114
- not_enough_memory = detail::enomem,
115
- not_supported = detail::enotsup,
116
- operation_canceled = detail::ecanceled,
117
- operation_in_progress = detail::einprogress,
118
- operation_not_permitted = detail::eperm,
119
- operation_not_supported = detail::eopnotsupp,
120
- operation_would_block = detail::ewouldblock,
121
- owner_dead = detail::eownerdead,
122
- permission_denied = detail::eacces,
123
- protocol_error = detail::eproto,
124
- protocol_not_supported = detail::eprotonosupport,
125
- read_only_file_system = detail::erofs,
126
- resource_deadlock_would_occur = detail::edeadlk,
127
- resource_unavailable_try_again = detail::eagain,
128
- result_out_of_range = detail::erange,
129
- state_not_recoverable = detail::enotrecoverable,
130
- stream_timeout = detail::etime,
131
- text_file_busy = detail::etxtbsy,
132
- timed_out = detail::etimedout,
133
- too_many_files_open_in_system = detail::enfile,
134
- too_many_files_open = detail::emfile,
135
- too_many_links = detail::emlink,
136
- too_many_symbolic_link_levels = detail::eloop,
137
- value_too_large = detail::eoverflow,
138
- wrong_protocol_type = detail::eprototype
139
- }; // end errc_t
140
-
141
- } // end namespace errc
142
-
143
-
144
- /*! Specialization of \p is_error_condition_enum for \p errc::errc_t
145
- */
146
- template<> struct is_error_condition_enum<errc::errc_t> : public thrust::detail::true_type {};
147
-
148
-
149
- // [19.5.1.1] class error_category
150
-
151
- /*! \brief The class \p error_category serves as a base class for types used to identify the
152
- * source and encoding of a particular category of error code. Classes may be derived
153
- * from \p error_category to support categories of errors in addition to those defined
154
- * in the C++ International Standard.
155
- */
156
- class error_category
157
- {
158
- public:
159
- /*! Destructor does nothing.
160
- */
161
- inline virtual ~error_category(void);
162
-
163
- // XXX enable upon c++0x
164
- // error_category(const error_category &) = delete;
165
- // error_category &operator=(const error_category &) = delete;
166
-
167
- /*! \return A string naming the error category.
168
- */
169
- inline virtual const char *name(void) const = 0;
170
-
171
- /*! \return \p error_condition(ev, *this).
172
- */
173
- inline virtual error_condition default_error_condition(int ev) const;
174
-
175
- /*! \return <tt>default_error_condition(code) == condition</tt>
176
- */
177
- inline virtual bool equivalent(int code, const error_condition &condition) const;
178
-
179
- /*! \return <tt>*this == code.category() && code.value() == condition</tt>
180
- */
181
- inline virtual bool equivalent(const error_code &code, int condition) const;
182
-
183
- /*! \return A string that describes the error condition denoted by \p ev.
184
- */
185
- virtual std::string message(int ev) const = 0;
186
-
187
- /*! \return <tt>*this == &rhs</tt>
188
- */
189
- inline bool operator==(const error_category &rhs) const;
190
-
191
- /*! \return <tt>!(*this == rhs)</tt>
192
- */
193
- inline bool operator!=(const error_category &rhs) const;
194
-
195
- /*! \return <tt>less<const error_category*>()(this, &rhs)</tt>
196
- * \note \c less provides a total ordering for pointers.
197
- */
198
- inline bool operator<(const error_category &rhs) const;
199
- }; // end error_category
200
-
201
-
202
- // [19.5.1.5] error_category objects
203
-
204
-
205
- /*! \return A reference to an object of a type derived from class \p error_category.
206
- * \note The object's \p default_error_condition and \p equivalent virtual functions
207
- * shall behave as specified for the class \p error_category. The object's
208
- * \p name virtual function shall return a pointer to the string <tt>"generic"</tt>.
209
- */
210
- inline const error_category &generic_category(void);
211
-
212
-
213
- /*! \return A reference to an object of a type derived from class \p error_category.
214
- * \note The object's \p equivalent virtual functions shall behave as specified for
215
- * class \p error_category. The object's \p name virtual function shall return
216
- * a pointer to the string <tt>"system"</tt>. The object's \p default_error_condition
217
- * virtual function shall behave as follows:
218
- *
219
- * If the argument <tt>ev</tt> corresponds to a POSIX <tt>errno</tt> value
220
- * \c posv, the function shall return <tt>error_condition(ev,generic_category())</tt>.
221
- * Otherwise, the function shall return <tt>error_condition(ev,system_category())</tt>.
222
- * What constitutes correspondence for any given operating system is unspecified.
223
- */
224
- inline const error_category &system_category(void);
225
-
226
-
227
- // [19.5.2] Class error_code
228
-
229
-
230
- /*! \brief The class \p error_code describes an object used to hold error code values, such as
231
- * those originating from the operating system or other low-level application program
232
- * interfaces.
233
- */
234
- class error_code
235
- {
236
- public:
237
- // [19.5.2.2] constructors:
238
-
239
- /*! Effects: Constructs an object of type \p error_code.
240
- * \post <tt>value() == 0</tt> and <tt>category() == &system_category()</tt>.
241
- */
242
- inline error_code(void);
243
-
244
- /*! Effects: Constructs an object of type \p error_code.
245
- * \post <tt>value() == val</tt> and <tt>category() == &cat</tt>.
246
- */
247
- inline error_code(int val, const error_category &cat);
248
-
249
- /*! Effects: Constructs an object of type \p error_code.
250
- * \post <tt>*this == make_error_code(e)</tt>.
251
- */
252
- template <typename ErrorCodeEnum>
253
- error_code(ErrorCodeEnum e
254
- // XXX WAR msvc's problem with enable_if
255
- #if THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC
256
- , typename thrust::detail::enable_if<is_error_code_enum<ErrorCodeEnum>::value>::type * = 0
257
- #endif // THRUST_HOST_COMPILER_MSVC
258
- );
259
-
260
- // [19.5.2.3] modifiers:
261
-
262
- /*! \post <tt>value() == val</tt> and <tt>category() == &cat</tt>.
263
- */
264
- inline void assign(int val, const error_category &cat);
265
-
266
- /*! \post <tt>*this == make_error_code(e)</tt>.
267
- */
268
- template <typename ErrorCodeEnum>
269
- // XXX WAR msvc's problem with enable_if
270
- #if THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC
271
- typename thrust::detail::enable_if<is_error_code_enum<ErrorCodeEnum>::value, error_code>::type &
272
- #else
273
- error_code &
274
- #endif // THRUST_HOST_COMPILER_MSVC
275
- operator=(ErrorCodeEnum e);
276
-
277
- /*! \post <tt>value() == 0</tt> and <tt>category() == system_category()</tt>.
278
- */
279
- inline void clear(void);
280
-
281
- // [19.5.2.4] observers:
282
-
283
- /*! \return An integral value of this \p error_code object.
284
- */
285
- inline int value(void) const;
286
-
287
- /*! \return An \p error_category describing the category of this \p error_code object.
288
- */
289
- inline const error_category &category(void) const;
290
-
291
- /*! \return <tt>category().default_error_condition(value())</tt>.
292
- */
293
- inline error_condition default_error_condition(void) const;
294
-
295
- /*! \return <tt>category().message(value())</tt>.
296
- */
297
- inline std::string message(void) const;
298
-
299
- // XXX replace the below upon c++0x
300
- // inline explicit operator bool (void) const;
301
-
302
- /*! \return <tt>value() != 0</tt>.
303
- */
304
- inline operator bool (void) const;
305
-
306
- /*! \cond
307
- */
308
- private:
309
- int m_val;
310
- const error_category *m_cat;
311
- /*! \endcond
312
- */
313
- }; // end error_code
314
-
315
-
316
- // [19.5.2.5] Class error_code non-member functions
317
-
318
-
319
- // XXX replace errc::errc_t with errc upon c++0x
320
- /*! \return <tt>error_code(static_cast<int>(e), generic_category())</tt>
321
- */
322
- inline error_code make_error_code(errc::errc_t e);
323
-
324
-
325
- /*! \return <tt>lhs.category() < rhs.category() || lhs.category() == rhs.category() && lhs.value() < rhs.value()</tt>.
326
- */
327
- inline bool operator<(const error_code &lhs, const error_code &rhs);
328
-
329
-
330
- /*! Effects: <tt>os << ec.category().name() << ':' << ec.value()</tt>.
331
- */
332
- template <typename charT, typename traits>
333
- std::basic_ostream<charT,traits>&
334
- operator<<(std::basic_ostream<charT,traits>& os, const error_code &ec);
335
-
336
-
337
- // [19.5.3] class error_condition
338
-
339
-
340
- /*! \brief The class \p error_condition describes an object used to hold values identifying
341
- * error conditions.
342
- *
343
- * \note \p error_condition values are portable abstractions, while \p error_code values
344
- * are implementation specific.
345
- */
346
- class error_condition
347
- {
348
- public:
349
- // [19.5.3.2] constructors
350
-
351
- /*! Constructs an object of type \p error_condition.
352
- * \post <tt>value() == 0</tt>.
353
- * \post <tt>category() == generic_category()</tt>.
354
- */
355
- inline error_condition(void);
356
-
357
- /*! Constructs an object of type \p error_condition.
358
- * \post <tt>value() == val</tt>.
359
- * \post <tt>category() == cat</tt>.
360
- */
361
- inline error_condition(int val, const error_category &cat);
362
-
363
- /*! Constructs an object of type \p error_condition.
364
- * \post <tt>*this == make_error_condition(e)</tt>.
365
- * \note This constructor shall not participate in overload resolution unless
366
- * <tt>is_error_condition_enum<ErrorConditionEnum>::value</tt> is <tt>true</tt>.
367
- */
368
- template<typename ErrorConditionEnum>
369
- error_condition(ErrorConditionEnum e
370
- // XXX WAR msvc's problem with enable_if
371
- #if THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC
372
- , typename thrust::detail::enable_if<is_error_condition_enum<ErrorConditionEnum>::value>::type * = 0
373
- #endif // THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC
374
- );
375
-
376
- // [19.5.3.3] modifiers
377
-
378
- /*! Assigns to this \p error_condition object from an error value and an \p error_category.
379
- * \param val The new value to return from <tt>value()</tt>.
380
- * \param cat The new \p error_category to return from <tt>category()</tt>.
381
- * \post <tt>value() == val</tt>.
382
- * \post <tt>category() == cat</tt>.
383
- */
384
- inline void assign(int val, const error_category &cat);
385
-
386
- /*! Assigns to this \p error_condition object from an error condition enumeration.
387
- * \return *this
388
- * \post <tt>*this == make_error_condition(e)</tt>.
389
- * \note This operator shall not participate in overload resolution unless
390
- * <tt>is_error_condition_enum<ErrorConditionEnum>::value</tt> is <tt>true</tt>.
391
- */
392
- template<typename ErrorConditionEnum>
393
- // XXX WAR msvc's problem with enable_if
394
- #if THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC
395
- typename thrust::detail::enable_if<is_error_condition_enum<ErrorConditionEnum>::value, error_condition>::type &
396
- #else
397
- error_condition &
398
- #endif // THRUST_HOST_COMPILER != THRUST_HOST_COMPILER_MSVC
399
- operator=(ErrorConditionEnum e);
400
-
401
- /*! Clears this \p error_condition object.
402
- * \post <tt>value() == 0</tt>
403
- * \post <tt>category() == generic_category()</tt>.
404
- */
405
- inline void clear(void);
406
-
407
- // [19.5.3.4] observers
408
-
409
- /*! \return The value encoded by this \p error_condition.
410
- */
411
- inline int value(void) const;
412
-
413
- /*! \return A <tt>const</tt> reference to the \p error_category encoded by this \p error_condition.
414
- */
415
- inline const error_category &category(void) const;
416
-
417
- /*! \return <tt>category().message(value())</tt>.
418
- */
419
- inline std::string message(void) const;
420
-
421
- // XXX replace below with this upon c++0x
422
- //explicit operator bool (void) const;
423
-
424
- /*! \return <tt>value() != 0</tt>.
425
- */
426
- inline operator bool (void) const;
427
-
428
- /*! \cond
429
- */
430
-
431
- private:
432
- int m_val;
433
- const error_category *m_cat;
434
-
435
- /*! \endcond
436
- */
437
- }; // end error_condition
438
-
439
-
440
-
441
- // [19.5.3.5] Class error_condition non-member functions
442
-
443
- // XXX replace errc::errc_t with errc upon c++0x
444
- /*! \return <tt>error_condition(static_cast<int>(e), generic_category())</tt>.
445
- */
446
- inline error_condition make_error_condition(errc::errc_t e);
447
-
448
-
449
- /*! \return <tt>lhs.category() < rhs.category() || lhs.category() == rhs.category() && lhs.value() < rhs.value()</tt>.
450
- */
451
- inline bool operator<(const error_condition &lhs, const error_condition &rhs);
452
-
453
-
454
- // [19.5.4] Comparison operators
455
-
456
-
457
- /*! \return <tt>lhs.category() == rhs.category() && lhs.value() == rhs.value()</tt>.
458
- */
459
- inline bool operator==(const error_code &lhs, const error_code &rhs);
460
-
461
-
462
- /*! \return <tt>lhs.category().equivalent(lhs.value(), rhs) || rhs.category().equivalent(lhs,rhs.value())</tt>.
463
- */
464
- inline bool operator==(const error_code &lhs, const error_condition &rhs);
465
-
466
-
467
- /*! \return <tt>rhs.category().equivalent(rhs.value(), lhs) || lhs.category().equivalent(rhs, lhs.value())</tt>.
468
- */
469
- inline bool operator==(const error_condition &lhs, const error_code &rhs);
470
-
471
-
472
- /*! \return <tt>lhs.category() == rhs.category() && lhs.value() == rhs.value()</tt>
473
- */
474
- inline bool operator==(const error_condition &lhs, const error_condition &rhs);
475
-
476
-
477
- /*! \return <tt>!(lhs == rhs)</tt>
478
- */
479
- inline bool operator!=(const error_code &lhs, const error_code &rhs);
480
-
481
-
482
- /*! \return <tt>!(lhs == rhs)</tt>
483
- */
484
- inline bool operator!=(const error_code &lhs, const error_condition &rhs);
485
-
486
-
487
- /*! \return <tt>!(lhs == rhs)</tt>
488
- */
489
- inline bool operator!=(const error_condition &lhs, const error_code &rhs);
490
-
491
-
492
- /*! \return <tt>!(lhs == rhs)</tt>
493
- */
494
- inline bool operator!=(const error_condition &lhs, const error_condition &rhs);
495
-
496
- /*! \} // end system_diagnostics
497
- */
498
-
499
-
500
- } // end system
501
-
502
-
503
- // import names into thrust::
504
- using system::error_category;
505
- using system::error_code;
506
- using system::error_condition;
507
- using system::is_error_code_enum;
508
- using system::is_error_condition_enum;
509
- using system::make_error_code;
510
- using system::make_error_condition;
511
-
512
- // XXX replace with using system::errc upon c++0x
513
- namespace errc = system::errc;
514
-
515
- using system::generic_category;
516
- using system::system_category;
517
-
518
- } // end thrust
519
-
520
- #include <thrust/system/detail/error_category.inl>
521
- #include <thrust/system/detail/error_code.inl>
522
- #include <thrust/system/detail/error_condition.inl>
523
-
 
spaces/CVPR/Text2Human/Text2Human/ui/mouse_event.py DELETED
@@ -1,129 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
- import numpy as np
4
- from PyQt5.QtCore import *
5
- from PyQt5.QtGui import *
6
- from PyQt5.QtWidgets import *
7
-
8
- color_list = [
9
- QColor(0, 0, 0),
10
- QColor(255, 250, 250),
11
- QColor(220, 220, 220),
12
- QColor(250, 235, 215),
13
- QColor(255, 250, 205),
14
- QColor(211, 211, 211),
15
- QColor(70, 130, 180),
16
- QColor(127, 255, 212),
17
- QColor(0, 100, 0),
18
- QColor(50, 205, 50),
19
- QColor(255, 255, 0),
20
- QColor(245, 222, 179),
21
- QColor(255, 140, 0),
22
- QColor(255, 0, 0),
23
- QColor(16, 78, 139),
24
- QColor(144, 238, 144),
25
- QColor(50, 205, 174),
26
- QColor(50, 155, 250),
27
- QColor(160, 140, 88),
28
- QColor(213, 140, 88),
29
- QColor(90, 140, 90),
30
- QColor(185, 210, 205),
31
- QColor(130, 165, 180),
32
- QColor(225, 141, 151)
33
- ]
34
-
35
-
36
- class GraphicsScene(QGraphicsScene):
37
-
38
- def __init__(self, mode, size, parent=None):
39
- QGraphicsScene.__init__(self, parent)
40
- self.mode = mode
41
- self.size = size
42
- self.mouse_clicked = False
43
- self.prev_pt = None
44
-
45
- # self.masked_image = None
46
-
47
- # save the points
48
- self.mask_points = []
49
- for i in range(len(color_list)):
50
- self.mask_points.append([])
51
-
52
- # save the size of points
53
- self.size_points = []
54
- for i in range(len(color_list)):
55
- self.size_points.append([])
56
-
57
- # save the history of edit
58
- self.history = []
59
-
60
- def reset(self):
61
- # save the points
62
- self.mask_points = []
63
- for i in range(len(color_list)):
64
- self.mask_points.append([])
65
- # save the size of points
66
- self.size_points = []
67
- for i in range(len(color_list)):
68
- self.size_points.append([])
69
- # save the history of edit
70
- self.history = []
71
-
72
- self.mode = 0
73
- self.prev_pt = None
74
-
75
- def mousePressEvent(self, event):
76
- self.mouse_clicked = True
77
-
78
- def mouseReleaseEvent(self, event):
79
- self.prev_pt = None
80
- self.mouse_clicked = False
81
-
82
- def mouseMoveEvent(self, event): # drawing
83
- if self.mouse_clicked:
84
- if self.prev_pt:
85
- self.drawMask(self.prev_pt, event.scenePos(),
86
- color_list[self.mode], self.size)
87
- pts = {}
88
- pts['prev'] = (int(self.prev_pt.x()), int(self.prev_pt.y()))
89
- pts['curr'] = (int(event.scenePos().x()),
90
- int(event.scenePos().y()))
91
-
92
- self.size_points[self.mode].append(self.size)
93
- self.mask_points[self.mode].append(pts)
94
- self.history.append(self.mode)
95
- self.prev_pt = event.scenePos()
96
- else:
97
- self.prev_pt = event.scenePos()
98
-
99
- def drawMask(self, prev_pt, curr_pt, color, size):
100
- lineItem = QGraphicsLineItem(QLineF(prev_pt, curr_pt))
101
- lineItem.setPen(QPen(color, size, Qt.SolidLine)) # rect
102
- self.addItem(lineItem)
103
-
104
- def erase_prev_pt(self):
105
- self.prev_pt = None
106
-
107
- def reset_items(self):
108
- for i in range(len(self.items())):
109
- item = self.items()[0]
110
- self.removeItem(item)
111
-
112
- def undo(self):
113
- if len(self.items()) > 1:
114
- if len(self.items()) >= 9:
115
- for i in range(8):
116
- item = self.items()[0]
117
- self.removeItem(item)
118
- if self.history[-1] == self.mode:
119
- self.mask_points[self.mode].pop()
120
- self.size_points[self.mode].pop()
121
- self.history.pop()
122
- else:
123
- for i in range(len(self.items()) - 1):
124
- item = self.items()[0]
125
- self.removeItem(item)
126
- if self.history[-1] == self.mode:
127
- self.mask_points[self.mode].pop()
128
- self.size_points[self.mode].pop()
129
- self.history.pop()
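
GraphicsScene above only records strokes (mask_points, size_points, history) and draws QGraphicsLineItem segments; it needs a QGraphicsView host to receive mouse events. A minimal, hypothetical harness (mode and brush size are arbitrary, and the import path assumes the repo layout shown above):

    # Hypothetical harness, not part of the deleted file: host the scene in a
    # QGraphicsView so mouse strokes are recorded into mask_points / history.
    import sys
    from PyQt5.QtWidgets import QApplication, QGraphicsView
    from ui.mouse_event import GraphicsScene  # assumed import path

    app = QApplication(sys.argv)
    scene = GraphicsScene(mode=1, size=6)  # label index 1, 6 px brush (arbitrary)
    view = QGraphicsView(scene)
    view.setFixedSize(512, 512)
    view.show()
    sys.exit(app.exec_())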
 
spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py DELETED
@@ -1,83 +0,0 @@
1
- from abc import ABCMeta, abstractmethod
2
-
3
- import torch
4
- import torch.nn as nn
5
- from mmcv import ops
6
-
7
-
8
- class BaseRoIExtractor(nn.Module, metaclass=ABCMeta):
9
- """Base class for RoI extractor.
10
-
11
- Args:
12
- roi_layer (dict): Specify RoI layer type and arguments.
13
- out_channels (int): Output channels of RoI layers.
14
- featmap_strides (List[int]): Strides of input feature maps.
15
- """
16
-
17
- def __init__(self, roi_layer, out_channels, featmap_strides):
18
- super(BaseRoIExtractor, self).__init__()
19
- self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
20
- self.out_channels = out_channels
21
- self.featmap_strides = featmap_strides
22
- self.fp16_enabled = False
23
-
24
- @property
25
- def num_inputs(self):
26
- """int: Number of input feature maps."""
27
- return len(self.featmap_strides)
28
-
29
- def init_weights(self):
30
- pass
31
-
32
- def build_roi_layers(self, layer_cfg, featmap_strides):
33
- """Build RoI operator to extract feature from each level feature map.
34
-
35
- Args:
36
- layer_cfg (dict): Dictionary to construct and config RoI layer
37
- operation. Options are modules under ``mmcv/ops`` such as
38
- ``RoIAlign``.
39
- featmap_strides (List[int]): The stride of input feature map w.r.t
40
- to the original image size, which would be used to scale RoI
41
- coordinate (original image coordinate system) to feature
42
- coordinate system.
43
-
44
- Returns:
45
- nn.ModuleList: The RoI extractor modules for each level feature
46
- map.
47
- """
48
-
49
- cfg = layer_cfg.copy()
50
- layer_type = cfg.pop('type')
51
- assert hasattr(ops, layer_type)
52
- layer_cls = getattr(ops, layer_type)
53
- roi_layers = nn.ModuleList(
54
- [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
55
- return roi_layers
56
-
57
- def roi_rescale(self, rois, scale_factor):
58
- """Scale RoI coordinates by scale factor.
59
-
60
- Args:
61
- rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
62
- scale_factor (float): Scale factor that RoI will be multiplied by.
63
-
64
- Returns:
65
- torch.Tensor: Scaled RoI.
66
- """
67
-
68
- cx = (rois[:, 1] + rois[:, 3]) * 0.5
69
- cy = (rois[:, 2] + rois[:, 4]) * 0.5
70
- w = rois[:, 3] - rois[:, 1]
71
- h = rois[:, 4] - rois[:, 2]
72
- new_w = w * scale_factor
73
- new_h = h * scale_factor
74
- x1 = cx - new_w * 0.5
75
- x2 = cx + new_w * 0.5
76
- y1 = cy - new_h * 0.5
77
- y2 = cy + new_h * 0.5
78
- new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
79
- return new_rois
80
-
81
- @abstractmethod
82
- def forward(self, feats, rois, roi_scale_factor=None):
83
- pass
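
roi_rescale above grows or shrinks each RoI about its centre and leaves the batch index in column 0 untouched. A small self-contained check of that arithmetic (values are arbitrary):

    # Sketch of the roi_rescale math: scale (x1, y1, x2, y2) about the box
    # centre by scale_factor, keeping the batch index in column 0.
    import torch

    rois = torch.tensor([[0., 10., 10., 30., 50.]])  # (batch_idx, x1, y1, x2, y2)
    scale_factor = 2.0

    cx = (rois[:, 1] + rois[:, 3]) * 0.5
    cy = (rois[:, 2] + rois[:, 4]) * 0.5
    new_w = (rois[:, 3] - rois[:, 1]) * scale_factor
    new_h = (rois[:, 4] - rois[:, 2]) * scale_factor
    new_rois = torch.stack((rois[:, 0],
                            cx - new_w * 0.5, cy - new_h * 0.5,
                            cx + new_w * 0.5, cy + new_h * 0.5), dim=-1)
    print(new_rois)  # tensor([[ 0.,  0., -10.,  40.,  70.]])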
 
spaces/Cat125/text-generator-v2/classes.py DELETED
@@ -1,49 +0,0 @@
1
- from random import choice
2
-
3
- import pymorphy3
4
-
5
- morph = pymorphy3.MorphAnalyzer()
6
-
7
- # The Token class takes in a word, previous word, text, sentence, and a boolean value and creates a
8
- # token object with attributes such as count, score, and contexts.
9
- class Token:
10
- def __init__(self, word, prev_word, text, sentence, starter = False, turbo = False):
11
- """
12
- This function initializes a Token with various properties related to a given word and its context
13
- within a sentence.
14
-
15
- :param word: The current word being analyzed
16
- :param prev_word: The word that comes before the current word in the text
17
- :param text: a string containing the entire text to be analyzed
18
- :param sentence: a string representing a sentence in which the word and prev_word occur
19
- :param turbo: A boolean parameter that, when set to True, skips the morphological analysis of words
20
- in the sentence and simply adds all words to the context list. This can be useful for faster
21
- processing, but may result in less accurate context information, defaults to False (optional)
22
- """
23
- self.word = word
24
- self.prev_word = prev_word
25
- self.count = text.count(prev_word + " " + word)
26
- self.score = 0
27
- self.starter = starter
28
- self.contexts = []
29
- for w in sentence.strip().split():
30
- if turbo:
31
- self.contexts.append(w)
32
- continue
33
- result = morph.parse(w)
34
- if len(result) == 0:
35
- continue
36
- result = result[0]
37
- if 'LATN' in result.tag:
38
- continue
39
- if result.tag.POS == 'NOUN':
40
- self.contexts.append(w)
41
- self.contexts.append(result.normal_form)
42
-
43
- def __repr__(self):
44
- """
45
- This function returns a string representation of a Token with information about the previous
46
- word, current word, number of matches, and number of contexts.
47
- :return: A string representation of a Token.
48
- """
49
- return f"'{self.prev_word} > {self.word} ({'starter, ' if self.starter else ''}{self.count}m, {len(self.contexts)}c)'"
 
spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/Dockerfile DELETED
@@ -1,27 +0,0 @@
1
- # Dockerfile
2
-
3
- # The first instruction is what image we want to base our container on
4
- # We Use an official Python runtime as a parent image
5
- FROM python:3.11.5
6
-
7
- # copy and mount application code to image
8
- RUN mkdir -p /code
9
- VOLUME /data:/code
10
- RUN chmod -R 777 /code/
11
- COPY . code
12
- WORKDIR /code
13
- RUN chmod -R 777 /code/
14
-
15
- ENV HF_HOME=/code/.huggingface
16
-
17
- # Allows docker to cache installed dependencies between builds
18
- COPY requirements.txt requirements.txt
19
- RUN pip install -r requirements.txt
20
- # add --no-cache-dir as a parameter to install requirements without using cache
21
-
22
- EXPOSE 7860
23
- # CMD ["/launch.sh"]
24
-
25
- # runs the production server
26
- ENTRYPOINT ["python", "mysite/manage.py"]
27
- CMD ["runserver", "0.0.0.0:7860"]
 
spaces/CikeyQI/meme-api/meme_generator/memes/forbid/__init__.py DELETED
@@ -1,22 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from meme_generator import add_meme
5
- from meme_generator.utils import make_jpg_or_gif
6
- from pil_utils import BuildImage
7
-
8
- img_dir = Path(__file__).parent / "images"
9
-
10
-
11
- def forbid(images: List[BuildImage], texts, args):
12
- frame = BuildImage.open(img_dir / "0.png")
13
-
14
- def make(img: BuildImage) -> BuildImage:
15
- return frame.copy().paste(
16
- img.resize((304, 324), keep_ratio=True), (0, 0), below=True
17
- )
18
-
19
- return make_jpg_or_gif(images[0], make)
20
-
21
-
22
- add_meme("forbid", forbid, min_images=1, max_images=1, keywords=["禁止", "禁"])
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/http.py DELETED
@@ -1,862 +0,0 @@
1
- from __future__ import absolute_import, division, print_function
2
-
3
- import asyncio
4
- import io
5
- import logging
6
- import re
7
- import weakref
8
- from copy import copy
9
- from urllib.parse import urlparse
10
-
11
- import aiohttp
12
- import requests
13
- import yarl
14
-
15
- from fsspec.asyn import AbstractAsyncStreamedFile, AsyncFileSystem, sync, sync_wrapper
16
- from fsspec.callbacks import _DEFAULT_CALLBACK
17
- from fsspec.exceptions import FSTimeoutError
18
- from fsspec.spec import AbstractBufferedFile
19
- from fsspec.utils import DEFAULT_BLOCK_SIZE, isfilelike, nullcontext, tokenize
20
-
21
- from ..caching import AllBytes
22
-
23
- # https://stackoverflow.com/a/15926317/3821154
24
- ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P<url>[^"']+)""")
25
- ex2 = re.compile(r"""(?P<url>http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
26
- logger = logging.getLogger("fsspec.http")
27
-
28
-
29
- async def get_client(**kwargs):
30
- return aiohttp.ClientSession(**kwargs)
31
-
32
-
33
- class HTTPFileSystem(AsyncFileSystem):
34
- """
35
- Simple File-System for fetching data via HTTP(S)
36
-
37
- ``ls()`` is implemented by loading the parent page and doing a regex
38
- match on the result. If simple_links=True, anything of the form
39
- "http(s)://server.com/stuff?thing=other"; otherwise only links within
40
- HTML href tags will be used.
41
- """
42
-
43
- sep = "/"
44
-
45
- def __init__(
46
- self,
47
- simple_links=True,
48
- block_size=None,
49
- same_scheme=True,
50
- size_policy=None,
51
- cache_type="bytes",
52
- cache_options=None,
53
- asynchronous=False,
54
- loop=None,
55
- client_kwargs=None,
56
- get_client=get_client,
57
- encoded=False,
58
- **storage_options,
59
- ):
60
- """
61
- NB: if this is called async, you must await set_client
62
-
63
- Parameters
64
- ----------
65
- block_size: int
66
- Blocks to read bytes; if 0, will default to raw requests file-like
67
- objects instead of HTTPFile instances
68
- simple_links: bool
69
- If True, will consider both HTML <a> tags and anything that looks
70
- like a URL; if False, will consider only the former.
71
- same_scheme: True
72
- When doing ls/glob, if this is True, only consider paths that have
73
- http/https matching the input URLs.
74
- size_policy: this argument is deprecated
75
- client_kwargs: dict
76
- Passed to aiohttp.ClientSession, see
77
- https://docs.aiohttp.org/en/stable/client_reference.html
78
- For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
79
- get_client: Callable[..., aiohttp.ClientSession]
80
- A callable which takes keyword arguments and constructs
81
- an aiohttp.ClientSession. It's state will be managed by
82
- the HTTPFileSystem class.
83
- storage_options: key-value
84
- Any other parameters passed on to requests
85
- cache_type, cache_options: defaults used in open
86
- """
87
- super().__init__(self, asynchronous=asynchronous, loop=loop, **storage_options)
88
- self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
89
- self.simple_links = simple_links
90
- self.same_schema = same_scheme
91
- self.cache_type = cache_type
92
- self.cache_options = cache_options
93
- self.client_kwargs = client_kwargs or {}
94
- self.get_client = get_client
95
- self.encoded = encoded
96
- self.kwargs = storage_options
97
- self._session = None
98
-
99
- # Clean caching-related parameters from `storage_options`
100
- # before propagating them as `request_options` through `self.kwargs`.
101
- # TODO: Maybe rename `self.kwargs` to `self.request_options` to make
102
- # it clearer.
103
- request_options = copy(storage_options)
104
- self.use_listings_cache = request_options.pop("use_listings_cache", False)
105
- request_options.pop("listings_expiry_time", None)
106
- request_options.pop("max_paths", None)
107
- request_options.pop("skip_instance_cache", None)
108
- self.kwargs = request_options
109
-
110
- @property
111
- def fsid(self):
112
- return "http"
113
-
114
- def encode_url(self, url):
115
- return yarl.URL(url, encoded=self.encoded)
116
-
117
- @staticmethod
118
- def close_session(loop, session):
119
- if loop is not None and loop.is_running():
120
- try:
121
- sync(loop, session.close, timeout=0.1)
122
- return
123
- except (TimeoutError, FSTimeoutError):
124
- pass
125
- connector = getattr(session, "_connector", None)
126
- if connector is not None:
127
- # close after loop is dead
128
- connector._close()
129
-
130
- async def set_session(self):
131
- if self._session is None:
132
- self._session = await self.get_client(loop=self.loop, **self.client_kwargs)
133
- if not self.asynchronous:
134
- weakref.finalize(self, self.close_session, self.loop, self._session)
135
- return self._session
136
-
137
- @classmethod
138
- def _strip_protocol(cls, path):
139
- """For HTTP, we always want to keep the full URL"""
140
- return path
141
-
142
- @classmethod
143
- def _parent(cls, path):
144
- # override, since _strip_protocol is different for URLs
145
- par = super()._parent(path)
146
- if len(par) > 7: # "http://..."
147
- return par
148
- return ""
149
-
150
- async def _ls_real(self, url, detail=True, **kwargs):
151
- # ignoring URL-encoded arguments
152
- kw = self.kwargs.copy()
153
- kw.update(kwargs)
154
- logger.debug(url)
155
- session = await self.set_session()
156
- async with session.get(self.encode_url(url), **self.kwargs) as r:
157
- self._raise_not_found_for_status(r, url)
158
- text = await r.text()
159
- if self.simple_links:
160
- links = ex2.findall(text) + [u[2] for u in ex.findall(text)]
161
- else:
162
- links = [u[2] for u in ex.findall(text)]
163
- out = set()
164
- parts = urlparse(url)
165
- for l in links:
166
- if isinstance(l, tuple):
167
- l = l[1]
168
- if l.startswith("/") and len(l) > 1:
169
- # absolute URL on this server
170
- l = parts.scheme + "://" + parts.netloc + l
171
- if l.startswith("http"):
172
- if self.same_schema and l.startswith(url.rstrip("/") + "/"):
173
- out.add(l)
174
- elif l.replace("https", "http").startswith(
175
- url.replace("https", "http").rstrip("/") + "/"
176
- ):
177
- # allowed to cross http <-> https
178
- out.add(l)
179
- else:
180
- if l not in ["..", "../"]:
181
- # Ignore FTP-like "parent"
182
- out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
183
- if not out and url.endswith("/"):
184
- out = await self._ls_real(url.rstrip("/"), detail=False)
185
- if detail:
186
- return [
187
- {
188
- "name": u,
189
- "size": None,
190
- "type": "directory" if u.endswith("/") else "file",
191
- }
192
- for u in out
193
- ]
194
- else:
195
- return list(sorted(out))
196
-
197
- async def _ls(self, url, detail=True, **kwargs):
198
-
199
- if self.use_listings_cache and url in self.dircache:
200
- out = self.dircache[url]
201
- else:
202
- out = await self._ls_real(url, detail=detail, **kwargs)
203
- self.dircache[url] = out
204
- return out
205
-
206
- ls = sync_wrapper(_ls)
207
-
208
- def _raise_not_found_for_status(self, response, url):
209
- """
210
- Raises FileNotFoundError for 404s, otherwise uses raise_for_status.
211
- """
212
- if response.status == 404:
213
- raise FileNotFoundError(url)
214
- response.raise_for_status()
215
-
216
- async def _cat_file(self, url, start=None, end=None, **kwargs):
217
- kw = self.kwargs.copy()
218
- kw.update(kwargs)
219
- logger.debug(url)
220
-
221
- if start is not None or end is not None:
222
- if start == end:
223
- return b""
224
- headers = kw.pop("headers", {}).copy()
225
-
226
- headers["Range"] = await self._process_limits(url, start, end)
227
- kw["headers"] = headers
228
- session = await self.set_session()
229
- async with session.get(self.encode_url(url), **kw) as r:
230
- out = await r.read()
231
- self._raise_not_found_for_status(r, url)
232
- return out
233
-
234
- async def _get_file(
235
- self, rpath, lpath, chunk_size=5 * 2**20, callback=_DEFAULT_CALLBACK, **kwargs
236
- ):
237
- kw = self.kwargs.copy()
238
- kw.update(kwargs)
239
- logger.debug(rpath)
240
- session = await self.set_session()
241
- async with session.get(self.encode_url(rpath), **kw) as r:
242
- try:
243
- size = int(r.headers["content-length"])
244
- except (ValueError, KeyError):
245
- size = None
246
-
247
- callback.set_size(size)
248
- self._raise_not_found_for_status(r, rpath)
249
- if isfilelike(lpath):
250
- outfile = lpath
251
- else:
252
- outfile = open(lpath, "wb")
253
-
254
- try:
255
- chunk = True
256
- while chunk:
257
- chunk = await r.content.read(chunk_size)
258
- outfile.write(chunk)
259
- callback.relative_update(len(chunk))
260
- finally:
261
- if not isfilelike(lpath):
262
- outfile.close()
263
-
264
- async def _put_file(
265
- self,
266
- lpath,
267
- rpath,
268
- chunk_size=5 * 2**20,
269
- callback=_DEFAULT_CALLBACK,
270
- method="post",
271
- **kwargs,
272
- ):
273
- async def gen_chunks():
274
- # Support passing arbitrary file-like objects
275
- # and use them instead of streams.
276
- if isinstance(lpath, io.IOBase):
277
- context = nullcontext(lpath)
278
- use_seek = False # might not support seeking
279
- else:
280
- context = open(lpath, "rb")
281
- use_seek = True
282
-
283
- with context as f:
284
- if use_seek:
285
- callback.set_size(f.seek(0, 2))
286
- f.seek(0)
287
- else:
288
- callback.set_size(getattr(f, "size", None))
289
-
290
- chunk = f.read(chunk_size)
291
- while chunk:
292
- yield chunk
293
- callback.relative_update(len(chunk))
294
- chunk = f.read(chunk_size)
295
-
296
- kw = self.kwargs.copy()
297
- kw.update(kwargs)
298
- session = await self.set_session()
299
-
300
- method = method.lower()
301
- if method not in ("post", "put"):
302
- raise ValueError(
303
- f"method has to be either 'post' or 'put', not: {method!r}"
304
- )
305
-
306
- meth = getattr(session, method)
307
- async with meth(rpath, data=gen_chunks(), **kw) as resp:
308
- self._raise_not_found_for_status(resp, rpath)
309
-
310
- async def _exists(self, path, **kwargs):
311
- kw = self.kwargs.copy()
312
- kw.update(kwargs)
313
- try:
314
- logger.debug(path)
315
- session = await self.set_session()
316
- r = await session.get(self.encode_url(path), **kw)
317
- async with r:
318
- return r.status < 400
319
- except (requests.HTTPError, aiohttp.ClientError):
320
- return False
321
-
322
- async def _isfile(self, path, **kwargs):
323
- return await self._exists(path, **kwargs)
324
-
325
- def _open(
326
- self,
327
- path,
328
- mode="rb",
329
- block_size=None,
330
- autocommit=None, # XXX: This differs from the base class.
331
- cache_type=None,
332
- cache_options=None,
333
- size=None,
334
- **kwargs,
335
- ):
336
- """Make a file-like object
337
-
338
- Parameters
339
- ----------
340
- path: str
341
- Full URL with protocol
342
- mode: string
343
- must be "rb"
344
- block_size: int or None
345
- Bytes to download in one request; use instance value if None. If
346
- zero, will return a streaming Requests file-like instance.
347
- kwargs: key-value
348
- Any other parameters, passed to requests calls
349
- """
350
- if mode != "rb":
351
- raise NotImplementedError
352
- block_size = block_size if block_size is not None else self.block_size
353
- kw = self.kwargs.copy()
354
- kw["asynchronous"] = self.asynchronous
355
- kw.update(kwargs)
356
- size = size or self.info(path, **kwargs)["size"]
357
- session = sync(self.loop, self.set_session)
358
- if block_size and size:
359
- return HTTPFile(
360
- self,
361
- path,
362
- session=session,
363
- block_size=block_size,
364
- mode=mode,
365
- size=size,
366
- cache_type=cache_type or self.cache_type,
367
- cache_options=cache_options or self.cache_options,
368
- loop=self.loop,
369
- **kw,
370
- )
371
- else:
372
- return HTTPStreamFile(
373
- self,
374
- path,
375
- mode=mode,
376
- loop=self.loop,
377
- session=session,
378
- **kw,
379
- )
380
-
381
- async def open_async(self, path, mode="rb", size=None, **kwargs):
382
- session = await self.set_session()
383
- if size is None:
384
- try:
385
- size = (await self._info(path, **kwargs))["size"]
386
- except FileNotFoundError:
387
- pass
388
- return AsyncStreamFile(
389
- self,
390
- path,
391
- loop=self.loop,
392
- session=session,
393
- size=size,
394
- **kwargs,
395
- )
396
-
397
- def ukey(self, url):
398
- """Unique identifier; assume HTTP files are static, unchanging"""
399
- return tokenize(url, self.kwargs, self.protocol)
400
-
401
- async def _info(self, url, **kwargs):
402
- """Get info of URL
403
-
404
- Tries to access location via HEAD, and then GET methods, but does
405
- not fetch the data.
406
-
407
- It is possible that the server does not supply any size information, in
408
- which case size will be given as None (and certain operations on the
409
- corresponding file will not work).
410
- """
411
- info = {}
412
- session = await self.set_session()
413
-
414
- for policy in ["head", "get"]:
415
- try:
416
- info.update(
417
- await _file_info(
418
- self.encode_url(url),
419
- size_policy=policy,
420
- session=session,
421
- **self.kwargs,
422
- **kwargs,
423
- )
424
- )
425
- if info.get("size") is not None:
426
- break
427
- except Exception as exc:
428
- if policy == "get":
429
- # If get failed, then raise a FileNotFoundError
430
- raise FileNotFoundError(url) from exc
431
- logger.debug(str(exc))
432
-
433
- return {"name": url, "size": None, **info, "type": "file"}
434
-
435
- async def _glob(self, path, **kwargs):
436
- """
437
- Find files by glob-matching.
438
-
439
- This implementation is identical to the one in AbstractFileSystem,
440
- but "?" is not considered as a character for globbing, because it is
441
- so common in URLs, often identifying the "query" part.
442
- """
443
- import re
444
-
445
- ends = path.endswith("/")
446
- path = self._strip_protocol(path)
447
- indstar = path.find("*") if path.find("*") >= 0 else len(path)
448
- indbrace = path.find("[") if path.find("[") >= 0 else len(path)
449
-
450
- ind = min(indstar, indbrace)
451
-
452
- detail = kwargs.pop("detail", False)
453
-
454
- if not has_magic(path):
455
- root = path
456
- depth = 1
457
- if ends:
458
- path += "/*"
459
- elif await self._exists(path):
460
- if not detail:
461
- return [path]
462
- else:
463
- return {path: await self._info(path)}
464
- else:
465
- if not detail:
466
- return [] # glob of non-existent returns empty
467
- else:
468
- return {}
469
- elif "/" in path[:ind]:
470
- ind2 = path[:ind].rindex("/")
471
- root = path[: ind2 + 1]
472
- depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
473
- else:
474
- root = ""
475
- depth = None if "**" in path else path[ind + 1 :].count("/") + 1
476
-
477
- allpaths = await self._find(
478
- root, maxdepth=depth, withdirs=True, detail=True, **kwargs
479
- )
480
- # Escape characters special to python regex, leaving our supported
481
- # special characters in place.
482
- # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
483
- # for shell globbing details.
484
- pattern = (
485
- "^"
486
- + (
487
- path.replace("\\", r"\\")
488
- .replace(".", r"\.")
489
- .replace("+", r"\+")
490
- .replace("//", "/")
491
- .replace("(", r"\(")
492
- .replace(")", r"\)")
493
- .replace("|", r"\|")
494
- .replace("^", r"\^")
495
- .replace("$", r"\$")
496
- .replace("{", r"\{")
497
- .replace("}", r"\}")
498
- .rstrip("/")
499
- )
500
- + "$"
501
- )
502
- pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
503
- pattern = re.sub("[*]", "[^/]*", pattern)
504
- pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
505
- out = {
506
- p: allpaths[p]
507
- for p in sorted(allpaths)
508
- if pattern.match(p.replace("//", "/").rstrip("/"))
509
- }
510
- if detail:
511
- return out
512
- else:
513
- return list(out)
514
-
515
- async def _isdir(self, path):
516
- # override, since all URLs are (also) files
517
- try:
518
- return bool(await self._ls(path))
519
- except (FileNotFoundError, ValueError):
520
- return False
521
-
522
-
523
- class HTTPFile(AbstractBufferedFile):
524
- """
525
- A file-like object pointing to a remove HTTP(S) resource
526
-
527
- Supports only reading, with read-ahead of a predermined block-size.
528
-
529
- In the case that the server does not supply the filesize, only reading of
530
- the complete file in one go is supported.
531
-
532
- Parameters
533
- ----------
534
- url: str
535
- Full URL of the remote resource, including the protocol
536
- session: requests.Session or None
537
- All calls will be made within this session, to avoid restarting
538
- connections where the server allows this
539
- block_size: int or None
540
- The amount of read-ahead to do, in bytes. Default is 5MB, or the value
541
- configured for the FileSystem creating this file
542
- size: None or int
543
- If given, this is the size of the file in bytes, and we don't attempt
544
- to call the server to find the value.
545
- kwargs: all other key-values are passed to requests calls.
546
- """
547
-
548
- def __init__(
549
- self,
550
- fs,
551
- url,
552
- session=None,
553
- block_size=None,
554
- mode="rb",
555
- cache_type="bytes",
556
- cache_options=None,
557
- size=None,
558
- loop=None,
559
- asynchronous=False,
560
- **kwargs,
561
- ):
562
- if mode != "rb":
563
- raise NotImplementedError("File mode not supported")
564
- self.asynchronous = asynchronous
565
- self.url = url
566
- self.session = session
567
- self.details = {"name": url, "size": size, "type": "file"}
568
- super().__init__(
569
- fs=fs,
570
- path=url,
571
- mode=mode,
572
- block_size=block_size,
573
- cache_type=cache_type,
574
- cache_options=cache_options,
575
- **kwargs,
576
- )
577
- self.loop = loop
578
-
579
- def read(self, length=-1):
580
- """Read bytes from file
581
-
582
- Parameters
583
- ----------
584
- length: int
585
- Read up to this many bytes. If negative, read all content to end of
586
- file. If the server has not supplied the filesize, attempting to
587
- read only part of the data will raise a ValueError.
588
- """
589
- if (
590
- (length < 0 and self.loc == 0) # explicit read all
591
- # but not when the size is known and fits into a block anyways
592
- and not (self.size is not None and self.size <= self.blocksize)
593
- ):
594
- self._fetch_all()
595
- if self.size is None:
596
- if length < 0:
597
- self._fetch_all()
598
- else:
599
- length = min(self.size - self.loc, length)
600
- return super().read(length)
601
-
602
- async def async_fetch_all(self):
603
- """Read whole file in one shot, without caching
604
-
605
- This is only called when position is still at zero,
606
- and read() is called without a byte-count.
607
- """
608
- logger.debug(f"Fetch all for {self}")
609
- if not isinstance(self.cache, AllBytes):
610
- r = await self.session.get(self.fs.encode_url(self.url), **self.kwargs)
611
- async with r:
612
- r.raise_for_status()
613
- out = await r.read()
614
- self.cache = AllBytes(
615
- size=len(out), fetcher=None, blocksize=None, data=out
616
- )
617
- self.size = len(out)
618
-
619
- _fetch_all = sync_wrapper(async_fetch_all)
620
-
621
- def _parse_content_range(self, headers):
622
- """Parse the Content-Range header"""
623
- s = headers.get("Content-Range", "")
624
- m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s)
625
- if not m:
626
- return None, None, None
627
-
628
- if m[1] == "*":
629
- start = end = None
630
- else:
631
- start, end = [int(x) for x in m[1].split("-")]
632
- total = None if m[2] == "*" else int(m[2])
633
- return start, end, total
634
-
635
- async def async_fetch_range(self, start, end):
636
- """Download a block of data
637
-
638
- The expectation is that the server returns only the requested bytes,
639
- with HTTP code 206. If this is not the case, we first check the headers,
640
- and then stream the output - if the data size is bigger than we
641
- requested, an exception is raised.
642
- """
643
- logger.debug(f"Fetch range for {self}: {start}-{end}")
644
- kwargs = self.kwargs.copy()
645
- headers = kwargs.pop("headers", {}).copy()
646
- headers["Range"] = "bytes=%i-%i" % (start, end - 1)
647
- logger.debug(str(self.url) + " : " + headers["Range"])
648
- r = await self.session.get(
649
- self.fs.encode_url(self.url), headers=headers, **kwargs
650
- )
651
- async with r:
652
- if r.status == 416:
653
- # range request outside file
654
- return b""
655
- r.raise_for_status()
656
-
657
- # If the server has handled the range request, it should reply
658
- # with status 206 (partial content). But we'll guess that a suitable
659
- # Content-Range header or a Content-Length no more than the
660
- # requested range also mean we have got the desired range.
661
- response_is_range = (
662
- r.status == 206
663
- or self._parse_content_range(r.headers)[0] == start
664
- or int(r.headers.get("Content-Length", end + 1)) <= end - start
665
- )
666
-
667
- if response_is_range:
668
- # partial content, as expected
669
- out = await r.read()
670
- elif start > 0:
671
- raise ValueError(
672
- "The HTTP server doesn't appear to support range requests. "
673
- "Only reading this file from the beginning is supported. "
674
- "Open with block_size=0 for a streaming file interface."
675
- )
676
- else:
677
- # Response is not a range, but we want the start of the file,
678
- # so we can read the required amount anyway.
679
- cl = 0
680
- out = []
681
- while True:
682
- chunk = await r.content.read(2**20)
683
- # data size unknown, let's read until we have enough
684
- if chunk:
685
- out.append(chunk)
686
- cl += len(chunk)
687
- if cl > end - start:
688
- break
689
- else:
690
- break
691
- out = b"".join(out)[: end - start]
692
- return out
693
-
694
- _fetch_range = sync_wrapper(async_fetch_range)
695
-
696
- def __reduce__(self):
697
- return (
698
- reopen,
699
- (
700
- self.fs,
701
- self.url,
702
- self.mode,
703
- self.blocksize,
704
- self.cache.name if self.cache else "none",
705
- self.size,
706
- ),
707
- )
708
-
709
-
710
- def reopen(fs, url, mode, blocksize, cache_type, size=None):
711
- return fs.open(
712
- url, mode=mode, block_size=blocksize, cache_type=cache_type, size=size
713
- )
714
-
715
-
716
- magic_check = re.compile("([*[])")
717
-
718
-
719
- def has_magic(s):
720
- match = magic_check.search(s)
721
- return match is not None
722
-
723
-
724
- class HTTPStreamFile(AbstractBufferedFile):
725
- def __init__(self, fs, url, mode="rb", loop=None, session=None, **kwargs):
726
- self.asynchronous = kwargs.pop("asynchronous", False)
727
- self.url = url
728
- self.loop = loop
729
- self.session = session
730
- if mode != "rb":
731
- raise ValueError
732
- self.details = {"name": url, "size": None}
733
- super().__init__(fs=fs, path=url, mode=mode, cache_type="none", **kwargs)
734
-
735
- async def cor():
736
- r = await self.session.get(self.fs.encode_url(url), **kwargs).__aenter__()
737
- self.fs._raise_not_found_for_status(r, url)
738
- return r
739
-
740
- self.r = sync(self.loop, cor)
741
-
742
- def seek(self, loc, whence=0):
743
- if loc == 0 and whence == 1:
744
- return
745
- if loc == self.loc and whence == 0:
746
- return
747
- raise ValueError("Cannot seek streaming HTTP file")
748
-
749
- async def _read(self, num=-1):
750
- out = await self.r.content.read(num)
751
- self.loc += len(out)
752
- return out
753
-
754
- read = sync_wrapper(_read)
755
-
756
- async def _close(self):
757
- self.r.close()
758
-
759
- def close(self):
760
- asyncio.run_coroutine_threadsafe(self._close(), self.loop)
761
- super().close()
762
-
763
- def __reduce__(self):
764
- return reopen, (self.fs, self.url, self.mode, self.blocksize, self.cache.name)
765
-
766
-
767
- class AsyncStreamFile(AbstractAsyncStreamedFile):
768
- def __init__(
769
- self, fs, url, mode="rb", loop=None, session=None, size=None, **kwargs
770
- ):
771
- self.url = url
772
- self.session = session
773
- self.r = None
774
- if mode != "rb":
775
- raise ValueError
776
- self.details = {"name": url, "size": None}
777
- self.kwargs = kwargs
778
- super().__init__(fs=fs, path=url, mode=mode, cache_type="none")
779
- self.size = size
780
-
781
- async def read(self, num=-1):
782
- if self.r is None:
783
- r = await self.session.get(
784
- self.fs.encode_url(self.url), **self.kwargs
785
- ).__aenter__()
786
- self.fs._raise_not_found_for_status(r, self.url)
787
- self.r = r
788
- out = await self.r.content.read(num)
789
- self.loc += len(out)
790
- return out
791
-
792
- async def close(self):
793
- if self.r is not None:
794
- self.r.close()
795
- self.r = None
796
- await super().close()
797
-
798
-
799
- async def get_range(session, url, start, end, file=None, **kwargs):
800
- # explicit get a range when we know it must be safe
801
- kwargs = kwargs.copy()
802
- headers = kwargs.pop("headers", {}).copy()
803
- headers["Range"] = "bytes=%i-%i" % (start, end - 1)
804
- r = await session.get(url, headers=headers, **kwargs)
805
- r.raise_for_status()
806
- async with r:
807
- out = await r.read()
808
- if file:
809
- with open(file, "rb+") as f:
810
- f.seek(start)
811
- f.write(out)
812
- else:
813
- return out
814
-
815
-
816
- async def _file_info(url, session, size_policy="head", **kwargs):
817
- """Call HEAD on the server to get details about the file (size/checksum etc.)
818
-
819
- Default operation is to explicitly allow redirects and use encoding
820
- 'identity' (no compression) to get the true size of the target.
821
- """
822
- logger.debug("Retrieve file size for %s" % url)
823
- kwargs = kwargs.copy()
824
- ar = kwargs.pop("allow_redirects", True)
825
- head = kwargs.get("headers", {}).copy()
826
- head["Accept-Encoding"] = "identity"
827
- kwargs["headers"] = head
828
-
829
- info = {}
830
- if size_policy == "head":
831
- r = await session.head(url, allow_redirects=ar, **kwargs)
832
- elif size_policy == "get":
833
- r = await session.get(url, allow_redirects=ar, **kwargs)
834
- else:
835
- raise TypeError('size_policy must be "head" or "get", got %s' "" % size_policy)
836
- async with r:
837
- r.raise_for_status()
838
-
839
- # TODO:
840
- # recognise lack of 'Accept-Ranges',
841
- # or 'Accept-Ranges': 'none' (not 'bytes')
842
- # to mean streaming only, no random access => return None
843
- if "Content-Length" in r.headers:
844
- info["size"] = int(r.headers["Content-Length"])
845
- elif "Content-Range" in r.headers:
846
- info["size"] = int(r.headers["Content-Range"].split("/")[1])
847
-
848
- for checksum_field in ["ETag", "Content-MD5", "Digest"]:
849
- if r.headers.get(checksum_field):
850
- info[checksum_field] = r.headers[checksum_field]
851
-
852
- return info
853
-
854
-
855
- async def _file_size(url, session=None, *args, **kwargs):
856
- if session is None:
857
- session = await get_client()
858
- info = await _file_info(url, session=session, *args, **kwargs)
859
- return info.get("size")
860
-
861
-
862
- file_size = sync_wrapper(_file_size)
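
HTTPFileSystem above lists directories by regex-scraping the parent HTML page and opens files either as random-access HTTPFile objects or, with block_size=0, as streaming readers. A short usage sketch through fsspec's registry (the URL is a placeholder):

    # Illustrative sketch only; the URL is a placeholder, not one used by this repo.
    import fsspec

    fs = fsspec.filesystem("http")             # an HTTPFileSystem instance
    url = "https://example.com/data/file.bin"  # hypothetical resource

    info = fs.info(url)                        # HEAD first, then GET fallback
    print(info.get("size"))

    with fs.open(url, "rb", block_size=2**20) as f:  # ranged reads via HTTPFile
        head = f.read(1024)

    first_bytes = fs.cat_file(url, start=0, end=256)  # single Range request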
 
spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/01_🎥_Input_YouTube_Link.py DELETED
@@ -1,258 +0,0 @@
1
- import whisper
2
- from pytube import YouTube
3
- import requests
4
- import time
5
- import streamlit as st
6
- from streamlit_lottie import st_lottie
7
- import numpy as np
8
- import os
9
- from typing import Iterator
10
- from io import StringIO
11
- from utils import write_vtt, write_srt
12
- import ffmpeg
13
- from languages import LANGUAGES
14
-
15
- st.set_page_config(page_title="Auto Subtitled Video Generator", page_icon=":movie_camera:", layout="wide")
16
-
17
- # Define a function that we can use to load lottie files from a link.
18
- @st.cache()
19
- def load_lottieurl(url: str):
20
- r = requests.get(url)
21
- if r.status_code != 200:
22
- return None
23
- return r.json()
24
-
25
- col1, col2 = st.columns([1, 3])
26
- with col1:
27
- lottie = load_lottieurl("https://assets8.lottiefiles.com/packages/lf20_jh9gfdye.json")
28
- st_lottie(lottie)
29
-
30
- with col2:
31
- st.write("""
32
- ## Auto Subtitled Video Generator
33
- ##### Input a YouTube video link and get a video with subtitles.
34
- ###### ➠ If you want to transcribe the video in its original language, select the task as "Transcribe"
35
- ###### ➠ If you want to translate the subtitles to English, select the task as "Translate"
36
- ###### I recommend starting with the base model and then experimenting with the larger models, the small and medium models often work well. """)
37
-
38
-
39
- @st.cache(allow_output_mutation=True)
40
- def populate_metadata(link):
41
- yt = YouTube(link)
42
- author = yt.author
43
- title = yt.title
44
- description = yt.description
45
- thumbnail = yt.thumbnail_url
46
- length = yt.length
47
- views = yt.views
48
- return author, title, description, thumbnail, length, views
49
-
50
-
51
- @st.cache(allow_output_mutation=True)
52
- def download_video(link):
53
- yt = YouTube(link)
54
- video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download()
55
- return video
56
-
57
-
58
- def convert(seconds):
59
- return time.strftime("%H:%M:%S", time.gmtime(seconds))
60
-
61
-
62
- loaded_model = whisper.load_model("base")
63
- current_size = "None"
64
-
65
-
66
- @st.cache(allow_output_mutation=True)
67
- def change_model(current_size, size):
68
- if current_size != size:
69
- loaded_model = whisper.load_model(size)
70
- return loaded_model
71
- else:
72
- raise Exception("Model size is the same as the current size.")
73
-
74
-
75
- @st.cache(allow_output_mutation=True)
76
- def inference(link, loaded_model, task):
77
- yt = YouTube(link)
78
- path = yt.streams.filter(only_audio=True)[0].download(filename="audio.mp3")
79
- if task == "Transcribe":
80
- options = dict(task="transcribe", best_of=5)
81
- results = loaded_model.transcribe(path, **options)
82
- vtt = getSubs(results["segments"], "vtt", 80)
83
- srt = getSubs(results["segments"], "srt", 80)
84
- lang = results["language"]
85
- return results["text"], vtt, srt, lang
86
- elif task == "Translate":
87
- options = dict(task="translate", best_of=5)
88
- results = loaded_model.transcribe(path, **options)
89
- vtt = getSubs(results["segments"], "vtt", 80)
90
- srt = getSubs(results["segments"], "srt", 80)
91
- lang = results["language"]
92
- return results["text"], vtt, srt, lang
93
- else:
94
- raise ValueError("Task not supported")
95
-
96
-
97
- @st.cache(allow_output_mutation=True)
98
- def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
99
- segmentStream = StringIO()
100
-
101
- if format == 'vtt':
102
- write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
103
- elif format == 'srt':
104
- write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
105
- else:
106
- raise Exception("Unknown format " + format)
107
-
108
- segmentStream.seek(0)
109
- return segmentStream.read()
110
-
111
-
112
- def get_language_code(language):
113
- if language in LANGUAGES.keys():
114
- detected_language = LANGUAGES[language]
115
-         return detected_language
-     else:
-         raise ValueError("Language not supported")
-
-
- def generate_subtitled_video(video, audio, transcript):
-     video_file = ffmpeg.input(video)
-     audio_file = ffmpeg.input(audio)
-     ffmpeg.concat(video_file.filter("subtitles", transcript), audio_file, v=1, a=1).output("final.mp4").run(quiet=True, overwrite_output=True)
-     video_with_subs = open("final.mp4", "rb")
-     return video_with_subs
-
-
- def main():
-     size = st.selectbox("Select Model Size (The larger the model, the more accurate the transcription will be, but it will take longer)", ["tiny", "base", "small", "medium", "large"], index=1)
-     loaded_model = change_model(current_size, size)
-     st.write(f"Model is {'multilingual' if loaded_model.is_multilingual else 'English-only'} "
-              f"and has {sum(np.prod(p.shape) for p in loaded_model.parameters()):,} parameters.")
-     link = st.text_input("YouTube Link (The longer the video, the longer the processing time)")
-     task = st.selectbox("Select Task", ["Transcribe", "Translate"], index=0)
-     if task == "Transcribe":
-         if st.button("Transcribe"):
-             author, title, description, thumbnail, length, views = populate_metadata(link)
-             results = inference(link, loaded_model, task)
-             video = download_video(link)
-             lang = results[3]
-             detected_language = get_language_code(lang)
-
-             col3, col4 = st.columns(2)
-             col5, col6, col7, col8 = st.columns(4)
-             col9, col10 = st.columns(2)
-             with col3:
-                 st.video(video)
-
-             # Write the results to a .txt file and download it.
-             with open("transcript.txt", "w+", encoding='utf8') as f:
-                 f.writelines(results[0])
-                 f.close()
-             with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                 datatxt = f.read()
-
-             with open("transcript.vtt", "w+",encoding='utf8') as f:
-                 f.writelines(results[1])
-                 f.close()
-             with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                 datavtt = f.read()
-
-             with open("transcript.srt", "w+",encoding='utf8') as f:
-                 f.writelines(results[2])
-                 f.close()
-             with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                 datasrt = f.read()
-
-             with col5:
-                 st.download_button(label="Download Transcript (.txt)",
-                                    data=datatxt,
-                                    file_name="transcript.txt")
-             with col6:
-                 st.download_button(label="Download Transcript (.vtt)",
-                                    data=datavtt,
-                                    file_name="transcript.vtt")
-             with col7:
-                 st.download_button(label="Download Transcript (.srt)",
-                                    data=datasrt,
-                                    file_name="transcript.srt")
-             with col9:
-                 st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-             with col10:
-                 st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-
-             with col4:
-                 with st.spinner("Generating Subtitled Video"):
-                     video_with_subs = generate_subtitled_video(video, "audio.mp3", "transcript.srt")
-                 st.video(video_with_subs)
-                 st.balloons()
-             with col8:
-                 st.download_button(label="Download Subtitled Video",
-                                    data=video_with_subs,
-                                    file_name=f"{title} with subtitles.mp4")
-     elif task == "Translate":
-         if st.button("Translate to English"):
-             author, title, description, thumbnail, length, views = populate_metadata(link)
-             results = inference(link, loaded_model, task)
-             video = download_video(link)
-             lang = results[3]
-             detected_language = get_language_code(lang)
-
-             col3, col4 = st.columns(2)
-             col5, col6, col7, col8 = st.columns(4)
-             col9, col10 = st.columns(2)
-             with col3:
-                 st.video(video)
-
-             # Write the results to a .txt file and download it.
-             with open("transcript.txt", "w+", encoding='utf8') as f:
-                 f.writelines(results[0])
-                 f.close()
-             with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                 datatxt = f.read()
-
-             with open("transcript.vtt", "w+",encoding='utf8') as f:
-                 f.writelines(results[1])
-                 f.close()
-             with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                 datavtt = f.read()
-
-             with open("transcript.srt", "w+",encoding='utf8') as f:
-                 f.writelines(results[2])
-                 f.close()
-             with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                 datasrt = f.read()
-             with col5:
-                 st.download_button(label="Download Transcript (.txt)",
-                                    data=datatxt,
-                                    file_name="transcript.txt")
-             with col6:
-                 st.download_button(label="Download Transcript (.vtt)",
-                                    data=datavtt,
-                                    file_name="transcript.vtt")
-             with col7:
-                 st.download_button(label="Download Transcript (.srt)",
-                                    data=datasrt,
-                                    file_name="transcript.srt")
-             with col9:
-                 st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-             with col10:
-                 st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-
-             with col4:
-                 with st.spinner("Generating Subtitled Video"):
-                     video_with_subs = generate_subtitled_video(video, "audio.mp3", "transcript.srt")
-                 st.video(video_with_subs)
-                 st.balloons()
-             with col8:
-                 st.download_button(label="Download Subtitled Video",
-                                    data=video_with_subs,
-                                    file_name=f"{title} with subtitles.mp4")
-     else:
-         st.error("Please select a task.")
-
-
- if __name__ == "__main__":
-     main()
-     st.markdown("###### Made with :heart: by [@BatuhanYılmaz](https://twitter.com/batuhan3326) [![this is an image link](https://i.imgur.com/thJhzOO.png)](https://www.buymeacoffee.com/batuhanylmz)")
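For context on the ffmpeg step used by generate_subtitled_video above, here is a minimal, self-contained sketch of the same subtitle burn with ffmpeg-python; the file names (video.mp4, audio.mp3, transcript.srt, final.mp4) are placeholders, and it assumes the ffmpeg binary is available on PATH.

import ffmpeg  # ffmpeg-python wrapper around the ffmpeg CLI

def burn_subtitles(video_path, audio_path, subtitle_path, out_path="final.mp4"):
    # Apply the "subtitles" filter to the video stream, then mux it with the audio stream,
    # mirroring the call chain in generate_subtitled_video above.
    video = ffmpeg.input(video_path).filter("subtitles", subtitle_path)
    audio = ffmpeg.input(audio_path)
    ffmpeg.concat(video, audio, v=1, a=1).output(out_path).run(quiet=True, overwrite_output=True)
    return out_path

# Hypothetical usage:
# burn_subtitles("video.mp4", "audio.mp3", "transcript.srt")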
 
spaces/Danielzero/GPT3.5/assets/custom.css DELETED
@@ -1,353 +0,0 @@
1
- :root {
2
- --chatbot-color-light: #F3F3F3;
3
- --chatbot-color-dark: #121111;
4
- }
5
-
6
- #app_title {
7
- font-weight: var(--prose-header-text-weight);
8
- font-size: var(--text-xxl);
9
- line-height: 1.3;
10
- text-align: left;
11
- margin-top: 6px;
12
- white-space: nowrap;
13
- }
14
- #description {
15
- text-align: center;
16
- margin:16px 0
17
- }
18
-
19
- /* 覆盖gradio的页脚信息QAQ */
20
- /* footer {
21
- display: none !important;
22
- } */
23
- #footer {
24
- text-align: center;
25
- }
26
- #footer div {
27
- display: inline-block;
28
- }
29
- #footer .versions{
30
- font-size: 85%;
31
- opacity: 0.85;
32
- }
33
-
34
- #float_display {
35
- position: absolute;
36
- max-height: 30px;
37
- }
38
- /* user_info */
39
- #user_info {
40
- white-space: nowrap;
41
- position: absolute; left: 8em; top: .2em;
42
- z-index: var(--layer-2);
43
- box-shadow: var(--block-shadow);
44
- border: none; border-radius: var(--block-label-radius);
45
- background: var(--color-accent);
46
- padding: var(--block-label-padding);
47
- font-size: var(--block-label-text-size); line-height: var(--line-sm);
48
- width: auto; min-height: 30px!important;
49
- opacity: 1;
50
- transition: opacity 0.3s ease-in-out;
51
- }
52
- #user_info .wrap {
53
- opacity: 0;
54
- }
55
- #user_info p {
56
- color: white;
57
- font-weight: var(--block-label-text-weight);
58
- }
59
- #user_info.hideK {
60
- opacity: 0;
61
- transition: opacity 1s ease-in-out;
62
- }
63
-
64
- /* status_display */
65
- #status_display {
66
- display: flex;
67
- min-height: 2em;
68
- align-items: flex-end;
69
- justify-content: flex-end;
70
- }
71
- #status_display p {
72
- font-size: .85em;
73
- font-family: monospace;
74
- color: var(--body-text-color-subdued);
75
- }
76
-
77
- #status_display {
78
- transition: all 0.6s;
79
- }
80
- #chuanhu_chatbot {
81
- transition: height 0.3s ease;
82
- }
83
-
84
- /* usage_display */
85
- .insert_block {
86
- position: relative;
87
- margin: 0;
88
- padding: .5em 1em;
89
- box-shadow: var(--block-shadow);
90
- border-width: var(--block-border-width);
91
- border-color: var(--block-border-color);
92
- border-radius: var(--block-radius);
93
- background: var(--block-background-fill);
94
- width: 100%;
95
- line-height: var(--line-sm);
96
- min-height: 2em;
97
- }
98
- #usage_display p, #usage_display span {
99
- margin: 0;
100
- font-size: .85em;
101
- color: var(--body-text-color-subdued);
102
- }
103
- .progress-bar {
104
- background-color: var(--input-background-fill);;
105
- margin: 0 1em;
106
- height: 20px;
107
- border-radius: 10px;
108
- overflow: hidden;
109
- }
110
- .progress {
111
- background-color: var(--block-title-background-fill);
112
- height: 100%;
113
- border-radius: 10px;
114
- text-align: right;
115
- transition: width 0.5s ease-in-out;
116
- }
117
- .progress-text {
118
- /* color: white; */
119
- color: var(--color-accent) !important;
120
- font-size: 1em !important;
121
- font-weight: bold;
122
- padding-right: 10px;
123
- line-height: 20px;
124
- }
125
-
126
- .apSwitch {
127
- top: 2px;
128
- display: inline-block;
129
- height: 24px;
130
- position: relative;
131
- width: 48px;
132
- border-radius: 12px;
133
- }
134
- .apSwitch input {
135
- display: none !important;
136
- }
137
- .apSlider {
138
- background-color: var(--block-label-background-fill);
139
- bottom: 0;
140
- cursor: pointer;
141
- left: 0;
142
- position: absolute;
143
- right: 0;
144
- top: 0;
145
- transition: .4s;
146
- font-size: 18px;
147
- border-radius: 12px;
148
- }
149
- .apSlider::before {
150
- bottom: -1.5px;
151
- left: 1px;
152
- position: absolute;
153
- transition: .4s;
154
- content: "🌞";
155
- }
156
- input:checked + .apSlider {
157
- background-color: var(--block-label-background-fill);
158
- }
159
- input:checked + .apSlider::before {
160
- transform: translateX(23px);
161
- content:"🌚";
162
- }
163
-
164
- #submit_btn, #cancel_btn {
165
- height: 42px !important;
166
- }
167
- #submit_btn::before {
168
- content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
169
- height: 21px;
170
- }
171
- #cancel_btn::before {
172
- content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
173
- height: 21px;
174
- }
175
- /* list */
176
- ol:not(.options), ul:not(.options) {
177
- padding-inline-start: 2em !important;
178
- }
179
-
180
- /* 亮色(默认) */
181
- #chuanhu_chatbot {
182
- background-color: var(--chatbot-color-light) !important;
183
- color: #000000 !important;
184
- }
185
- [data-testid = "bot"] {
186
- background-color: #FFFFFF !important;
187
- }
188
- [data-testid = "user"] {
189
- background-color: #95EC69 !important;
190
- }
191
- /* 暗色 */
192
- .dark #chuanhu_chatbot {
193
- background-color: var(--chatbot-color-dark) !important;
194
- color: #FFFFFF !important;
195
- }
196
- .dark [data-testid = "bot"] {
197
- background-color: #2C2C2C !important;
198
- }
199
- .dark [data-testid = "user"] {
200
- background-color: #26B561 !important;
201
- }
202
-
203
- /* 屏幕宽度大于等于500px的设备 */
204
- /* update on 2023.4.8: 高度的细致调整已写入JavaScript */
205
- @media screen and (min-width: 500px) {
206
- #chuanhu_chatbot {
207
- height: calc(100vh - 200px);
208
- }
209
- #chuanhu_chatbot .wrap {
210
- max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
211
- }
212
- }
213
- /* 屏幕宽度小于500px的设备 */
214
- @media screen and (max-width: 499px) {
215
- #chuanhu_chatbot {
216
- height: calc(100vh - 140px);
217
- }
218
- #chuanhu_chatbot .wrap {
219
- max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
220
- }
221
- [data-testid = "bot"] {
222
- max-width: 98% !important;
223
- }
224
- #app_title h1{
225
- letter-spacing: -1px; font-size: 22px;
226
- }
227
- }
228
- /* 对话气泡 */
229
- [class *= "message"] {
230
- border-radius: var(--radius-xl) !important;
231
- border: none;
232
- padding: var(--spacing-xl) !important;
233
- font-size: var(--text-md) !important;
234
- line-height: var(--line-md) !important;
235
- min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
236
- min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
237
- }
238
- [data-testid = "bot"] {
239
- max-width: 85%;
240
- border-bottom-left-radius: 0 !important;
241
- }
242
- [data-testid = "user"] {
243
- max-width: 85%;
244
- width: auto !important;
245
- border-bottom-right-radius: 0 !important;
246
- }
247
- /* 表格 */
248
- table {
249
- margin: 1em 0;
250
- border-collapse: collapse;
251
- empty-cells: show;
252
- }
253
- td,th {
254
- border: 1.2px solid var(--border-color-primary) !important;
255
- padding: 0.2em;
256
- }
257
- thead {
258
- background-color: rgba(175,184,193,0.2);
259
- }
260
- thead th {
261
- padding: .5em .2em;
262
- }
263
- /* 行内代码 */
264
- code {
265
- display: inline;
266
- white-space: break-spaces;
267
- border-radius: 6px;
268
- margin: 0 2px 0 2px;
269
- padding: .2em .4em .1em .4em;
270
- background-color: rgba(175,184,193,0.2);
271
- }
272
- /* 代码块 */
273
- pre code {
274
- display: block;
275
- overflow: auto;
276
- white-space: pre;
277
- background-color: hsla(0, 0%, 0%, 80%)!important;
278
- border-radius: 10px;
279
- padding: 1.4em 1.2em 0em 1.4em;
280
- margin: 1.2em 2em 1.2em 0.5em;
281
- color: #FFF;
282
- box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
283
- }
284
- /* 代码高亮样式 */
285
- .highlight .hll { background-color: #49483e }
286
- .highlight .c { color: #75715e } /* Comment */
287
- .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
288
- .highlight .k { color: #66d9ef } /* Keyword */
289
- .highlight .l { color: #ae81ff } /* Literal */
290
- .highlight .n { color: #f8f8f2 } /* Name */
291
- .highlight .o { color: #f92672 } /* Operator */
292
- .highlight .p { color: #f8f8f2 } /* Punctuation */
293
- .highlight .ch { color: #75715e } /* Comment.Hashbang */
294
- .highlight .cm { color: #75715e } /* Comment.Multiline */
295
- .highlight .cp { color: #75715e } /* Comment.Preproc */
296
- .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
297
- .highlight .c1 { color: #75715e } /* Comment.Single */
298
- .highlight .cs { color: #75715e } /* Comment.Special */
299
- .highlight .gd { color: #f92672 } /* Generic.Deleted */
300
- .highlight .ge { font-style: italic } /* Generic.Emph */
301
- .highlight .gi { color: #a6e22e } /* Generic.Inserted */
302
- .highlight .gs { font-weight: bold } /* Generic.Strong */
303
- .highlight .gu { color: #75715e } /* Generic.Subheading */
304
- .highlight .kc { color: #66d9ef } /* Keyword.Constant */
305
- .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
306
- .highlight .kn { color: #f92672 } /* Keyword.Namespace */
307
- .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
308
- .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
309
- .highlight .kt { color: #66d9ef } /* Keyword.Type */
310
- .highlight .ld { color: #e6db74 } /* Literal.Date */
311
- .highlight .m { color: #ae81ff } /* Literal.Number */
312
- .highlight .s { color: #e6db74 } /* Literal.String */
313
- .highlight .na { color: #a6e22e } /* Name.Attribute */
314
- .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
315
- .highlight .nc { color: #a6e22e } /* Name.Class */
316
- .highlight .no { color: #66d9ef } /* Name.Constant */
317
- .highlight .nd { color: #a6e22e } /* Name.Decorator */
318
- .highlight .ni { color: #f8f8f2 } /* Name.Entity */
319
- .highlight .ne { color: #a6e22e } /* Name.Exception */
320
- .highlight .nf { color: #a6e22e } /* Name.Function */
321
- .highlight .nl { color: #f8f8f2 } /* Name.Label */
322
- .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
323
- .highlight .nx { color: #a6e22e } /* Name.Other */
324
- .highlight .py { color: #f8f8f2 } /* Name.Property */
325
- .highlight .nt { color: #f92672 } /* Name.Tag */
326
- .highlight .nv { color: #f8f8f2 } /* Name.Variable */
327
- .highlight .ow { color: #f92672 } /* Operator.Word */
328
- .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
329
- .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
330
- .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
331
- .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
332
- .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
333
- .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
334
- .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
335
- .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
336
- .highlight .sc { color: #e6db74 } /* Literal.String.Char */
337
- .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
338
- .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
339
- .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
340
- .highlight .se { color: #ae81ff } /* Literal.String.Escape */
341
- .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
342
- .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
343
- .highlight .sx { color: #e6db74 } /* Literal.String.Other */
344
- .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
345
- .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
346
- .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
347
- .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
348
- .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
349
- .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
350
- .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
351
- .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
352
- .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
353
- .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
 
spaces/Detomo/ai-comic-generation/src/components/ui/toast.tsx DELETED
@@ -1,127 +0,0 @@
1
- import * as React from "react"
2
- import * as ToastPrimitives from "@radix-ui/react-toast"
3
- import { cva, type VariantProps } from "class-variance-authority"
4
- import { X } from "lucide-react"
5
-
6
- import { cn } from "@/lib/utils"
7
-
8
- const ToastProvider = ToastPrimitives.Provider
9
-
10
- const ToastViewport = React.forwardRef<
11
- React.ElementRef<typeof ToastPrimitives.Viewport>,
12
- React.ComponentPropsWithoutRef<typeof ToastPrimitives.Viewport>
13
- >(({ className, ...props }, ref) => (
14
- <ToastPrimitives.Viewport
15
- ref={ref}
16
- className={cn(
17
- "fixed top-0 z-[100] flex max-h-screen w-full flex-col-reverse p-4 sm:bottom-0 sm:right-0 sm:top-auto sm:flex-col md:max-w-[420px]",
18
- className
19
- )}
20
- {...props}
21
- />
22
- ))
23
- ToastViewport.displayName = ToastPrimitives.Viewport.displayName
24
-
25
- const toastVariants = cva(
26
- "group pointer-events-auto relative flex w-full items-center justify-between space-x-4 overflow-hidden rounded-md border border-stone-200 p-6 pr-8 shadow-lg transition-all data-[swipe=cancel]:translate-x-0 data-[swipe=end]:translate-x-[var(--radix-toast-swipe-end-x)] data-[swipe=move]:translate-x-[var(--radix-toast-swipe-move-x)] data-[swipe=move]:transition-none data-[state=open]:animate-in data-[state=closed]:animate-out data-[swipe=end]:animate-out data-[state=closed]:fade-out-80 data-[state=closed]:slide-out-to-right-full data-[state=open]:slide-in-from-top-full data-[state=open]:sm:slide-in-from-bottom-full dark:border-stone-800",
27
- {
28
- variants: {
29
- variant: {
30
- default: "border bg-white text-stone-950 dark:bg-stone-950 dark:text-stone-50",
31
- destructive:
32
- "destructive group border-red-500 bg-red-500 text-stone-50 dark:border-red-900 dark:bg-red-900 dark:text-stone-50",
33
- },
34
- },
35
- defaultVariants: {
36
- variant: "default",
37
- },
38
- }
39
- )
40
-
41
- const Toast = React.forwardRef<
42
- React.ElementRef<typeof ToastPrimitives.Root>,
43
- React.ComponentPropsWithoutRef<typeof ToastPrimitives.Root> &
44
- VariantProps<typeof toastVariants>
45
- >(({ className, variant, ...props }, ref) => {
46
- return (
47
- <ToastPrimitives.Root
48
- ref={ref}
49
- className={cn(toastVariants({ variant }), className)}
50
- {...props}
51
- />
52
- )
53
- })
54
- Toast.displayName = ToastPrimitives.Root.displayName
55
-
56
- const ToastAction = React.forwardRef<
57
- React.ElementRef<typeof ToastPrimitives.Action>,
58
- React.ComponentPropsWithoutRef<typeof ToastPrimitives.Action>
59
- >(({ className, ...props }, ref) => (
60
- <ToastPrimitives.Action
61
- ref={ref}
62
- className={cn(
63
- "inline-flex h-8 shrink-0 items-center justify-center rounded-md border border-stone-200 bg-transparent px-3 text-sm font-medium ring-offset-white transition-colors hover:bg-stone-100 focus:outline-none focus:ring-2 focus:ring-stone-950 focus:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 group-[.destructive]:border-stone-100/40 group-[.destructive]:hover:border-red-500/30 group-[.destructive]:hover:bg-red-500 group-[.destructive]:hover:text-stone-50 group-[.destructive]:focus:ring-red-500 dark:border-stone-800 dark:ring-offset-stone-950 dark:hover:bg-stone-800 dark:focus:ring-stone-300 dark:group-[.destructive]:border-stone-800/40 dark:group-[.destructive]:hover:border-red-900/30 dark:group-[.destructive]:hover:bg-red-900 dark:group-[.destructive]:hover:text-stone-50 dark:group-[.destructive]:focus:ring-red-900",
64
- className
65
- )}
66
- {...props}
67
- />
68
- ))
69
- ToastAction.displayName = ToastPrimitives.Action.displayName
70
-
71
- const ToastClose = React.forwardRef<
72
- React.ElementRef<typeof ToastPrimitives.Close>,
73
- React.ComponentPropsWithoutRef<typeof ToastPrimitives.Close>
74
- >(({ className, ...props }, ref) => (
75
- <ToastPrimitives.Close
76
- ref={ref}
77
- className={cn(
78
- "absolute right-2 top-2 rounded-md p-1 text-stone-950/50 opacity-0 transition-opacity hover:text-stone-950 focus:opacity-100 focus:outline-none focus:ring-2 group-hover:opacity-100 group-[.destructive]:text-red-300 group-[.destructive]:hover:text-red-50 group-[.destructive]:focus:ring-red-400 group-[.destructive]:focus:ring-offset-red-600 dark:text-stone-50/50 dark:hover:text-stone-50",
79
- className
80
- )}
81
- toast-close=""
82
- {...props}
83
- >
84
- <X className="h-4 w-4" />
85
- </ToastPrimitives.Close>
86
- ))
87
- ToastClose.displayName = ToastPrimitives.Close.displayName
88
-
89
- const ToastTitle = React.forwardRef<
90
- React.ElementRef<typeof ToastPrimitives.Title>,
91
- React.ComponentPropsWithoutRef<typeof ToastPrimitives.Title>
92
- >(({ className, ...props }, ref) => (
93
- <ToastPrimitives.Title
94
- ref={ref}
95
- className={cn("text-sm font-semibold", className)}
96
- {...props}
97
- />
98
- ))
99
- ToastTitle.displayName = ToastPrimitives.Title.displayName
100
-
101
- const ToastDescription = React.forwardRef<
102
- React.ElementRef<typeof ToastPrimitives.Description>,
103
- React.ComponentPropsWithoutRef<typeof ToastPrimitives.Description>
104
- >(({ className, ...props }, ref) => (
105
- <ToastPrimitives.Description
106
- ref={ref}
107
- className={cn("text-sm opacity-90", className)}
108
- {...props}
109
- />
110
- ))
111
- ToastDescription.displayName = ToastPrimitives.Description.displayName
112
-
113
- type ToastProps = React.ComponentPropsWithoutRef<typeof Toast>
114
-
115
- type ToastActionElement = React.ReactElement<typeof ToastAction>
116
-
117
- export {
118
- type ToastProps,
119
- type ToastActionElement,
120
- ToastProvider,
121
- ToastViewport,
122
- Toast,
123
- ToastTitle,
124
- ToastDescription,
125
- ToastClose,
126
- ToastAction,
127
- }
 
spaces/DragGan/DragGan/scripts/gui.sh DELETED
@@ -1,11 +0,0 @@
- python visualizer_drag.py \
-     checkpoints/stylegan2_lions_512_pytorch.pkl \
-     checkpoints/stylegan2-ffhq-512x512.pkl \
-     checkpoints/stylegan2-afhqcat-512x512.pkl \
-     checkpoints/stylegan2-car-config-f.pkl \
-     checkpoints/stylegan2_dogs_1024_pytorch.pkl \
-     checkpoints/stylegan2_horses_256_pytorch.pkl \
-     checkpoints/stylegan2-cat-config-f.pkl \
-     checkpoints/stylegan2_elephants_512_pytorch.pkl \
-     checkpoints/stylegan_human_v2_512.pkl \
-     checkpoints/stylegan2-lhq-256x256.pkl
 
spaces/EduardoPacheco/DINOv2-Features-Visualization/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: DINOv2 Features Visualization
- emoji: 🚀
- colorFrom: red
- colorTo: purple
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/ElainaFanBoy/MusicGen/audiocraft/modules/conv.py DELETED
@@ -1,245 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import math
8
- import typing as tp
9
- import warnings
10
-
11
- import torch
12
- from torch import nn
13
- from torch.nn import functional as F
14
- from torch.nn.utils import spectral_norm, weight_norm
15
-
16
-
17
- CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
18
- 'time_group_norm'])
19
-
20
-
21
- def apply_parametrization_norm(module: nn.Module, norm: str = 'none'):
22
- assert norm in CONV_NORMALIZATIONS
23
- if norm == 'weight_norm':
24
- return weight_norm(module)
25
- elif norm == 'spectral_norm':
26
- return spectral_norm(module)
27
- else:
28
- # We already check was in CONV_NORMALIZATION, so any other choice
29
- # doesn't need reparametrization.
30
- return module
31
-
32
-
33
- def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs):
34
- """Return the proper normalization module. If causal is True, this will ensure the returned
35
- module is causal, or return an error if the normalization doesn't support causal evaluation.
36
- """
37
- assert norm in CONV_NORMALIZATIONS
38
- if norm == 'time_group_norm':
39
- if causal:
40
- raise ValueError("GroupNorm doesn't support causal evaluation.")
41
- assert isinstance(module, nn.modules.conv._ConvNd)
42
- return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
43
- else:
44
- return nn.Identity()
45
-
46
-
47
- def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
48
- padding_total: int = 0) -> int:
49
- """See `pad_for_conv1d`.
50
- """
51
- length = x.shape[-1]
52
- n_frames = (length - kernel_size + padding_total) / stride + 1
53
- ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
54
- return ideal_length - length
55
-
56
-
57
- def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
58
- """Pad for a convolution to make sure that the last window is full.
59
- Extra padding is added at the end. This is required to ensure that we can rebuild
60
- an output of the same length, as otherwise, even with padding, some time steps
61
- might get removed.
62
- For instance, with total padding = 4, kernel size = 4, stride = 2:
63
- 0 0 1 2 3 4 5 0 0 # (0s are padding)
64
- 1 2 3 # (output frames of a convolution, last 0 is never used)
65
- 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding)
66
- 1 2 3 4 # once you removed padding, we are missing one time step !
67
- """
68
- extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
69
- return F.pad(x, (0, extra_padding))
70
-
71
-
72
- def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
73
- """Tiny wrapper around F.pad, just to allow for reflect padding on small input.
74
- If this is the case, we insert extra 0 padding to the right before the reflection happen.
75
- """
76
- length = x.shape[-1]
77
- padding_left, padding_right = paddings
78
- assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
79
- if mode == 'reflect':
80
- max_pad = max(padding_left, padding_right)
81
- extra_pad = 0
82
- if length <= max_pad:
83
- extra_pad = max_pad - length + 1
84
- x = F.pad(x, (0, extra_pad))
85
- padded = F.pad(x, paddings, mode, value)
86
- end = padded.shape[-1] - extra_pad
87
- return padded[..., :end]
88
- else:
89
- return F.pad(x, paddings, mode, value)
90
-
91
-
92
- def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
93
- """Remove padding from x, handling properly zero padding. Only for 1d!
94
- """
95
- padding_left, padding_right = paddings
96
- assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
97
- assert (padding_left + padding_right) <= x.shape[-1]
98
- end = x.shape[-1] - padding_right
99
- return x[..., padding_left: end]
100
-
101
-
102
- class NormConv1d(nn.Module):
103
- """Wrapper around Conv1d and normalization applied to this conv
104
- to provide a uniform interface across normalization approaches.
105
- """
106
- def __init__(self, *args, causal: bool = False, norm: str = 'none',
107
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
108
- super().__init__()
109
- self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
110
- self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
111
- self.norm_type = norm
112
-
113
- def forward(self, x):
114
- x = self.conv(x)
115
- x = self.norm(x)
116
- return x
117
-
118
-
119
- class NormConv2d(nn.Module):
120
- """Wrapper around Conv2d and normalization applied to this conv
121
- to provide a uniform interface across normalization approaches.
122
- """
123
- def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
124
- super().__init__()
125
- self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm)
126
- self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs)
127
- self.norm_type = norm
128
-
129
- def forward(self, x):
130
- x = self.conv(x)
131
- x = self.norm(x)
132
- return x
133
-
134
-
135
- class NormConvTranspose1d(nn.Module):
136
- """Wrapper around ConvTranspose1d and normalization applied to this conv
137
- to provide a uniform interface across normalization approaches.
138
- """
139
- def __init__(self, *args, causal: bool = False, norm: str = 'none',
140
- norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
141
- super().__init__()
142
- self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
143
- self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
144
- self.norm_type = norm
145
-
146
- def forward(self, x):
147
- x = self.convtr(x)
148
- x = self.norm(x)
149
- return x
150
-
151
-
152
- class NormConvTranspose2d(nn.Module):
153
- """Wrapper around ConvTranspose2d and normalization applied to this conv
154
- to provide a uniform interface across normalization approaches.
155
- """
156
- def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
157
- super().__init__()
158
- self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm)
159
- self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs)
160
-
161
- def forward(self, x):
162
- x = self.convtr(x)
163
- x = self.norm(x)
164
- return x
165
-
166
-
167
- class StreamableConv1d(nn.Module):
168
- """Conv1d with some builtin handling of asymmetric or causal padding
169
- and normalization.
170
- """
171
- def __init__(self, in_channels: int, out_channels: int,
172
- kernel_size: int, stride: int = 1, dilation: int = 1,
173
- groups: int = 1, bias: bool = True, causal: bool = False,
174
- norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
175
- pad_mode: str = 'reflect'):
176
- super().__init__()
177
- # warn user on unusual setup between dilation and stride
178
- if stride > 1 and dilation > 1:
179
- warnings.warn('StreamableConv1d has been initialized with stride > 1 and dilation > 1'
180
- f' (kernel_size={kernel_size} stride={stride}, dilation={dilation}).')
181
- self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
182
- dilation=dilation, groups=groups, bias=bias, causal=causal,
183
- norm=norm, norm_kwargs=norm_kwargs)
184
- self.causal = causal
185
- self.pad_mode = pad_mode
186
-
187
- def forward(self, x):
188
- B, C, T = x.shape
189
- kernel_size = self.conv.conv.kernel_size[0]
190
- stride = self.conv.conv.stride[0]
191
- dilation = self.conv.conv.dilation[0]
192
- kernel_size = (kernel_size - 1) * dilation + 1 # effective kernel size with dilations
193
- padding_total = kernel_size - stride
194
- extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
195
- if self.causal:
196
- # Left padding for causal
197
- x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
198
- else:
199
- # Asymmetric padding required for odd strides
200
- padding_right = padding_total // 2
201
- padding_left = padding_total - padding_right
202
- x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
203
- return self.conv(x)
204
-
205
-
206
- class StreamableConvTranspose1d(nn.Module):
207
- """ConvTranspose1d with some builtin handling of asymmetric or causal padding
208
- and normalization.
209
- """
210
- def __init__(self, in_channels: int, out_channels: int,
211
- kernel_size: int, stride: int = 1, causal: bool = False,
212
- norm: str = 'none', trim_right_ratio: float = 1.,
213
- norm_kwargs: tp.Dict[str, tp.Any] = {}):
214
- super().__init__()
215
- self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
216
- causal=causal, norm=norm, norm_kwargs=norm_kwargs)
217
- self.causal = causal
218
- self.trim_right_ratio = trim_right_ratio
219
- assert self.causal or self.trim_right_ratio == 1., \
220
- "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
221
- assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.
222
-
223
- def forward(self, x):
224
- kernel_size = self.convtr.convtr.kernel_size[0]
225
- stride = self.convtr.convtr.stride[0]
226
- padding_total = kernel_size - stride
227
-
228
- y = self.convtr(x)
229
-
230
- # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
231
- # removed at the very end, when keeping only the right length for the output,
232
- # as removing it here would require also passing the length at the matching layer
233
- # in the encoder.
234
- if self.causal:
235
- # Trim the padding on the right according to the specified ratio
236
- # if trim_right_ratio = 1.0, trim everything from right
237
- padding_right = math.ceil(padding_total * self.trim_right_ratio)
238
- padding_left = padding_total - padding_right
239
- y = unpad1d(y, (padding_left, padding_right))
240
- else:
241
- # Asymmetric padding required for odd strides
242
- padding_right = padding_total // 2
243
- padding_left = padding_total - padding_right
244
- y = unpad1d(y, (padding_left, padding_right))
245
- return y
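As a quick sanity check of the padding arithmetic documented in pad_for_conv1d above (the docstring's case of total padding 4, kernel size 4, stride 2), here is a small sketch that re-derives the extra right padding on a plain integer length; the helper name and the sample length of 5 are illustrative and not part of the original module.

import math

def extra_padding_for_conv1d(length, kernel_size, stride, padding_total=0):
    # Same formula as get_extra_padding_for_conv1d above, applied to an int length
    # instead of a tensor's last dimension.
    n_frames = (length - kernel_size + padding_total) / stride + 1
    ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
    return ideal_length - length

# length=5, kernel_size=4, stride=2, padding_total=4:
# n_frames = (5 - 4 + 4) / 2 + 1 = 3.5 -> ceil to 4 frames,
# ideal_length = 3 * 2 + 0 = 6, so 1 extra sample is padded on the right.
print(extra_padding_for_conv1d(5, 4, 2, 4))  # 1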
 
spaces/EleutherAI/magma/example_inference.py DELETED
@@ -1,27 +0,0 @@
- from magma import Magma
- from magma.image_input import ImageInput
-
- model = Magma.from_checkpoint(
-     config_path = "configs/MAGMA_v1.yml",
-     checkpoint_path = "./mp_rank_00_model_states.pt",
-     device = 'cuda:0'
- )
-
- inputs =[
-     ## supports urls and path/to/image
-     ImageInput('https://www.art-prints-on-demand.com/kunst/thomas_cole/woods_hi.jpg'),
-     'Describe the painting:'
- ]
-
- ## returns a tensor of shape: (1, 149, 4096)
- embeddings = model.preprocess_inputs(inputs)
-
- ## returns a list of length embeddings.shape[0] (batch size)
- output = model.generate(
-     embeddings = embeddings,
-     max_steps = 6,
-     temperature = 0.7,
-     top_k = 0,
- )
-
- print(output[0]) ## A cabin on a lake
 
spaces/EronSamez/RVC_HFmeu/demucs/train.py DELETED
@@ -1,127 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- import sys
-
- import tqdm
- from torch.utils.data import DataLoader
- from torch.utils.data.distributed import DistributedSampler
-
- from .utils import apply_model, average_metric, center_trim
-
-
- def train_model(epoch,
-                 dataset,
-                 model,
-                 criterion,
-                 optimizer,
-                 augment,
-                 quantizer=None,
-                 diffq=0,
-                 repeat=1,
-                 device="cpu",
-                 seed=None,
-                 workers=4,
-                 world_size=1,
-                 batch_size=16):
-
-     if world_size > 1:
-         sampler = DistributedSampler(dataset)
-         sampler_epoch = epoch * repeat
-         if seed is not None:
-             sampler_epoch += seed * 1000
-         sampler.set_epoch(sampler_epoch)
-         batch_size //= world_size
-         loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=workers)
-     else:
-         loader = DataLoader(dataset, batch_size=batch_size, num_workers=workers, shuffle=True)
-     current_loss = 0
-     model_size = 0
-     for repetition in range(repeat):
-         tq = tqdm.tqdm(loader,
-                        ncols=120,
-                        desc=f"[{epoch:03d}] train ({repetition + 1}/{repeat})",
-                        leave=False,
-                        file=sys.stdout,
-                        unit=" batch")
-         total_loss = 0
-         for idx, sources in enumerate(tq):
-             if len(sources) < batch_size:
-                 # skip uncomplete batch for augment.Remix to work properly
-                 continue
-             sources = sources.to(device)
-             sources = augment(sources)
-             mix = sources.sum(dim=1)
-
-             estimates = model(mix)
-             sources = center_trim(sources, estimates)
-             loss = criterion(estimates, sources)
-             model_size = 0
-             if quantizer is not None:
-                 model_size = quantizer.model_size()
-
-             train_loss = loss + diffq * model_size
-             train_loss.backward()
-             grad_norm = 0
-             for p in model.parameters():
-                 if p.grad is not None:
-                     grad_norm += p.grad.data.norm()**2
-             grad_norm = grad_norm**0.5
-             optimizer.step()
-             optimizer.zero_grad()
-
-             if quantizer is not None:
-                 model_size = model_size.item()
-
-             total_loss += loss.item()
-             current_loss = total_loss / (1 + idx)
-             tq.set_postfix(loss=f"{current_loss:.4f}", ms=f"{model_size:.2f}",
-                            grad=f"{grad_norm:.5f}")
-
-             # free some space before next round
-             del sources, mix, estimates, loss, train_loss
-
-         if world_size > 1:
-             sampler.epoch += 1
-
-     if world_size > 1:
-         current_loss = average_metric(current_loss)
-     return current_loss, model_size
-
-
- def validate_model(epoch,
-                    dataset,
-                    model,
-                    criterion,
-                    device="cpu",
-                    rank=0,
-                    world_size=1,
-                    shifts=0,
-                    overlap=0.25,
-                    split=False):
-     indexes = range(rank, len(dataset), world_size)
-     tq = tqdm.tqdm(indexes,
-                    ncols=120,
-                    desc=f"[{epoch:03d}] valid",
-                    leave=False,
-                    file=sys.stdout,
-                    unit=" track")
-     current_loss = 0
-     for index in tq:
-         streams = dataset[index]
-         # first five minutes to avoid OOM on --upsample models
-         streams = streams[..., :15_000_000]
-         streams = streams.to(device)
-         sources = streams[1:]
-         mix = streams[0]
-         estimates = apply_model(model, mix, shifts=shifts, split=split, overlap=overlap)
-         loss = criterion(estimates, sources)
-         current_loss += loss.item() / len(indexes)
-         del estimates, streams, sources
-
-     if world_size > 1:
-         current_loss = average_metric(current_loss, len(indexes))
-     return current_loss
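The gradient norm reported in train_model's progress bar above is the square root of the summed squared per-parameter gradient norms; a minimal standalone sketch of that accumulation, assuming an arbitrary torch module whose backward pass has already run (the Linear layer below is just a stand-in):

import torch

def global_grad_norm(model: torch.nn.Module) -> float:
    # Mirrors the accumulation in train_model: sum squared L2 norms, then take the square root.
    total = 0.0
    for p in model.parameters():
        if p.grad is not None:
            total += p.grad.data.norm() ** 2
    return float(total ** 0.5)

# Stand-in example:
layer = torch.nn.Linear(4, 2)
layer(torch.randn(3, 4)).sum().backward()
print(global_grad_norm(layer))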