parquet-converter committed
Commit f16f168 · 1 Parent(s): bbf757c

Update parquet files (step 18 of 476)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/0x90e/ESRGAN-MANGA/ESRGAN_plus/architecture.py +0 -38
  2. spaces/101-5/gpt4free/g4f/.v1/gui/streamlit_chat_app.py +0 -156
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fusion 360 2008 32 Bit __EXCLUSIVE__ Xforce Keygen.md +0 -40
  4. spaces/1gistliPinn/ChatGPT4/Examples/Cossacks Back To War __EXCLUSIVE__ Full Game Download.md +0 -32
  5. spaces/1gistliPinn/ChatGPT4/Examples/Fallout 3 Crack [BEST]ed Launcher 1.7.md +0 -6
  6. spaces/1phancelerku/anime-remove-background/Download Free Games for MacBook Air and Experience the Thrill of Gaming.md +0 -140
  7. spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py +0 -631
  8. spaces/7thHeaven/ochyai_food/README.md +0 -13
  9. spaces/801artistry/RVC801/infer/lib/infer_pack/onnx_inference.py +0 -149
  10. spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/upfirdn2d.cpp +0 -23
  11. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/diffusionmodules/__init__.py +0 -0
  12. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/predict.py +0 -90
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb16_cifar100.py +0 -19
  14. spaces/Adapter/CoAdapter/ldm/models/autoencoder.py +0 -211
  15. spaces/Adapter/CoAdapter/ldm/models/diffusion/dpm_solver/sampler.py +0 -87
  16. spaces/Adapter/CoAdapter/ldm/modules/diffusionmodules/openaimodel.py +0 -798
  17. spaces/Aditya9790/yolo7-object-tracking/utils/aws/userdata.sh +0 -27
  18. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/monotonic_align/core.c +0 -0
  19. spaces/AlbertoFH98/CastenaApp/README.md +0 -13
  20. spaces/AlekseyKorshuk/thin-plate-spline-motion-model/frames_dataset.py +0 -173
  21. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/latent_mappers.py +0 -81
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/fp16.md +0 -434
  23. spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py +0 -4
  24. spaces/Andy1621/uniformer_light/imagenet_class_index.py +0 -1002
  25. spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-search/fuse.min.js +0 -9
  26. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/losses.py +0 -364
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/io.py +0 -258
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/log.py +0 -80
  29. spaces/Audio-AGI/WavJourney/VoiceParser/hubert_manager.py +0 -33
  30. spaces/Audio-AGI/WavJourney/voice_presets.py +0 -96
  31. spaces/B10915003/B10915003-autotrain-jimmy-test-face-identification-53251125423/README.md +0 -13
  32. spaces/BAAI/dreambooth-altdiffusion/app.py +0 -654
  33. spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/model_param_init.py +0 -69
  34. spaces/Benson/text-generation/Examples/Baku Burger House.md +0 -79
  35. spaces/BestteaLib/README/README.md +0 -10
  36. spaces/BetterAPI/BetterChat_new/src/lib/utils/share.ts +0 -7
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/tags.py +0 -487
  38. spaces/CVH-vn1210/make_hair/minigpt4/datasets/datasets/__init__.py +0 -0
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/demo/demo.py +0 -159
  40. spaces/CVPR/LIVE/thrust/CONTRIBUTING.md +0 -490
  41. spaces/CVPR/LIVE/thrust/testing/omp/nvcc_independence.cpp +0 -75
  42. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/equal.h +0 -74
  43. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/backbone/position_encoding.py +0 -186
  44. spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/Client.js +0 -446
  45. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/encoders/modules.py +0 -226
  46. spaces/Curranj/GPT-QRI/app.py +0 -78
  47. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/builders/__init__.py +0 -77
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageDraw.py +0 -1038
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/openapi/docs.py +0 -203
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/designspaceLib/statNames.py +0 -252
spaces/0x90e/ESRGAN-MANGA/ESRGAN_plus/architecture.py DELETED
@@ -1,38 +0,0 @@
- import math
- import torch
- import torch.nn as nn
- import ESRGAN_plus.block as B
-
-
- class RRDB_Net(nn.Module):
-     def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', \
-             mode='CNA', res_scale=1, upsample_mode='upconv'):
-         super(RRDB_Net, self).__init__()
-         n_upscale = int(math.log(upscale, 2))
-         if upscale == 3:
-             n_upscale = 1
-
-         fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
-         rb_blocks = [B.RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
-             norm_type=norm_type, act_type=act_type, mode='CNA') for _ in range(nb)]
-         LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
-
-         if upsample_mode == 'upconv':
-             upsample_block = B.upconv_blcok
-         elif upsample_mode == 'pixelshuffle':
-             upsample_block = B.pixelshuffle_block
-         else:
-             raise NotImplementedError('upsample mode [%s] is not found' % upsample_mode)
-         if upscale == 3:
-             upsampler = upsample_block(nf, nf, 3, act_type=act_type)
-         else:
-             upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
-         HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
-         HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
-
-         self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),\
-             *upsampler, HR_conv0, HR_conv1)
-
-     def forward(self, x):
-         x = self.model(x)
-         return x

spaces/101-5/gpt4free/g4f/.v1/gui/streamlit_chat_app.py DELETED
@@ -1,156 +0,0 @@
- import atexit
- import Levenshtein
- import os
- import sys
-
- sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
-
- import streamlit as st
- from streamlit_chat import message
- from query_methods import query, avail_query_methods
- import pickle
-
- conversations_file = "conversations.pkl"
-
- def load_conversations():
-     try:
-         with open(conversations_file, "rb") as f:
-             return pickle.load(f)
-     except FileNotFoundError:
-         return []
-     except EOFError:
-         return []
-
-
- def save_conversations(conversations, current_conversation):
-     updated = False
-     for idx, conversation in enumerate(conversations):
-         if conversation == current_conversation:
-             conversations[idx] = current_conversation
-             updated = True
-             break
-     if not updated:
-         conversations.append(current_conversation)
-
-     temp_conversations_file = "temp_" + conversations_file
-     with open(temp_conversations_file, "wb") as f:
-         pickle.dump(conversations, f)
-
-     os.replace(temp_conversations_file, conversations_file)
-
- def delete_conversation(conversations, current_conversation):
-     for idx, conversation in enumerate(conversations):
-         conversations[idx] = current_conversation
-         break
-     conversations.remove(current_conversation)
-
-     temp_conversations_file = "temp_" + conversations_file
-     with open(temp_conversations_file, "wb") as f:
-         pickle.dump(conversations, f)
-
-     os.replace(temp_conversations_file, conversations_file)
-
- def exit_handler():
-     print("Exiting, saving data...")
-     # Perform cleanup operations here, like saving data or closing open files.
-     save_conversations(st.session_state.conversations, st.session_state.current_conversation)
-
-
- # Register the exit_handler function to be called when the program is closing.
- atexit.register(exit_handler)
-
- st.header("Chat Placeholder")
-
- if 'conversations' not in st.session_state:
-     st.session_state['conversations'] = load_conversations()
-
- if 'input_text' not in st.session_state:
-     st.session_state['input_text'] = ''
-
- if 'selected_conversation' not in st.session_state:
-     st.session_state['selected_conversation'] = None
-
- if 'input_field_key' not in st.session_state:
-     st.session_state['input_field_key'] = 0
-
- if 'query_method' not in st.session_state:
-     st.session_state['query_method'] = query
-
- if 'search_query' not in st.session_state:
-     st.session_state['search_query'] = ''
-
- # Initialize new conversation
- if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None:
-     st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
-
- input_placeholder = st.empty()
- user_input = input_placeholder.text_input(
-     'You:', value=st.session_state['input_text'], key=f'input_text_-1'#{st.session_state["input_field_key"]}
- )
- submit_button = st.button("Submit")
-
- if (user_input and user_input != st.session_state['input_text']) or submit_button:
-     output = query(user_input, st.session_state['query_method'])
-
-     escaped_output = output.encode('utf-8').decode('unicode-escape')
-
-     st.session_state['current_conversation']['user_inputs'].append(user_input)
-     st.session_state.current_conversation['generated_responses'].append(escaped_output)
-     save_conversations(st.session_state.conversations, st.session_state.current_conversation)
-     st.session_state['input_text'] = ''
-     st.session_state['input_field_key'] += 1  # Increment key value for new widget
-     user_input = input_placeholder.text_input(
-         'You:', value=st.session_state['input_text'], key=f'input_text_{st.session_state["input_field_key"]}'
-     )  # Clear the input field
-
- # Add a button to create a new conversation
- if st.sidebar.button("New Conversation"):
-     st.session_state['selected_conversation'] = None
-     st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
-     st.session_state['input_field_key'] += 1  # Increment key value for new widget
- st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)
-
- # Proxy
- st.session_state['proxy'] = st.sidebar.text_input("Proxy: ")
-
- # Searchbar
- search_query = st.sidebar.text_input("Search Conversations:", value=st.session_state.get('search_query', ''), key='search')
-
- if search_query:
-     filtered_conversations = []
-     indices = []
-     for idx, conversation in enumerate(st.session_state.conversations):
-         if search_query in conversation['user_inputs'][0]:
-             filtered_conversations.append(conversation)
-             indices.append(idx)
-
-     filtered_conversations = list(zip(indices, filtered_conversations))
-     conversations = sorted(filtered_conversations, key=lambda x: Levenshtein.distance(search_query, x[1]['user_inputs'][0]))
-
-     sidebar_header = f"Search Results ({len(conversations)})"
- else:
-     conversations = st.session_state.conversations
-     sidebar_header = "Conversation History"
-
- # Sidebar
- st.sidebar.header(sidebar_header)
- sidebar_col1, sidebar_col2 = st.sidebar.columns([5,1])
- for idx, conversation in enumerate(conversations):
-     if sidebar_col1.button(f"Conversation {idx + 1}: {conversation['user_inputs'][0]}", key=f"sidebar_btn_{idx}"):
-         st.session_state['selected_conversation'] = idx
-         st.session_state['current_conversation'] = conversation
-     if sidebar_col2.button('🗑️', key=f"sidebar_btn_delete_{idx}"):
-         if st.session_state['selected_conversation'] == idx:
-             st.session_state['selected_conversation'] = None
-             st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
-         delete_conversation(conversations, conversation)
-         st.experimental_rerun()
- if st.session_state['selected_conversation'] is not None:
-     conversation_to_display = conversations[st.session_state['selected_conversation']]
- else:
-     conversation_to_display = st.session_state.current_conversation
-
- if conversation_to_display['generated_responses']:
-     for i in range(len(conversation_to_display['generated_responses']) - 1, -1, -1):
-         message(conversation_to_display["generated_responses"][i], key=f"display_generated_{i}")
-         message(conversation_to_display['user_inputs'][i], is_user=True, key=f"display_user_{i}")

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fusion 360 2008 32 Bit __EXCLUSIVE__ Xforce Keygen.md DELETED
@@ -1,40 +0,0 @@
-
- <h1>Fusion 360 2008 32 Bit Xforce Keygen: How to Activate Autodesk Products for Free</h1>
- <p>If you are looking for a powerful and versatile software for designing, modeling, and engineering, you might have heard of <strong>Fusion 360</strong>. Fusion 360 is a cloud-based CAD/CAM/CAE software that allows you to create, edit, and share your designs in various formats. It also integrates with other Autodesk products and services, such as AutoCAD, Inventor, Revit, Maya, and more.</p>
- <h2>Fusion 360 2008 32 Bit Xforce Keygen</h2><br /><p><b><b>Download File</b> &rarr;&rarr;&rarr; <a href="https://byltly.com/2uKxYI">https://byltly.com/2uKxYI</a></b></p><br /><br />
- <p>However, Fusion 360 is not a cheap software. It requires a monthly or yearly subscription fee that can range from $25 to $500 depending on your plan. If you want to use Fusion 360 without paying anything, you might be tempted to use a <strong>Xforce keygen</strong>. Xforce keygen is a crack tool that can generate activation codes for any Autodesk product. By using Xforce keygen, you can bypass the license verification process and activate Fusion 360 for free.</p>
- <p>But is it worth it? In this article, we will show you how to use Xforce keygen for Fusion 360 2008 32 bit, as well as the benefits and risks of doing so. We will also answer some frequently asked questions about Fusion 360 and Xforce keygen. Read on to find out more.</p>
- <h2>How to Download and Install Fusion 360 2008 32 Bit</h2>
- <p>Before you can use Xforce keygen for Fusion 360, you need to download and install the software first. Here are the steps to do so:</p> - Step 1: Download Fusion 360 2008 32 bit from the official website or a trusted source. You can find the download link here: . Make sure you choose the correct version for your operating system and system requirements. - Step 2: Run the setup file and follow the instructions. You may need to accept the terms and conditions, choose the installation location, and select the components you want to install. The installation process may take some time depending on your internet speed and computer performance. - Step 3: Choose the trial version or enter a serial number. If you have a valid serial number, you can enter it and activate Fusion 360. If not, you can choose the trial version and use it for 30 days. You can also sign in with your Autodesk account if you have one. <h2>How to Download and Use Xforce Keygen for Fusion 360 2008 32 Bit</h2>
- <p>Once you have installed Fusion 360, you can use Xforce keygen to activate it for free. Here are the steps to do so:</p>
- - Step 1: Download Xforce keygen from a reliable source. You can find many websites that offer Xforce keygen for various Autodesk products, but be careful of malware and viruses. One of the most popular sources is , but use it at your own risk. - Step 2: Disable your antivirus and internet connection. This is important because Xforce keygen is detected as a malicious program by most antivirus software and may be blocked or deleted. Also, disconnecting from the internet will prevent Autodesk from verifying your license online. - Step 3: Run Xforce keygen as administrator and click on "Patch". You will see a window that asks you to locate the file "xf-adsk2018_x86.exe" in your Fusion 360 installation folder. Usually, it is located in "C:\Program Files\Autodesk\Fusion 360\bin". Select the file and click on "Open". - Step 4: Copy the request code from Fusion 360 and paste it into Xforce keygen. To get the request code, open Fusion 360 and go to the "Help" menu. Click on "Register" and then on "Activate". You will see a screen that shows your serial number and a request code. Copy the request code and paste it into Xforce keygen where it says "Request". - Step 5: Click on "Generate" and copy the activation code from Xforce keygen. You will see a long string of letters and numbers that is your activation code. Copy it and go back to Fusion 360. - Step 6: Paste the activation code into Fusion 360 and click on "Next". You will see a message that says "Activation successful". Click on "Finish" and enjoy using Fusion 360 for free. <h2>How to Verify that Fusion 360 is Activated</h2>
- <p>To make sure that Fusion 360 is activated, you can do the following:</p>
- - Step 1: Open Fusion 360 and go to the "Help" menu. - Step 2: Click on "About Autodesk Fusion 360". - Step 3: Check the license status and expiration date. You should see that your license is valid until January 1, 2060, which means that you have activated Fusion 360 with Xforce keygen. <h2>Benefits of Using Fusion 360 with Xforce Keygen</h2>
- <p>By using Xforce keygen for Fusion 360, you can enjoy some benefits, such as:</p>
- <p></p>
- - Access to all features and updates of Fusion 360. You can use all the tools and functions of Fusion 360 without any limitations or restrictions. You can also download and install any updates or patches that are released by Autodesk. - No need to pay for a subscription or license. You can save money by not having to pay for a monthly or yearly fee to use Fusion 360. You can also avoid any renewal or cancellation issues that may arise with a subscription plan. - Ability to create, edit, and share designs in various formats. You can work on any type of design project with Fusion 360, from sketching to rendering to simulation. You can also export and import your designs in different formats, such as DWG, DXF, STL, OBJ, IGES, STEP, etc. - Compatibility with other Autodesk products and services. You can integrate your designs with other Autodesk software, such as AutoCAD, Inventor, Revit, Maya, etc. You can also access other Autodesk services, such as A360 cloud storage, Autodesk Gallery, Autodesk Education Community, etc. <h2>Risks and Drawbacks of Using Fusion 360 with Xforce Keygen</h2>
- <p>However, using Xforce keygen for Fusion 360 also comes with some risks and drawbacks, such as:</p>
- - Legal and ethical issues of using
- - Legal and ethical issues of using pirated software. By using Xforce keygen, you are violating the terms and conditions of Autodesk and infringing their intellectual property rights. You are also depriving them of their revenue and supporting software piracy. This can lead to legal consequences, such as fines, lawsuits, or even jail time. You are also acting unethically, as you are not respecting the work and effort of the software developers and creators. - Potential malware and virus infection from untrusted sources. Xforce keygen is not an official or authorized tool from Autodesk. It is created by hackers and crackers who may have malicious intentions. They may embed malware or viruses into the Xforce keygen file or the websites that offer it. This can compromise your computer security and privacy, as well as damage your files and data. - Possible errors and glitches in the software performance. Xforce keygen may not work properly with Fusion 360, especially if there are updates or patches from Autodesk. It may cause errors, crashes, or glitches in the software functionality or stability. It may also interfere with other programs or applications on your computer. - No technical support or customer service from Autodesk. If you encounter any problems or issues with Fusion 360, you cannot contact Autodesk for help or assistance. You are on your own, as Autodesk does not provide any support or service for pirated software. You may also miss out on some features or benefits that are only available for licensed users, such as online forums, tutorials, feedback, etc. <h2>Conclusion</h2>
- <p>Fusion 360 is a great software for design, modeling, and engineering. However, it is not a cheap software, and you may want to use Xforce keygen to activate it for free. Xforce keygen is a crack tool that can generate activation codes for any Autodesk product, including Fusion 360.</p>
- <p>In this article, we have shown you how to use Xforce keygen for Fusion 360 2008 32 bit, as well as the benefits and risks of doing so. We have also answered some frequently asked questions about Fusion 360 and Xforce keygen.</p>
- <p>Using Xforce keygen for Fusion 360 can give you access to all features and updates of the software without paying anything. However, it can also expose you to legal and ethical issues, malware and virus infection, errors and glitches, and no technical support or customer service.</p>
- <p>Ultimately, the decision is yours to make. We do not recommend or endorse using Xforce keygen for Fusion 360 or any other Autodesk product. We advise you to use the official website or a trusted source to download and install Fusion 360, and to pay for a subscription or license if you want to use it legally and ethically.</p>
- <p>If you found this article helpful, please share it with your friends and colleagues who may be interested in using Fusion 360 with Xforce keygen. If you have any questions or feedback, please leave a comment below. Thank you for reading.</p>
- <h2>FAQs</h2>
- <p>Here are some of the most common questions that people ask about Fusion 360 and Xforce keygen:</p>
- <h3>What is the difference between Fusion 360 and AutoCAD?</h3>
- <p>Fusion 360 and AutoCAD are both products from Autodesk that are used for design and engineering. However, they have some differences in their features and functions.</p>
- <p>AutoCAD is a 2D and 3D CAD software that is mainly used for drafting, documentation, and detailing. It has a wide range of tools and commands that allow you to create precise and accurate drawings and models.</p>
- <p>Fusion 360 is a cloud-based CAD/CAM/CAE software that is mainly used for design, modeling, and simulation. It has a more intuitive and user-friendly interface that allows you to create complex and organic shapes and forms. It also integrates with other Autodesk products and services, such as Inventor, Revit, Maya, etc.</p>
- <h3>Is Fusion 360 free for students and educators?</h3>
- <p>Yes, Fusion 360 is free for students and educators who are enrolled in or employed by a qualified educational institution. You can apply for a free educational license that will allow you to use Fusion 360 for non-commercial purposes for up to three years.</p>
- <p>To get a free educational license for Fusion 360, you need to create an Autodesk account with your educational email address. Then, you need to verify your eligibility by providing proof of your enrollment or employment status. After that, you can download and install Fusion 360 from the Autodesk Education Community website.</p>
- <h3>How can I update Fusion 360 after using Xforce keygen?</h3>
- <p>If you have used Xforce keygen to activate Fusion 360, you may not be able to update it automatically from the software itself. This is because Autodesk may detect - your license as invalid or expired and prevent you from updating. You may also lose your activation if you update Fusion 360 with Xforce keygen. - To update Fusion 360 after using Xforce keygen, you need to download the latest version of the software from the official website or a trusted source. Then, you need to uninstall the previous version of Fusion 360 and install the new one. After that, you need to use Xforce keygen again to activate the new version of Fusion 360. - Alternatively, you can use a patcher tool that can update Fusion 360 without affecting your activation. One of the most popular patchers is , but use it at your own risk. <h3>What are some alternatives to Xforce keygen for activating Autodesk products?</h3>
- <p>Xforce keygen is not the only crack tool that can activate Autodesk products. There are some other tools that can do the same thing, such as:</p>
- - : This is a universal keygen that can generate serial numbers and product keys for any Autodesk product. It also has a patch function that can modify the software files and bypass the license verification process. - : This is a patcher tool that can activate any Autodesk product by modifying the registry entries and the hosts file. It also has a backup and restore function that can save and restore your activation data. - : This is a loader tool that can activate any Autodesk product by injecting a DLL file into the software process. It also has a stealth mode that can hide the loader from detection. <p>However, these tools are also illegal and unethical, and they may have the same risks and drawbacks as Xforce keygen. We do not recommend or endorse using these tools for activating Autodesk products.</p>
- <h3>Where can I find more tutorials and resources for Fusion 360?</h3>
- <p>If you want to learn more about Fusion 360 and how to use it effectively, you can find many tutorials and resources online, such as:</p>
- - : This is the official website of Fusion 360, where you can find information, documentation, downloads, updates, forums, blogs, videos, webinars, events, and more. - : This is the official YouTube channel of Fusion 360, where you can find tutorials, tips, tricks, demos, showcases, live streams, and more. - : This is an online learning platform that offers courses and lessons on Fusion 360 and other Autodesk products. You can learn from experts and professionals at your own pace and level. - : This is an online community of Fusion 360 users, where you can share your designs, projects, questions, feedback, ideas, and more. You can also join groups, challenges, contests, and events. <p>These are some of the best sources for learning and improving your skills in Fusion 360. You can also search for other websites, blogs, podcasts, books, magazines, etc. that offer Fusion 360 tutorials and resources.</p> b2dd77e56b<br />
- <br />
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/Cossacks Back To War __EXCLUSIVE__ Full Game Download.md DELETED
@@ -1,32 +0,0 @@
- <h2>cossacks back to war full game download</h2><br /><p><b><b>Download</b> &#9658;&#9658;&#9658; <a href="https://imgfil.com/2uy1Yf">https://imgfil.com/2uy1Yf</a></b></p><br /><br />
-
- ZOMBIE ATTACK (2014)
-
- NOVOLAND
-
- Jakob Keseberg
-
- As zombie hordes gather in occupied Europe, a powerful armada of airborne attack vehicles emerge to fight them. Heroes have to fight the undead from the skies or else face the devastating consequences.
-
- Jakob Keseberg grew up in Germany as a creative hobbyist with a passion for game development. He was already making video games at the age of 12, inspired by old arcade titles. Later he founded a company called Geek Bytes Studio, and to this day he continues to develop games.
-
- After finishing school he moved to Hamburg to work as a web developer. In 2011 Jakob Keseberg started studying to be a game developer and took his first steps in the game industry by creating the text adventure game "The Vampyricium" and the social game "Heroes of Shadow".
-
- ZOMBIE ATTACK (2014) (click the thumbnail for the official trailer)
-
- REAL-TIME STRATEGY WITH A HISTORICAL BACKGROUND
-
- The game puts the player in the shoes of a powerful airborne commander. The player takes control of ground and air forces to lead military operations on the continent of Europe. The player creates a strategic combination of military forces to defeat the invading zombie hordes.
-
- You have the opportunity to operate military operations at the strategic level of European politics, and the challenge of making history. The year is 1530, and Europe is about to plunge into chaos. Religiously motivated wars are spreading like wildfire across the continent, while great powers like Spain, France, and England are preparing to fight for the control of the new world. During this time, new kinds of weapons become available. It is your mission to determine who will survive, and to rule the world in their place.
-
- ZOMBIE ATTACK (2014) is a strategy game inspired by the German popular saying "Zombie attack!"
-
- TO THE ROOF OF EUROPE
-
- Europe has been invaded by hordes of undead, and the continent is experiencing its worst war. Now, armed with sword, pistol, and shotgun, you will lead the only force of its kind to make history. At the helm of your military force you will shape the destiny of the continent.
-
- In ZOMBIE ATTACK (2014) you will lead your forces to the roof of Europe. Through 4fefd39f24<br />
- <br />
- <br />
- <p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Fallout 3 Crack [BEST]ed Launcher 1.7.md DELETED
@@ -1,6 +0,0 @@
- <br />
- <p>bethesda's <em>fallout 4</em> is a completely overhauled version of the fallout engine. the title is based on the "true" fallout universe established by<em> fallout 3</em> and <em>fallout new vegas</em>, and the setting takes place in boston, which has been the center of civilization in the present and future. fallout 4 will include a full open world, with large towns, a government to rule, factions to choose from, and a dynamic main quest. the game's creators say it will also be the first entry in the series to feature a linear narrative.</p>
- <h2>fallout 3 cracked launcher 1.7</h2><br /><p><b><b>Download</b> ---> <a href="https://imgfil.com/2uy19L">https://imgfil.com/2uy19L</a></b></p><br /><br />
- <p>bethesda softworks also announced a new version of the game would be released on march 27, 2011. it was available on steam on march 29, 2011, and costs $15 for a pc edition or $40 for a game plus the director's cut. the director's cut consists of six new expansion packs, including <i>fallout: new california</i>, <i>fallout: new vegas</i>, <i>fallout: brotherhood of steel</i>, <i>fallout: brotherhood of steel - the pitt</i>, <i>fallout: brotherhood of steel - point lookout</i> and <i>fallout: brotherhood of steel - mothership zeta</i>, all free of drm and released on a weekly basis. <sup id=cite_ref-3 class=reference> [3] </sup> bethesda softworks also announced a new version of the game would be released on march 27, 2011. <sup id=cite_ref-4 class=reference> [4] </sup> the game also includes an enhanced version of the <i>companion app</i>, which allows players to access the in-game radio, map and quick-travel to an area with the iphone or ipod touch. the game was also re-released as a part of a bundle pack with the first two expansion packs, which included the game and the first two expansion packs, which included the game and the first two expansion packs. </p> 899543212b<br />
- <br />
- <br />

spaces/1phancelerku/anime-remove-background/Download Free Games for MacBook Air and Experience the Thrill of Gaming.md DELETED
@@ -1,140 +0,0 @@
- <br />
- <h1>How to Download Free Games for MacBook Air</h1>
- <p>If you are a MacBook Air user, you might be wondering how to download free games for your device. After all, gaming is not only fun but also a great way to relax and unwind. However, finding free games for Mac can be challenging, as not all games are compatible with macOS or optimized for the MacBook Air's performance. In this article, we will show you how to download free games for MacBook Air using different methods, such as Steam, the App Store, and iPad games. We will also recommend some of the best free games that you can enjoy on your MacBook Air.</p>
- <h2>Install Steam on Mac</h2>
- <p>Steam is one of the most popular online gaming platforms for PC and Mac users. It offers thousands of games across various genres, many of which are free to play or have free demos. To use Steam on your Mac, you need to install the Steam app first. Here's how:</p>
- <h2>download free games for macbook air</h2><br /><p><b><b>DOWNLOAD</b> &#10027;&#10027;&#10027; <a href="https://jinyurl.com/2uNNPs">https://jinyurl.com/2uNNPs</a></b></p><br /><br />
- <ul>
- <li>Go to <a href="(^1^)">steampowered.com</a> in your browser.</li>
- <li>Click Install Steam at the top right of the webpage.</li>
- <li>Choose Install Steam.</li>
- <li>Next, click the show downloads button.</li>
- <li>Double-click on steam.dmg to launch the installer.</li>
- <li>Drag Steam into the Applications folder.</li>
- <li>Exit the window.</li>
- </ul>
- <h2>Download a Steam game on Mac</h2>
- <p>Once you have installed Steam on your Mac, you can browse and download games from its library. To download a Steam game on your Mac, follow these steps:</p>
- <ul>
- <li>Launch Steam from the Applications folder.</li>
- <li>Choose Open, if applicable.</li>
- <li>Wait for Steam to update.</li>
- <li>Create a new account or log in into an existing account if you have one.</li>
- <li>Go to Store.</li>
- <li>Browse or search for the game that you want to download. Make sure it has an Apple logo next to it, which means it is compatible with Mac.</li>
- <li>Click on the title you want to download.</li>
- <li>Choose Add to Cart.</li>
- <li>Click Purchase for myself.</li>
- <li>Select your payment method. You can also use your Steam Wallet balance if you have any.</li>
- <li>Fill out your payment information.</li>
- <li>Click Continue.</li>
- <li>Click the checkbox to agree to the terms.</li>
- <li>Click Purchase.</li>
- </ul>
- <p>From here, your game will begin to download, and you can start playing as soon as it's done. You can also manage your downloads and library from the Library tab in Steam.</p>
- <h2>Download apps from the App Store on Mac</h2>
- <p>If you prefer not to use Steam or want to find more games that are designed for macOS, you can use the App Store on your Mac. The App Store has a wide selection of apps and games for your Mac, some of which are free or have free trials. To download apps from the App Store on your Mac, follow these steps:</p>
- <ul>
- <li>Open the App Store from the Dock or the Applications folder.</li>
- <li>Sign in with your Apple ID and password if you haven't already.</li>
- <li>Go to Games.</li>
- <li>Browse or search for the game that you want to download. You can also filter by categories, ratings, prices, and more.</li>
- <li>Click on the game that you want to download.</li>
- <li>Click Get if the game is free, or click the price if it is paid.</li>
- <li>Enter your Apple ID password or use Touch ID if prompted.</li>
- <li>Wait for the game to download and install on your Mac.</li>
- </ul>
- <p>You can find your downloaded games in the Launchpad or the Applications folder. You can also manage your purchases and updates from the App Store.</p>
- <h2>Download iPad games on Mac with Apple Silicon</h2>
- <p>If you have a MacBook Air with an Apple Silicon processor, such as the M1 chip, you can also download and play iPad games on your Mac. This is because Apple Silicon Macs can run iOS apps natively, without any emulation or compatibility issues. However, not all iPad games are available for Mac, as some developers may choose to opt out of this feature. To download iPad games on your Mac with Apple Silicon, follow these steps:</p>
- <p>download free games for macbook air 2023<br />
- download free games for macbook air offline<br />
- download free games for macbook air without app store<br />
- download free games for macbook air full version<br />
- download free games for macbook air no internet<br />
- download free games for macbook air from steam<br />
- download free games for macbook air adventure<br />
- download free games for macbook air racing<br />
- download free games for macbook air puzzle<br />
- download free games for macbook air strategy<br />
- download free games for macbook air simulation<br />
- download free games for macbook air action<br />
- download free games for macbook air horror<br />
- download free games for macbook air rpg<br />
- download free games for macbook air multiplayer<br />
- download free games for macbook air shooting<br />
- download free games for macbook air sports<br />
- download free games for macbook air educational<br />
- download free games for macbook air casual<br />
- download free games for macbook air hidden object<br />
- download free games for macbook air arcade<br />
- download free games for macbook air platformer<br />
- download free games for macbook air sandbox<br />
- download free games for macbook air survival<br />
- download free games for macbook air tycoon<br />
- download free games for macbook air card<br />
- download free games for macbook air board<br />
- download free games for macbook air trivia<br />
- download free games for macbook air word<br />
- download free games for macbook air match 3<br />
- download free games for macbook air solitaire<br />
- download free games for macbook air mahjong<br />
- download free games for macbook air sudoku<br />
- download free games for macbook air crossword<br />
- download free games for macbook air chess<br />
- download free games for macbook air checkers<br />
- download free games for macbook air backgammon<br />
- download free games for macbook air minesweeper<br />
- download free games for macbook air snake<br />
- download free games for macbook air tetris<br />
- download free games for macbook air pacman<br />
- download free games for macbook air pinball<br />
- download free games for macbook air breakout<br />
- download free games for macbook air space invaders<br />
- download free games for macbook air frogger<br />
- download free games for macbook air asteroids<br />
- download free games for macbook air pong<br />
- download free games for macbook air galaga<br />
- download free games for macbook air donkey kong</p>
- <ul>
- <li>Open the App Store from the Dock or the Applications folder.</li>
- <li>Sign in with your Apple ID and password if you haven't already.</li>
- <li>Go to Games.</li>
- <li>Click on iPhone & iPad Apps in the sidebar.</li>
- <li>Browse or search for the game that you want to download. You can also filter by categories, ratings, prices, and more.</li>
- <li>Click on the game that you want to download.</li>
- <li>Click Get if the game is free, or click the price if it is paid.</li>
- <li>Enter your Apple ID password or use Touch ID if prompted.</li>
- <li>Wait for the game to download and install on your Mac.</li>
- </ul>
- <p>You can find your downloaded iPad games in the Launchpad or the Applications folder. You can also manage your purchases and updates from the App Store.</p>
- <h2>Best free games for MacBook Air</h2>
- <p>Now that you know how to download free games for MacBook Air using different methods, you might be wondering what are some of the best free games that you can play on your device. Here are some of our recommendations:</p>
- <ol>
- <li><strong>Dota 2</strong>: Dota 2 is one of the most popular and competitive multiplayer online battle arena (MOBA) games in the world. It features hundreds of heroes, each with their own unique abilities and playstyles, and a variety of game modes and maps. You can team up with friends or strangers and fight against other players in matches that can last from 20 minutes to over an hour. Dota 2 is free to play on Steam, but you can also buy cosmetic items and battle passes to enhance your experience. <a href="">Download Dota 2 here</a>.</li>
- <li><strong>Among Us</strong>: Among Us is a social deduction game that has taken the internet by storm. It is set in a spaceship where one or more impostors are trying to sabotage and kill the crewmates, while the crewmates are trying to complete tasks and find out who the impostors are. You can play online with up to 10 players, or locally with up to 4 players. Among Us is free to play on iOS devices, but you can also buy it for $4.99 on Steam or $6.99 on the App Store for Mac. <a href="">Download Among Us here</a>.</li>
- <li><strong>Fallout Shelter</strong>: Fallout Shelter is a simulation game based on the Fallout series. It puts you in charge of a vault, where you have to build rooms, assign dwellers, manage resources, and protect your vault from disasters and enemies. You can also explore the wasteland, send dwellers on quests, and collect weapons and outfits. Fallout Shelter is free to play on iOS devices, but you can also buy it for $9.99 on Steam or $14.99 on the App Store for Mac. <a href="">Download Fallout Shelter here</a>.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download free games for MacBook Air using different methods, such as Steam, the App Store, and iPad games. We have also recommended some of the best free games that you can play on your MacBook Air, such as Dota 2, Among Us, and Fallout Shelter. We hope you have found this article helpful and informative, and that you have fun playing these games on your MacBook Air. Here are some FAQs that you might have about downloading free games for MacBook Air.</p>
- <h2>FAQs</h2>
- <dl>
- <dt>How do I uninstall a game from my MacBook Air?</dt>
- <dd>To uninstall a game from your MacBook Air, you can either drag it to the Trash from the Applications folder, or use an uninstaller app that can remove all the associated files and folders. You can also delete a game from Steam or the App Store by right-clicking on it and choosing Delete or Move to Trash.</dd>
- <dt>How do I update a game on my MacBook Air?</dt>
- <dd>To update a game on your MacBook Air, you can either check for updates manually from the game's menu or settings, or enable automatic updates from Steam or the App Store. You can also check for updates from the Updates tab in the App Store, or from the Downloads tab in Steam.</dd>
- <dt>How do I optimize a game's performance on my MacBook Air?</dt>
- <dd>To optimize a game's performance on your MacBook Air, you can try some of the following tips: <ul>
- <li>Close any unnecessary apps or tabs that are running in the background.</li>
- <li>Adjust the game's graphics settings to lower the resolution, quality, or effects.</li>
- <li>Use an external monitor, keyboard, and mouse for better gaming experience.</li>
- <li>Keep your MacBook Air cool and ventilated by using a cooling pad or fan.</li>
- <li>Update your macOS and drivers to the latest versions.</li>
- </ul></dd>
- <dt>How do I play online games on my MacBook Air?</dt>
- <dd>To play online games on your MacBook Air, you need to have a stable and fast internet connection, preferably wired or Wi-Fi. You also need to have an account for the online gaming platform or service that you are using, such as Steam, Origin, Epic Games, etc. You may also need to pay a subscription fee or buy in-game currency or items for some online games.</dd>
- <dt>How do I play Windows games on my MacBook Air?</dt>
- <dd>To play Windows games on your MacBook Air, you need to use a software that can run Windows applications on Mac, such as Boot Camp, Parallels Desktop, Wine, or Crossover. However, not all Windows games are compatible with Mac, and some may have performance issues or bugs. You also need to have a valid license for Windows and enough disk space and memory for the installation.</dd>
- </dl></p> 197e85843d<br />
- <br />
- <br />

spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py DELETED
@@ -1,631 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- from typing import Callable, List, Optional, Union
-
- import numpy as np
- import paddle
- import PIL
- from packaging import version
-
- from paddlenlp.transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
- from ...configuration_utils import FrozenDict
- from ...models import AutoencoderKL, UNet2DConditionModel
- from ...pipeline_utils import DiffusionPipeline
- from ...schedulers import DDIMScheduler
- from ...utils import PIL_INTERPOLATION, deprecate, logging
- from . import StableDiffusionPipelineOutput
- from .safety_checker import StableDiffusionSafetyChecker
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
- def preprocess(image):
-     if isinstance(image, paddle.Tensor):
-         return image
-     elif isinstance(image, PIL.Image.Image):
-         image = [image]
-
-     if isinstance(image[0], PIL.Image.Image):
-         w, h = image[0].size
-         w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
-
-         image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
-         image = np.concatenate(image, axis=0)
-         image = np.array(image).astype(np.float32) / 255.0
-         image = image.transpose(0, 3, 1, 2)
-         image = 2.0 * image - 1.0
-         image = paddle.to_tensor(image)
-     elif isinstance(image[0], paddle.Tensor):
-         image = paddle.concat(image, axis=0)
-     return image
-
-
- def posterior_sample(scheduler, latents, timestep, clean_latents, generator, eta):
-     # 1. get previous step value (=t-1)
-     prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps
-
-     if prev_timestep <= 0:
-         return clean_latents
-
-     # 2. compute alphas, betas
-     alpha_prod_t = scheduler.alphas_cumprod[timestep]
-     alpha_prod_t_prev = (
-         scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod
-     )
-
-     variance = scheduler._get_variance(timestep, prev_timestep)
-     std_dev_t = eta * variance ** (0.5)
-
-     # direction pointing to x_t
-     e_t = (latents - alpha_prod_t ** (0.5) * clean_latents) / (1 - alpha_prod_t) ** (0.5)
-     dir_xt = (1.0 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * e_t
-     noise = std_dev_t * paddle.randn(clean_latents.shape, dtype=clean_latents.dtype, generator=generator)
-     prev_latents = alpha_prod_t_prev ** (0.5) * clean_latents + dir_xt + noise
-
-     return prev_latents
-
-
- def compute_noise(scheduler, prev_latents, latents, timestep, noise_pred, eta):
-     # 1. get previous step value (=t-1)
-     prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps
-
-     # 2. compute alphas, betas
-     alpha_prod_t = scheduler.alphas_cumprod[timestep]
-     alpha_prod_t_prev = (
-         scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod
-     )
-
-     beta_prod_t = 1 - alpha_prod_t
-
-     # 3. compute predicted original sample from predicted noise also called
-     # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
-     pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
-
-     # 4. Clip "predicted x_0"
-     if scheduler.config.clip_sample:
-         pred_original_sample = pred_original_sample.clip(-1, 1)
-
-     # 5. compute variance: "sigma_t(η)" -> see formula (16)
-     # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
-     variance = scheduler._get_variance(timestep, prev_timestep)
-     std_dev_t = eta * variance ** (0.5)
-
-     # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
-     pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred
-
-     noise = (prev_latents - (alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction)) / (
-         variance ** (0.5) * eta
-     )
-     return noise
-
-
- class CycleDiffusionPipeline(DiffusionPipeline):
-     r"""
-     Pipeline for text-guided image to image generation using Stable Diffusion.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular xxxx, etc.)
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
-         feature_extractor ([`CLIPFeatureExtractor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-     _optional_components = ["safety_checker", "feature_extractor"]
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: DDIMScheduler,
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPFeatureExtractor,
-         requires_safety_checker: bool = True,
-     ):
-         super().__init__()
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None and requires_safety_checker:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. PaddleNLP team, diffusers team and Hugging Face"
-                 " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-         if safety_checker is not None and feature_extractor is None:
-             raise ValueError(
-                 "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
-                 " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
-             )
-         is_unet_version_less_0_9_0 = hasattr(unet.config, "_ppdiffusers_version") and version.parse(
-             version.parse(unet.config._ppdiffusers_version).base_version
-         ) < version.parse("0.9.0.dev0")
-         is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-         if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-             deprecation_message = (
-                 "The configuration file of the unet has set the default `sample_size` to smaller than"
-                 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-                 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-                 " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-                 " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-                 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
-                 " in the config might lead to incorrect results in future versions. If you have downloaded this"
-                 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-                 " the `unet/config.json` file"
-             )
-             deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(unet.config)
-             new_config["sample_size"] = 64
-             unet._internal_dict = FrozenDict(new_config)
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-
-         self.register_to_config(requires_safety_checker=requires_safety_checker)
-
-     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
-     def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
-         r"""
-         Encodes the prompt into text encoder hidden states.
-
-         Args:
-             prompt (`str` or `list(int)`):
-                 prompt to be encoded
-             num_images_per_prompt (`int`):
-                 number of images that should be generated per prompt
-             do_classifier_free_guidance (`bool`):
-                 whether to use classifier free guidance or not
-             negative_prompt (`str` or `List[str]`):
-                 The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
-                 if `guidance_scale` is less than `1`).
-         """
-         batch_size = len(prompt) if isinstance(prompt, list) else 1
-
-         text_inputs = self.tokenizer(
-             prompt,
-             padding="max_length",
-             max_length=self.tokenizer.model_max_length,
-             truncation=True,
-             return_tensors="pd",
-         )
-         text_input_ids = text_inputs.input_ids
-         untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
-
-         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not paddle.equal_all(
-             text_input_ids, untruncated_ids
-         ):
-             removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
-             logger.warning(
-                 "The following part of your input was truncated because CLIP can only handle sequences up to"
-                 f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-             )
-
-         if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-             attention_mask = text_inputs.attention_mask
-         else:
-             attention_mask = None
-
-         text_embeddings = self.text_encoder(
-             text_input_ids,
-             attention_mask=attention_mask,
-         )
-         text_embeddings = text_embeddings[0]
-
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         bs_embed, seq_len, _ = text_embeddings.shape
-         text_embeddings = text_embeddings.tile([1, num_images_per_prompt, 1])
-         text_embeddings = text_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
-
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             max_length = text_input_ids.shape[-1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pd",
-             )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = uncond_input.attention_mask
-             else:
-                 attention_mask = None
-
-             uncond_embeddings = self.text_encoder(
-                 uncond_input.input_ids,
-                 attention_mask=attention_mask,
-             )
-             uncond_embeddings = uncond_embeddings[0]
-
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = uncond_embeddings.shape[1]
-             uncond_embeddings = uncond_embeddings.tile([1, num_images_per_prompt, 1])
-             uncond_embeddings = uncond_embeddings.reshape([batch_size * num_images_per_prompt, seq_len, -1])
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
322
- # to avoid doing two forward passes
323
- text_embeddings = paddle.concat([uncond_embeddings, text_embeddings])
324
-
325
- return text_embeddings
326
-
327
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.check_inputs
328
- def check_inputs(self, prompt, strength, callback_steps):
329
- if not isinstance(prompt, str) and not isinstance(prompt, list):
330
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
331
-
332
- if strength < 0 or strength > 1:
333
- raise ValueError(f"The value of strength should in [1.0, 1.0] but is {strength}")
334
-
335
- if (callback_steps is None) or (
336
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
337
- ):
338
- raise ValueError(
339
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
340
- f" {type(callback_steps)}."
341
- )
342
-
343
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
344
- def prepare_extra_step_kwargs(self, generator, eta):
345
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
346
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
347
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
348
- # and should be between [0, 1]
349
-
350
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
351
- extra_step_kwargs = {}
352
- if accepts_eta:
353
- extra_step_kwargs["eta"] = eta
354
-
355
- # check if the scheduler accepts generator
356
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
357
- if accepts_generator:
358
- extra_step_kwargs["generator"] = generator
359
- return extra_step_kwargs
360
-
361
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
362
- def run_safety_checker(self, image, dtype):
363
- if self.safety_checker is not None:
364
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd")
365
- image, has_nsfw_concept = self.safety_checker(
366
- images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
367
- )
368
- else:
369
- has_nsfw_concept = None
370
- return image, has_nsfw_concept
371
-
372
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
373
- def decode_latents(self, latents):
374
- latents = 1 / 0.18215 * latents
375
- image = self.vae.decode(latents).sample
376
- image = (image / 2 + 0.5).clip(0, 1)
377
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
378
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
379
- return image
380
-
381
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
382
- def get_timesteps(self, num_inference_steps, strength):
383
- # get the original timestep using init_timestep
384
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
385
-
386
- t_start = max(num_inference_steps - init_timestep, 0)
387
- timesteps = self.scheduler.timesteps[t_start:]
388
-
389
- return timesteps, num_inference_steps - t_start
390
-
391
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, generator=None):
392
- image = image.cast(dtype=dtype)
393
-
394
- batch_size = image.shape[0]
395
- if isinstance(generator, list) and len(generator) != batch_size:
396
- raise ValueError(
397
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
398
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
399
- )
400
-
401
- if isinstance(generator, list):
402
- init_latents = [
403
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
404
- ]
405
- init_latents = paddle.concat(init_latents, axis=0)
406
- else:
407
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
408
- init_latents = 0.18215 * init_latents
409
-
410
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
411
- # expand init_latents for batch_size
412
- deprecation_message = (
413
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
414
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
415
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
416
- " your script to pass as many initial images as text prompts to suppress this warning."
417
- )
418
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
419
- additional_image_per_prompt = batch_size // init_latents.shape[0]
420
- init_latents = paddle.concat([init_latents] * additional_image_per_prompt * num_images_per_prompt, axis=0)
421
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
422
- raise ValueError(
423
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
424
- )
425
- else:
426
- init_latents = paddle.concat([init_latents] * num_images_per_prompt, axis=0)
427
-
428
- # add noise to latents using the timestep
429
- shape = init_latents.shape
430
- if isinstance(generator, list):
431
- shape = [
432
- 1,
433
- ] + shape[1:]
434
- noise = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)]
435
- noise = paddle.concat(noise, axis=0)
436
- else:
437
- noise = paddle.randn(shape, generator=generator, dtype=dtype)
438
-
439
- # get latents
440
- clean_latents = init_latents
441
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
442
- latents = init_latents
443
-
444
- return latents, clean_latents
445
-
446
- @paddle.no_grad()
447
- def __call__(
448
- self,
449
- prompt: Union[str, List[str]],
450
- source_prompt: Union[str, List[str]],
451
- image: Union[paddle.Tensor, PIL.Image.Image] = None,
452
- strength: float = 0.8,
453
- num_inference_steps: Optional[int] = 50,
454
- guidance_scale: Optional[float] = 7.5,
455
- source_guidance_scale: Optional[float] = 1,
456
- num_images_per_prompt: Optional[int] = 1,
457
- eta: Optional[float] = 0.1,
458
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
459
- output_type: Optional[str] = "pil",
460
- return_dict: bool = True,
461
- callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
462
- callback_steps: Optional[int] = 1,
463
- ):
464
- r"""
465
- Function invoked when calling the pipeline for generation.
466
-
467
- Args:
468
- prompt (`str` or `List[str]`):
469
- The prompt or prompts to guide the image generation.
470
- image (`paddle.Tensor` or `PIL.Image.Image`):
471
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
472
- process.
473
- strength (`float`, *optional*, defaults to 0.8):
474
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
475
- `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
476
- number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
477
- noise will be maximum and the denoising process will run for the full number of iterations specified in
478
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
479
- num_inference_steps (`int`, *optional*, defaults to 50):
480
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
481
- expense of slower inference. This parameter will be modulated by `strength`.
482
- guidance_scale (`float`, *optional*, defaults to 7.5):
483
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
484
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
485
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
486
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
487
- usually at the expense of lower image quality.
488
- source_guidance_scale (`float`, *optional*, defaults to 1):
489
- Guidance scale for the source prompt. This is useful to control the amount of influence the source
490
- prompt for encoding.
491
- num_images_per_prompt (`int`, *optional*, defaults to 1):
492
- The number of images to generate per prompt.
493
- eta (`float`, *optional*, defaults to 0.1):
494
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
495
- [`schedulers.DDIMScheduler`], will be ignored for others.
496
- generator (`paddle.Generator`, *optional*):
497
- One or a list of paddle generator(s) to make generation deterministic.
498
- output_type (`str`, *optional*, defaults to `"pil"`):
499
- The output format of the generate image. Choose between
500
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
501
- return_dict (`bool`, *optional*, defaults to `True`):
502
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
503
- plain tuple.
504
- callback (`Callable`, *optional*):
505
- A function that will be called every `callback_steps` steps during inference. The function will be
506
- called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
507
- callback_steps (`int`, *optional*, defaults to 1):
508
- The frequency at which the `callback` function will be called. If not specified, the callback will be
509
- called at every step.
510
-
511
- Returns:
512
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
513
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
514
- When returning a tuple, the first element is a list with the generated images, and the second element is a
515
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
516
- (nsfw) content, according to the `safety_checker`.
517
- """
518
- # 1. Check inputs
519
- self.check_inputs(prompt, strength, callback_steps)
520
-
521
- # 2. Define call parameters
522
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
523
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
524
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
525
- # corresponds to doing no classifier free guidance.
526
- do_classifier_free_guidance = guidance_scale > 1.0
527
-
528
- # 3. Encode input prompt
529
- text_embeddings = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance, None)
530
- source_text_embeddings = self._encode_prompt(
531
- source_prompt, num_images_per_prompt, do_classifier_free_guidance, None
532
- )
533
-
534
- # 4. Preprocess image
535
- image = preprocess(image)
536
-
537
- # 5. Prepare timesteps
538
- self.scheduler.set_timesteps(num_inference_steps)
539
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
540
- latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt])
541
-
542
- # 6. Prepare latent variables
543
- latents, clean_latents = self.prepare_latents(
544
- image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, generator
545
- )
546
- source_latents = latents
547
-
548
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
549
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
550
- generator = extra_step_kwargs.pop("generator", None)
551
-
552
- # 8. Denoising loop
553
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
554
- with self.progress_bar(total=num_inference_steps) as progress_bar:
555
- for i, t in enumerate(timesteps):
556
- # expand the latents if we are doing classifier free guidance
557
- latent_model_input = paddle.concat([latents] * 2)
558
- source_latent_model_input = paddle.concat([source_latents] * 2)
559
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
560
- source_latent_model_input = self.scheduler.scale_model_input(source_latent_model_input, t)
561
-
562
- # predict the noise residual
563
- concat_latent_model_input = paddle.stack(
564
- [
565
- source_latent_model_input[0],
566
- latent_model_input[0],
567
- source_latent_model_input[1],
568
- latent_model_input[1],
569
- ],
570
- axis=0,
571
- )
572
- concat_text_embeddings = paddle.stack(
573
- [
574
- source_text_embeddings[0],
575
- text_embeddings[0],
576
- source_text_embeddings[1],
577
- text_embeddings[1],
578
- ],
579
- axis=0,
580
- )
581
- concat_noise_pred = self.unet(
582
- concat_latent_model_input, t, encoder_hidden_states=concat_text_embeddings
583
- ).sample
584
-
585
- # perform guidance
586
- (
587
- source_noise_pred_uncond,
588
- noise_pred_uncond,
589
- source_noise_pred_text,
590
- noise_pred_text,
591
- ) = concat_noise_pred.chunk(4, axis=0)
592
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
593
- source_noise_pred = source_noise_pred_uncond + source_guidance_scale * (
594
- source_noise_pred_text - source_noise_pred_uncond
595
- )
596
-
597
- # Sample source_latents from the posterior distribution.
598
- prev_source_latents = posterior_sample(
599
- self.scheduler, source_latents, t, clean_latents, generator=generator, **extra_step_kwargs
600
- )
601
- # Compute noise.
602
- noise = compute_noise(
603
- self.scheduler, prev_source_latents, source_latents, t, source_noise_pred, **extra_step_kwargs
604
- )
605
- source_latents = prev_source_latents
606
-
607
- # compute the previous noisy sample x_t -> x_t-1
608
- latents = self.scheduler.step(
609
- noise_pred, t, latents, variance_noise=noise, **extra_step_kwargs
610
- ).prev_sample
611
-
612
- # call the callback, if provided
613
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
614
- progress_bar.update()
615
- if callback is not None and i % callback_steps == 0:
616
- callback(i, t, latents)
617
-
618
- # 9. Post-processing
619
- image = self.decode_latents(latents)
620
-
621
- # 10. Run safety checker
622
- image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype)
623
-
624
- # 11. Convert to PIL
625
- if output_type == "pil":
626
- image = self.numpy_to_pil(image)
627
-
628
- if not return_dict:
629
- return (image, has_nsfw_concept)
630
-
631
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
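For reference, the deleted pipeline above implements CycleDiffusion-style image editing: it encodes both a `source_prompt` describing the input image and a `prompt` describing the desired edit, then denoises with guidance on both. Below is a minimal, untested usage sketch; the `CycleDiffusionPipeline` export name, the DDIM scheduler requirement, and the checkpoint id are assumptions for illustration and are not taken from this diff.

import PIL.Image
from ppdiffusers import CycleDiffusionPipeline, DDIMScheduler  # assumed export names

# assumed checkpoint; the posterior-sampling step expects a DDIM-style scheduler
scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", scheduler=scheduler)

init_image = PIL.Image.open("input.png").convert("RGB").resize((512, 512))
result = pipe(
    prompt="a black colored car",        # target description
    source_prompt="a blue colored car",  # description of the input image
    image=init_image,
    strength=0.85,
    guidance_scale=7.5,
    source_guidance_scale=1.0,
    eta=0.1,
)
result.images[0].save("output.png")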
spaces/7thHeaven/ochyai_food/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: ochyai_food
3
- emoji: 🍛
4
- colorFrom: yellow
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.19.1
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: ochyai/ochyai_food
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/801artistry/RVC801/infer/lib/infer_pack/onnx_inference.py DELETED
@@ -1,149 +0,0 @@
1
- import librosa
2
- import numpy as np
3
- import onnxruntime
4
- import soundfile
5
-
6
- import logging
7
-
8
- logger = logging.getLogger(__name__)
9
-
10
-
11
- class ContentVec:
12
- def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
13
- logger.info("Load model(s) from {}".format(vec_path))
14
- if device == "cpu" or device is None:
15
- providers = ["CPUExecutionProvider"]
16
- elif device == "cuda":
17
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
18
- elif device == "dml":
19
- providers = ["DmlExecutionProvider"]
20
- else:
21
- raise RuntimeError("Unsportted Device")
22
- self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
23
-
24
- def __call__(self, wav):
25
- return self.forward(wav)
26
-
27
- def forward(self, wav):
28
- feats = wav
29
- if feats.ndim == 2: # double channels
30
- feats = feats.mean(-1)
31
- assert feats.ndim == 1, feats.ndim
32
- feats = np.expand_dims(np.expand_dims(feats, 0), 0)
33
- onnx_input = {self.model.get_inputs()[0].name: feats}
34
- logits = self.model.run(None, onnx_input)[0]
35
- return logits.transpose(0, 2, 1)
36
-
37
-
38
- def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
39
- if f0_predictor == "pm":
40
- from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
41
-
42
- f0_predictor_object = PMF0Predictor(
43
- hop_length=hop_length, sampling_rate=sampling_rate
44
- )
45
- elif f0_predictor == "harvest":
46
- from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
47
- HarvestF0Predictor,
48
- )
49
-
50
- f0_predictor_object = HarvestF0Predictor(
51
- hop_length=hop_length, sampling_rate=sampling_rate
52
- )
53
- elif f0_predictor == "dio":
54
- from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
55
-
56
- f0_predictor_object = DioF0Predictor(
57
- hop_length=hop_length, sampling_rate=sampling_rate
58
- )
59
- else:
60
- raise Exception("Unknown f0 predictor")
61
- return f0_predictor_object
62
-
63
-
64
- class OnnxRVC:
65
- def __init__(
66
- self,
67
- model_path,
68
- sr=40000,
69
- hop_size=512,
70
- vec_path="vec-768-layer-12",
71
- device="cpu",
72
- ):
73
- vec_path = f"pretrained/{vec_path}.onnx"
74
- self.vec_model = ContentVec(vec_path, device)
75
- if device == "cpu" or device is None:
76
- providers = ["CPUExecutionProvider"]
77
- elif device == "cuda":
78
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
79
- elif device == "dml":
80
- providers = ["DmlExecutionProvider"]
81
- else:
82
- raise RuntimeError("Unsportted Device")
83
- self.model = onnxruntime.InferenceSession(model_path, providers=providers)
84
- self.sampling_rate = sr
85
- self.hop_size = hop_size
86
-
87
- def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
88
- onnx_input = {
89
- self.model.get_inputs()[0].name: hubert,
90
- self.model.get_inputs()[1].name: hubert_length,
91
- self.model.get_inputs()[2].name: pitch,
92
- self.model.get_inputs()[3].name: pitchf,
93
- self.model.get_inputs()[4].name: ds,
94
- self.model.get_inputs()[5].name: rnd,
95
- }
96
- return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
97
-
98
- def inference(
99
- self,
100
- raw_path,
101
- sid,
102
- f0_method="dio",
103
- f0_up_key=0,
104
- pad_time=0.5,
105
- cr_threshold=0.02,
106
- ):
107
- f0_min = 50
108
- f0_max = 1100
109
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
110
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
111
- f0_predictor = get_f0_predictor(
112
- f0_method,
113
- hop_length=self.hop_size,
114
- sampling_rate=self.sampling_rate,
115
- threshold=cr_threshold,
116
- )
117
- wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
118
- org_length = len(wav)
119
- if org_length / sr > 50.0:
120
- raise RuntimeError("Reached Max Length")
121
-
122
- wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
123
- wav16k = wav16k
124
-
125
- hubert = self.vec_model(wav16k)
126
- hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
127
- hubert_length = hubert.shape[1]
128
-
129
- pitchf = f0_predictor.compute_f0(wav, hubert_length)
130
- pitchf = pitchf * 2 ** (f0_up_key / 12)
131
- pitch = pitchf.copy()
132
- f0_mel = 1127 * np.log(1 + pitch / 700)
133
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
134
- f0_mel_max - f0_mel_min
135
- ) + 1
136
- f0_mel[f0_mel <= 1] = 1
137
- f0_mel[f0_mel > 255] = 255
138
- pitch = np.rint(f0_mel).astype(np.int64)
139
-
140
- pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
141
- pitch = pitch.reshape(1, len(pitch))
142
- ds = np.array([sid]).astype(np.int64)
143
-
144
- rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
145
- hubert_length = np.array([hubert_length]).astype(np.int64)
146
-
147
- out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
148
- out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
149
- return out_wav[0:org_length]
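The class above wraps an exported RVC voice-conversion model plus a ContentVec feature extractor behind ONNX Runtime. A minimal sketch of driving it, assuming the module is importable; the model path, feature-extractor name, and speaker id below are placeholders.

import soundfile

# paths and sid are placeholders; vec_path is resolved to pretrained/<name>.onnx inside the class
model = OnnxRVC("model.onnx", sr=40000, hop_size=512, vec_path="vec-768-layer-12", device="cpu")
out_wav = model.inference("input.wav", sid=0, f0_method="dio", f0_up_key=0)  # int16 waveform at 40 kHz
soundfile.write("converted.wav", out_wav, 40000)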
spaces/AIFILMS/StyleGANEX/models/stylegan2/op_ori/upfirdn2d.cpp DELETED
@@ -1,23 +0,0 @@
1
- #include <torch/extension.h>
2
-
3
-
4
- torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
5
- int up_x, int up_y, int down_x, int down_y,
6
- int pad_x0, int pad_x1, int pad_y0, int pad_y1);
7
-
8
- #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
9
- #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
10
- #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
11
-
12
- torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
13
- int up_x, int up_y, int down_x, int down_y,
14
- int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
15
- CHECK_CUDA(input);
16
- CHECK_CUDA(kernel);
17
-
18
- return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
19
- }
20
-
21
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
22
- m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
23
- }
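This binding only declares the Python-facing entry point; the `upfirdn2d_op` kernel lives in a companion CUDA source file. A sketch of how such an op is typically JIT-compiled from Python, assuming a matching `upfirdn2d_kernel.cu` sits next to this file (that file name is an assumption):

from torch.utils.cpp_extension import load

# JIT-compile the extension; requires a CUDA toolchain and the companion kernel source
upfirdn2d_ext = load(name="upfirdn2d", sources=["upfirdn2d.cpp", "upfirdn2d_kernel.cu"])

# The raw op is normally called through the Python-side upfirdn2d wrapper, which reshapes
# the input to (batch * channel, height, width, 1) before invoking:
# out = upfirdn2d_ext.upfirdn2d(inp, kernel, up_x, up_y, down_x, down_y,
#                               pad_x0, pad_x1, pad_y0, pad_y1)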
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/diffusionmodules/__init__.py DELETED
File without changes
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/predict.py DELETED
@@ -1,90 +0,0 @@
1
- import os
2
- from torch.utils.data import DataLoader
3
- import torchvision
4
- from tqdm import tqdm
5
- from dataset import VGGSound
6
- import torch
7
- import torch.nn as nn
8
- from metrics import metrics
9
- from omegaconf import OmegaConf
10
- from model import VGGishish
11
- from transforms import Crop, StandardNormalizeAudio, ToTensor
12
-
13
-
14
- if __name__ == '__main__':
15
- cfg_cli = OmegaConf.from_cli()
16
- print(cfg_cli.config)
17
- cfg_yml = OmegaConf.load(cfg_cli.config)
18
- # the latter arguments are prioritized
19
- cfg = OmegaConf.merge(cfg_yml, cfg_cli)
20
- OmegaConf.set_readonly(cfg, True)
21
- print(OmegaConf.to_yaml(cfg))
22
-
23
- # logger = LoggerWithTBoard(cfg)
24
- transforms = [
25
- StandardNormalizeAudio(cfg.mels_path),
26
- ToTensor(),
27
- ]
28
- if cfg.cropped_size not in [None, 'None', 'none']:
29
- transforms.append(Crop(cfg.cropped_size))
30
- transforms = torchvision.transforms.transforms.Compose(transforms)
31
-
32
- datasets = {
33
- 'test': VGGSound('test', cfg.mels_path, transforms),
34
- }
35
-
36
- loaders = {
37
- 'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
38
- num_workers=cfg.num_workers, pin_memory=True)
39
- }
40
-
41
- device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
42
- model = VGGishish(cfg.conv_layers, cfg.use_bn, num_classes=len(datasets['test'].target2label))
43
- model = model.to(device)
44
-
45
- optimizer = torch.optim.Adam(model.parameters(), lr=cfg.learning_rate)
46
- criterion = nn.CrossEntropyLoss()
47
-
48
- # loading the best model
49
- folder_name = os.path.split(cfg.config)[0].split('/')[-1]
50
- print(folder_name)
51
- ckpt = torch.load(f'./logs/{folder_name}/vggishish-{folder_name}.pt', map_location='cpu')
52
- model.load_state_dict(ckpt['model'])
53
- print((f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}'))
54
-
55
- # Testing the model
56
- model.eval()
57
- running_loss = 0
58
- preds_from_each_batch = []
59
- targets_from_each_batch = []
60
-
61
- for i, batch in enumerate(tqdm(loaders['test'])):
62
- inputs = batch['input'].to(device)
63
- targets = batch['target'].to(device)
64
-
65
- # zero the parameter gradients
66
- optimizer.zero_grad()
67
-
68
- # forward + backward + optimize
69
- with torch.set_grad_enabled(False):
70
- outputs = model(inputs)
71
- loss = criterion(outputs, targets)
72
-
73
- # loss
74
- running_loss += loss.item()
75
-
76
- # for metrics calculation later on
77
- preds_from_each_batch += [outputs.detach().cpu()]
78
- targets_from_each_batch += [targets.cpu()]
79
-
80
- # logging metrics
81
- preds_from_each_batch = torch.cat(preds_from_each_batch)
82
- targets_from_each_batch = torch.cat(targets_from_each_batch)
83
- test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
84
- test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
85
- test_metrics_dict['param_num'] = sum(p.numel() for p in model.parameters() if p.requires_grad)
86
-
87
- # TODO: I have no idea why tboard doesn't keep metrics (hparams) in a tensorboard when
88
- # I run this experiment from cli: `python main.py config=./configs/vggish.yaml`
89
- # while when I run it in vscode debugger the metrics are present in the tboard (weird)
90
- print(test_metrics_dict)
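As the comment near the top of the script notes, CLI arguments take precedence over the YAML config after the merge. A tiny self-contained illustration of that behaviour (the keys below are made up, not taken from this repository):

from omegaconf import OmegaConf

cfg_yml = OmegaConf.create({"batch_size": 32, "device": "cuda:0"})
cfg_cli = OmegaConf.from_dotlist(["batch_size=16"])  # stands in for `python predict.py batch_size=16`
cfg = OmegaConf.merge(cfg_yml, cfg_cli)              # the later arguments win
assert cfg.batch_size == 16 and cfg.device == "cuda:0"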
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb16_cifar100.py DELETED
@@ -1,19 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/resnet50_cifar.py',
3
- '../_base_/datasets/cifar100_bs16.py',
4
- '../_base_/schedules/cifar10_bs128.py',
5
- '../_base_/default_runtime.py',
6
- ]
7
-
8
- # model settings
9
- model = dict(head=dict(num_classes=100))
10
-
11
- # schedule settings
12
- optim_wrapper = dict(optimizer=dict(weight_decay=0.0005))
13
-
14
- param_scheduler = dict(
15
- type='MultiStepLR',
16
- by_epoch=True,
17
- milestones=[60, 120, 160],
18
- gamma=0.2,
19
- )
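The config above only overrides a few fields; everything else comes from the `_base_` files. A sketch of how such a config is materialized with mmengine, assuming the base configs are present on disk (the path below is illustrative):

from mmengine.config import Config

cfg = Config.fromfile("configs/resnet/resnet50_8xb16_cifar100.py")
print(cfg.model.head.num_classes)                # 100, overriding the CIFAR base model
print(cfg.optim_wrapper.optimizer.weight_decay)  # 0.0005 from the override above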
spaces/Adapter/CoAdapter/ldm/models/autoencoder.py DELETED
@@ -1,211 +0,0 @@
1
- import torch
2
- import pytorch_lightning as pl
3
- import torch.nn.functional as F
4
- import torch.nn as nn
5
- from contextlib import contextmanager
6
-
7
- from ldm.modules.diffusionmodules.model import Encoder, Decoder
8
- from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
9
-
10
- from ldm.util import instantiate_from_config
11
- from ldm.modules.ema import LitEma
12
-
13
-
14
- class AutoencoderKL(pl.LightningModule):
15
- def __init__(self,
16
- ddconfig,
17
- lossconfig,
18
- embed_dim,
19
- ckpt_path=None,
20
- ignore_keys=[],
21
- image_key="image",
22
- colorize_nlabels=None,
23
- monitor=None,
24
- ema_decay=None,
25
- learn_logvar=False
26
- ):
27
- super().__init__()
28
- self.learn_logvar = learn_logvar
29
- self.image_key = image_key
30
- self.encoder = Encoder(**ddconfig)
31
- self.decoder = Decoder(**ddconfig)
32
- self.loss = instantiate_from_config(lossconfig)
33
- assert ddconfig["double_z"]
34
- self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
35
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
36
- self.embed_dim = embed_dim
37
- if colorize_nlabels is not None:
38
- assert type(colorize_nlabels)==int
39
- self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
40
- if monitor is not None:
41
- self.monitor = monitor
42
-
43
- self.use_ema = ema_decay is not None
44
- if self.use_ema:
45
- self.ema_decay = ema_decay
46
- assert 0. < ema_decay < 1.
47
- self.model_ema = LitEma(self, decay=ema_decay)
48
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
49
-
50
- if ckpt_path is not None:
51
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
52
-
53
- def init_from_ckpt(self, path, ignore_keys=list()):
54
- sd = torch.load(path, map_location="cpu")["state_dict"]
55
- keys = list(sd.keys())
56
- for k in keys:
57
- for ik in ignore_keys:
58
- if k.startswith(ik):
59
- print("Deleting key {} from state_dict.".format(k))
60
- del sd[k]
61
- self.load_state_dict(sd, strict=False)
62
- print(f"Restored from {path}")
63
-
64
- @contextmanager
65
- def ema_scope(self, context=None):
66
- if self.use_ema:
67
- self.model_ema.store(self.parameters())
68
- self.model_ema.copy_to(self)
69
- if context is not None:
70
- print(f"{context}: Switched to EMA weights")
71
- try:
72
- yield None
73
- finally:
74
- if self.use_ema:
75
- self.model_ema.restore(self.parameters())
76
- if context is not None:
77
- print(f"{context}: Restored training weights")
78
-
79
- def on_train_batch_end(self, *args, **kwargs):
80
- if self.use_ema:
81
- self.model_ema(self)
82
-
83
- def encode(self, x):
84
- h = self.encoder(x)
85
- moments = self.quant_conv(h)
86
- posterior = DiagonalGaussianDistribution(moments)
87
- return posterior
88
-
89
- def decode(self, z):
90
- z = self.post_quant_conv(z)
91
- dec = self.decoder(z)
92
- return dec
93
-
94
- def forward(self, input, sample_posterior=True):
95
- posterior = self.encode(input)
96
- if sample_posterior:
97
- z = posterior.sample()
98
- else:
99
- z = posterior.mode()
100
- dec = self.decode(z)
101
- return dec, posterior
102
-
103
- def get_input(self, batch, k):
104
- x = batch[k]
105
- if len(x.shape) == 3:
106
- x = x[..., None]
107
- x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
108
- return x
109
-
110
- def training_step(self, batch, batch_idx, optimizer_idx):
111
- inputs = self.get_input(batch, self.image_key)
112
- reconstructions, posterior = self(inputs)
113
-
114
- if optimizer_idx == 0:
115
- # train encoder+decoder+logvar
116
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
117
- last_layer=self.get_last_layer(), split="train")
118
- self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
119
- self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
120
- return aeloss
121
-
122
- if optimizer_idx == 1:
123
- # train the discriminator
124
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
125
- last_layer=self.get_last_layer(), split="train")
126
-
127
- self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
128
- self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
129
- return discloss
130
-
131
- def validation_step(self, batch, batch_idx):
132
- log_dict = self._validation_step(batch, batch_idx)
133
- with self.ema_scope():
134
- log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
135
- return log_dict
136
-
137
- def _validation_step(self, batch, batch_idx, postfix=""):
138
- inputs = self.get_input(batch, self.image_key)
139
- reconstructions, posterior = self(inputs)
140
- aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
141
- last_layer=self.get_last_layer(), split="val"+postfix)
142
-
143
- discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
144
- last_layer=self.get_last_layer(), split="val"+postfix)
145
-
146
- self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
147
- self.log_dict(log_dict_ae)
148
- self.log_dict(log_dict_disc)
149
- return self.log_dict
150
-
151
- def configure_optimizers(self):
152
- lr = self.learning_rate
153
- ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
154
- self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
155
- if self.learn_logvar:
156
- print(f"{self.__class__.__name__}: Learning logvar")
157
- ae_params_list.append(self.loss.logvar)
158
- opt_ae = torch.optim.Adam(ae_params_list,
159
- lr=lr, betas=(0.5, 0.9))
160
- opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
161
- lr=lr, betas=(0.5, 0.9))
162
- return [opt_ae, opt_disc], []
163
-
164
- def get_last_layer(self):
165
- return self.decoder.conv_out.weight
166
-
167
- @torch.no_grad()
168
- def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
169
- log = dict()
170
- x = self.get_input(batch, self.image_key)
171
- x = x.to(self.device)
172
- if not only_inputs:
173
- xrec, posterior = self(x)
174
- if x.shape[1] > 3:
175
- # colorize with random projection
176
- assert xrec.shape[1] > 3
177
- x = self.to_rgb(x)
178
- xrec = self.to_rgb(xrec)
179
- log["samples"] = self.decode(torch.randn_like(posterior.sample()))
180
- log["reconstructions"] = xrec
181
- log["inputs"] = x
182
- return log
183
-
184
- def to_rgb(self, x):
185
- assert self.image_key == "segmentation"
186
- if not hasattr(self, "colorize"):
187
- self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
188
- x = F.conv2d(x, weight=self.colorize)
189
- x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
190
- return x
191
-
192
-
193
- class IdentityFirstStage(nn.Module):
194
- def __init__(self, *args, vq_interface=False, **kwargs):
195
- self.vq_interface = vq_interface
196
- super().__init__()
197
-
198
- def encode(self, x, *args, **kwargs):
199
- return x
200
-
201
- def decode(self, x, *args, **kwargs):
202
- return x
203
-
204
- def quantize(self, x, *args, **kwargs):
205
- if self.vq_interface:
206
- return x, None, [None, None, None]
207
- return x
208
-
209
- def forward(self, x, *args, **kwargs):
210
- return x
211
-
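A minimal encode/decode sketch for the `AutoencoderKL` above. The `ddconfig` mirrors the usual ldm first-stage settings and the loss is stubbed out with `torch.nn.Identity`; both are assumptions for illustration, and no checkpoint is loaded.

import torch

ddconfig = dict(double_z=True, z_channels=4, resolution=256, in_channels=3, out_ch=3,
                ch=128, ch_mult=(1, 2, 4, 4), num_res_blocks=2, attn_resolutions=[], dropout=0.0)
lossconfig = {"target": "torch.nn.Identity"}  # stub so instantiate_from_config has something to build
ae = AutoencoderKL(ddconfig, lossconfig, embed_dim=4)

x = torch.randn(1, 3, 256, 256)
posterior = ae.encode(x)             # DiagonalGaussianDistribution over the latent
rec = ae.decode(posterior.sample())  # (1, 3, 256, 256) reconstruction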
spaces/Adapter/CoAdapter/ldm/models/diffusion/dpm_solver/sampler.py DELETED
@@ -1,87 +0,0 @@
1
- """SAMPLING ONLY."""
2
- import torch
3
-
4
- from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
5
-
6
-
7
- MODEL_TYPES = {
8
- "eps": "noise",
9
- "v": "v"
10
- }
11
-
12
-
13
- class DPMSolverSampler(object):
14
- def __init__(self, model, **kwargs):
15
- super().__init__()
16
- self.model = model
17
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
18
- self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
19
-
20
- def register_buffer(self, name, attr):
21
- if type(attr) == torch.Tensor:
22
- if attr.device != torch.device("cuda"):
23
- attr = attr.to(torch.device("cuda"))
24
- setattr(self, name, attr)
25
-
26
- @torch.no_grad()
27
- def sample(self,
28
- S,
29
- batch_size,
30
- shape,
31
- conditioning=None,
32
- callback=None,
33
- normals_sequence=None,
34
- img_callback=None,
35
- quantize_x0=False,
36
- eta=0.,
37
- mask=None,
38
- x0=None,
39
- temperature=1.,
40
- noise_dropout=0.,
41
- score_corrector=None,
42
- corrector_kwargs=None,
43
- verbose=True,
44
- x_T=None,
45
- log_every_t=100,
46
- unconditional_guidance_scale=1.,
47
- unconditional_conditioning=None,
48
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
49
- **kwargs
50
- ):
51
- if conditioning is not None:
52
- if isinstance(conditioning, dict):
53
- cbs = conditioning[list(conditioning.keys())[0]].shape[0]
54
- if cbs != batch_size:
55
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
56
- else:
57
- if conditioning.shape[0] != batch_size:
58
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
59
-
60
- # sampling
61
- C, H, W = shape
62
- size = (batch_size, C, H, W)
63
-
64
- print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')
65
-
66
- device = self.model.betas.device
67
- if x_T is None:
68
- img = torch.randn(size, device=device)
69
- else:
70
- img = x_T
71
-
72
- ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
73
-
74
- model_fn = model_wrapper(
75
- lambda x, t, c: self.model.apply_model(x, t, c),
76
- ns,
77
- model_type=MODEL_TYPES[self.model.parameterization],
78
- guidance_type="classifier-free",
79
- condition=conditioning,
80
- unconditional_condition=unconditional_conditioning,
81
- guidance_scale=unconditional_guidance_scale,
82
- )
83
-
84
- dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
85
- x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True)
86
-
87
- return x.to(device), None
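A hedged sketch of how this sampler is typically driven. Here `ldm_model`, `c`, and `uc` are assumed to be a loaded LatentDiffusion model and conditioning tensors produced by its `get_learned_conditioning`; none of them are defined in this diff, and a GPU is assumed since the sampler moves its buffers to CUDA.

sampler = DPMSolverSampler(ldm_model)
samples, _ = sampler.sample(
    S=25,                  # solver steps
    batch_size=1,
    shape=(4, 64, 64),     # latent C, H, W for a 512x512 image
    conditioning=c,
    unconditional_guidance_scale=7.5,
    unconditional_conditioning=uc,
)
images = ldm_model.decode_first_stage(samples)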
spaces/Adapter/CoAdapter/ldm/modules/diffusionmodules/openaimodel.py DELETED
@@ -1,798 +0,0 @@
1
- from abc import abstractmethod
2
- import math
3
- import torch
4
-
5
- import numpy as np
6
- import torch as th
7
- import torch.nn as nn
8
- import torch.nn.functional as F
9
-
10
- from ldm.modules.diffusionmodules.util import (
11
- checkpoint,
12
- conv_nd,
13
- linear,
14
- avg_pool_nd,
15
- zero_module,
16
- normalization,
17
- timestep_embedding,
18
- )
19
- from ldm.modules.attention import SpatialTransformer
20
- from ldm.util import exists
21
-
22
-
23
- # dummy replace
24
- def convert_module_to_f16(x):
25
- pass
26
-
27
- def convert_module_to_f32(x):
28
- pass
29
-
30
-
31
- ## go
32
- class AttentionPool2d(nn.Module):
33
- """
34
- Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
35
- """
36
-
37
- def __init__(
38
- self,
39
- spacial_dim: int,
40
- embed_dim: int,
41
- num_heads_channels: int,
42
- output_dim: int = None,
43
- ):
44
- super().__init__()
45
- self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
46
- self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
47
- self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
48
- self.num_heads = embed_dim // num_heads_channels
49
- self.attention = QKVAttention(self.num_heads)
50
-
51
- def forward(self, x):
52
- b, c, *_spatial = x.shape
53
- x = x.reshape(b, c, -1) # NC(HW)
54
- x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
55
- x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
56
- x = self.qkv_proj(x)
57
- x = self.attention(x)
58
- x = self.c_proj(x)
59
- return x[:, :, 0]
60
-
61
-
62
- class TimestepBlock(nn.Module):
63
- """
64
- Any module where forward() takes timestep embeddings as a second argument.
65
- """
66
-
67
- @abstractmethod
68
- def forward(self, x, emb):
69
- """
70
- Apply the module to `x` given `emb` timestep embeddings.
71
- """
72
-
73
-
74
- class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
75
- """
76
- A sequential module that passes timestep embeddings to the children that
77
- support it as an extra input.
78
- """
79
-
80
- def forward(self, x, emb, context=None):
81
- for layer in self:
82
- if isinstance(layer, TimestepBlock):
83
- x = layer(x, emb)
84
- elif isinstance(layer, SpatialTransformer):
85
- x = layer(x, context)
86
- else:
87
- x = layer(x)
88
- return x
89
-
90
-
91
- class Upsample(nn.Module):
92
- """
93
- An upsampling layer with an optional convolution.
94
- :param channels: channels in the inputs and outputs.
95
- :param use_conv: a bool determining if a convolution is applied.
96
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
97
- upsampling occurs in the inner-two dimensions.
98
- """
99
-
100
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
101
- super().__init__()
102
- self.channels = channels
103
- self.out_channels = out_channels or channels
104
- self.use_conv = use_conv
105
- self.dims = dims
106
- if use_conv:
107
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
108
-
109
- def forward(self, x):
110
- assert x.shape[1] == self.channels
111
- if self.dims == 3:
112
- x = F.interpolate(
113
- x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
114
- )
115
- else:
116
- x = F.interpolate(x, scale_factor=2, mode="nearest")
117
- if self.use_conv:
118
- x = self.conv(x)
119
- return x
120
-
121
- class TransposedUpsample(nn.Module):
122
- 'Learned 2x upsampling without padding'
123
- def __init__(self, channels, out_channels=None, ks=5):
124
- super().__init__()
125
- self.channels = channels
126
- self.out_channels = out_channels or channels
127
-
128
- self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
129
-
130
- def forward(self,x):
131
- return self.up(x)
132
-
133
-
134
- class Downsample(nn.Module):
135
- """
136
- A downsampling layer with an optional convolution.
137
- :param channels: channels in the inputs and outputs.
138
- :param use_conv: a bool determining if a convolution is applied.
139
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
140
- downsampling occurs in the inner-two dimensions.
141
- """
142
-
143
- def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
144
- super().__init__()
145
- self.channels = channels
146
- self.out_channels = out_channels or channels
147
- self.use_conv = use_conv
148
- self.dims = dims
149
- stride = 2 if dims != 3 else (1, 2, 2)
150
- if use_conv:
151
- self.op = conv_nd(
152
- dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
153
- )
154
- else:
155
- assert self.channels == self.out_channels
156
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
157
-
158
- def forward(self, x):
159
- assert x.shape[1] == self.channels
160
- return self.op(x)
161
-
162
-
163
- class ResBlock(TimestepBlock):
164
- """
165
- A residual block that can optionally change the number of channels.
166
- :param channels: the number of input channels.
167
- :param emb_channels: the number of timestep embedding channels.
168
- :param dropout: the rate of dropout.
169
- :param out_channels: if specified, the number of out channels.
170
- :param use_conv: if True and out_channels is specified, use a spatial
171
- convolution instead of a smaller 1x1 convolution to change the
172
- channels in the skip connection.
173
- :param dims: determines if the signal is 1D, 2D, or 3D.
174
- :param use_checkpoint: if True, use gradient checkpointing on this module.
175
- :param up: if True, use this block for upsampling.
176
- :param down: if True, use this block for downsampling.
177
- """
178
-
179
- def __init__(
180
- self,
181
- channels,
182
- emb_channels,
183
- dropout,
184
- out_channels=None,
185
- use_conv=False,
186
- use_scale_shift_norm=False,
187
- dims=2,
188
- use_checkpoint=False,
189
- up=False,
190
- down=False,
191
- ):
192
- super().__init__()
193
- self.channels = channels
194
- self.emb_channels = emb_channels
195
- self.dropout = dropout
196
- self.out_channels = out_channels or channels
197
- self.use_conv = use_conv
198
- self.use_checkpoint = use_checkpoint
199
- self.use_scale_shift_norm = use_scale_shift_norm
200
-
201
- self.in_layers = nn.Sequential(
202
- normalization(channels),
203
- nn.SiLU(),
204
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
205
- )
206
-
207
- self.updown = up or down
208
-
209
- if up:
210
- self.h_upd = Upsample(channels, False, dims)
211
- self.x_upd = Upsample(channels, False, dims)
212
- elif down:
213
- self.h_upd = Downsample(channels, False, dims)
214
- self.x_upd = Downsample(channels, False, dims)
215
- else:
216
- self.h_upd = self.x_upd = nn.Identity()
217
-
218
- self.emb_layers = nn.Sequential(
219
- nn.SiLU(),
220
- linear(
221
- emb_channels,
222
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
223
- ),
224
- )
225
- self.out_layers = nn.Sequential(
226
- normalization(self.out_channels),
227
- nn.SiLU(),
228
- nn.Dropout(p=dropout),
229
- zero_module(
230
- conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
231
- ),
232
- )
233
-
234
- if self.out_channels == channels:
235
- self.skip_connection = nn.Identity()
236
- elif use_conv:
237
- self.skip_connection = conv_nd(
238
- dims, channels, self.out_channels, 3, padding=1
239
- )
240
- else:
241
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
242
-
243
- def forward(self, x, emb):
244
- """
245
- Apply the block to a Tensor, conditioned on a timestep embedding.
246
- :param x: an [N x C x ...] Tensor of features.
247
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
248
- :return: an [N x C x ...] Tensor of outputs.
249
- """
250
- return checkpoint(
251
- self._forward, (x, emb), self.parameters(), self.use_checkpoint
252
- )
253
-
254
-
255
- def _forward(self, x, emb):
256
- if self.updown:
257
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
258
- h = in_rest(x)
259
- h = self.h_upd(h)
260
- x = self.x_upd(x)
261
- h = in_conv(h)
262
- else:
263
- h = self.in_layers(x)
264
- emb_out = self.emb_layers(emb).type(h.dtype)
265
- while len(emb_out.shape) < len(h.shape):
266
- emb_out = emb_out[..., None]
267
- if self.use_scale_shift_norm:
268
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
269
- scale, shift = th.chunk(emb_out, 2, dim=1)
270
- h = out_norm(h) * (1 + scale) + shift
271
- h = out_rest(h)
272
- else:
273
- h = h + emb_out
274
- h = self.out_layers(h)
275
- return self.skip_connection(x) + h
276
-
277
-
278
- class AttentionBlock(nn.Module):
279
- """
280
- An attention block that allows spatial positions to attend to each other.
281
- Originally ported from here, but adapted to the N-d case.
282
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
283
- """
284
-
285
- def __init__(
286
- self,
287
- channels,
288
- num_heads=1,
289
- num_head_channels=-1,
290
- use_checkpoint=False,
291
- use_new_attention_order=False,
292
- ):
293
- super().__init__()
294
- self.channels = channels
295
- if num_head_channels == -1:
296
- self.num_heads = num_heads
297
- else:
298
- assert (
299
- channels % num_head_channels == 0
300
- ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
301
- self.num_heads = channels // num_head_channels
302
- self.use_checkpoint = use_checkpoint
303
- self.norm = normalization(channels)
304
- self.qkv = conv_nd(1, channels, channels * 3, 1)
305
- if use_new_attention_order:
306
- # split qkv before split heads
307
- self.attention = QKVAttention(self.num_heads)
308
- else:
309
- # split heads before split qkv
310
- self.attention = QKVAttentionLegacy(self.num_heads)
311
-
312
- self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
313
-
314
- def forward(self, x):
315
- return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
316
- #return pt_checkpoint(self._forward, x) # pytorch
317
-
318
- def _forward(self, x):
319
- b, c, *spatial = x.shape
320
- x = x.reshape(b, c, -1)
321
- qkv = self.qkv(self.norm(x))
322
- h = self.attention(qkv)
323
- h = self.proj_out(h)
324
- return (x + h).reshape(b, c, *spatial)
325
-
326
-
327
- def count_flops_attn(model, _x, y):
328
- """
329
- A counter for the `thop` package to count the operations in an
330
- attention operation.
331
- Meant to be used like:
332
- macs, params = thop.profile(
333
- model,
334
- inputs=(inputs, timestamps),
335
- custom_ops={QKVAttention: QKVAttention.count_flops},
336
- )
337
- """
338
- b, c, *spatial = y[0].shape
339
- num_spatial = int(np.prod(spatial))
340
- # We perform two matmuls with the same number of ops.
341
- # The first computes the weight matrix, the second computes
342
- # the combination of the value vectors.
343
- matmul_ops = 2 * b * (num_spatial ** 2) * c
344
- model.total_ops += th.DoubleTensor([matmul_ops])
345
-
346
-
347
- class QKVAttentionLegacy(nn.Module):
348
- """
349
- A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
350
- """
351
-
352
- def __init__(self, n_heads):
353
- super().__init__()
354
- self.n_heads = n_heads
355
-
356
- def forward(self, qkv):
357
- """
358
- Apply QKV attention.
359
- :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
360
- :return: an [N x (H * C) x T] tensor after attention.
361
- """
362
- bs, width, length = qkv.shape
363
- assert width % (3 * self.n_heads) == 0
364
- ch = width // (3 * self.n_heads)
365
- q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
366
- scale = 1 / math.sqrt(math.sqrt(ch))
367
- weight = th.einsum(
368
- "bct,bcs->bts", q * scale, k * scale
369
- ) # More stable with f16 than dividing afterwards
370
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
371
- a = th.einsum("bts,bcs->bct", weight, v)
372
- return a.reshape(bs, -1, length)
373
-
374
- @staticmethod
375
- def count_flops(model, _x, y):
376
- return count_flops_attn(model, _x, y)
377
-
378
-
379
- class QKVAttention(nn.Module):
380
- """
381
- A module which performs QKV attention and splits in a different order.
382
- """
383
-
384
- def __init__(self, n_heads):
385
- super().__init__()
386
- self.n_heads = n_heads
387
-
388
- def forward(self, qkv):
389
- """
390
- Apply QKV attention.
391
- :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
392
- :return: an [N x (H * C) x T] tensor after attention.
393
- """
394
- bs, width, length = qkv.shape
395
- assert width % (3 * self.n_heads) == 0
396
- ch = width // (3 * self.n_heads)
397
- q, k, v = qkv.chunk(3, dim=1)
398
- scale = 1 / math.sqrt(math.sqrt(ch))
399
- weight = th.einsum(
400
- "bct,bcs->bts",
401
- (q * scale).view(bs * self.n_heads, ch, length),
402
- (k * scale).view(bs * self.n_heads, ch, length),
403
- ) # More stable with f16 than dividing afterwards
404
- weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
405
- a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
406
- return a.reshape(bs, -1, length)
407
-
408
- @staticmethod
409
- def count_flops(model, _x, y):
410
- return count_flops_attn(model, _x, y)
411
-
412
-
413
- class UNetModel(nn.Module):
414
- """
415
- The full UNet model with attention and timestep embedding.
416
- :param in_channels: channels in the input Tensor.
417
- :param model_channels: base channel count for the model.
418
- :param out_channels: channels in the output Tensor.
419
- :param num_res_blocks: number of residual blocks per downsample.
420
- :param attention_resolutions: a collection of downsample rates at which
421
- attention will take place. May be a set, list, or tuple.
422
- For example, if this contains 4, then at 4x downsampling, attention
423
- will be used.
424
- :param dropout: the dropout probability.
425
- :param channel_mult: channel multiplier for each level of the UNet.
426
- :param conv_resample: if True, use learned convolutions for upsampling and
427
- downsampling.
428
- :param dims: determines if the signal is 1D, 2D, or 3D.
429
- :param num_classes: if specified (as an int), then this model will be
430
- class-conditional with `num_classes` classes.
431
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
432
- :param num_heads: the number of attention heads in each attention layer.
433
- :param num_heads_channels: if specified, ignore num_heads and instead use
434
- a fixed channel width per attention head.
435
- :param num_heads_upsample: works with num_heads to set a different number
436
- of heads for upsampling. Deprecated.
437
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
438
- :param resblock_updown: use residual blocks for up/downsampling.
439
- :param use_new_attention_order: use a different attention pattern for potentially
440
- increased efficiency.
441
- """
442
-
443
- def __init__(
444
- self,
445
- image_size,
446
- in_channels,
447
- model_channels,
448
- out_channels,
449
- num_res_blocks,
450
- attention_resolutions,
451
- dropout=0,
452
- channel_mult=(1, 2, 4, 8),
453
- conv_resample=True,
454
- dims=2,
455
- num_classes=None,
456
- use_checkpoint=False,
457
- use_fp16=False,
458
- num_heads=-1,
459
- num_head_channels=-1,
460
- num_heads_upsample=-1,
461
- use_scale_shift_norm=False,
462
- resblock_updown=False,
463
- use_new_attention_order=False,
464
- use_spatial_transformer=False, # custom transformer support
465
- transformer_depth=1, # custom transformer support
466
- context_dim=None, # custom transformer support
467
- n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
468
- legacy=True,
469
- disable_self_attentions=None,
470
- num_attention_blocks=None,
471
- disable_middle_self_attn=False,
472
- use_linear_in_transformer=False,
473
- ):
474
- super().__init__()
475
- if use_spatial_transformer:
476
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
477
-
478
- if context_dim is not None:
479
- assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
480
- from omegaconf.listconfig import ListConfig
481
- if type(context_dim) == ListConfig:
482
- context_dim = list(context_dim)
483
-
484
- if num_heads_upsample == -1:
485
- num_heads_upsample = num_heads
486
-
487
- if num_heads == -1:
488
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
489
-
490
- if num_head_channels == -1:
491
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
492
-
493
- self.image_size = image_size
494
- self.in_channels = in_channels
495
- self.model_channels = model_channels
496
- self.out_channels = out_channels
497
- if isinstance(num_res_blocks, int):
498
- self.num_res_blocks = len(channel_mult) * [num_res_blocks]
499
- else:
500
- if len(num_res_blocks) != len(channel_mult):
501
- raise ValueError("provide num_res_blocks either as an int (globally constant) or "
502
- "as a list/tuple (per-level) with the same length as channel_mult")
503
- self.num_res_blocks = num_res_blocks
504
- if disable_self_attentions is not None:
505
- # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
506
- assert len(disable_self_attentions) == len(channel_mult)
507
- if num_attention_blocks is not None:
508
- assert len(num_attention_blocks) == len(self.num_res_blocks)
509
- assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
510
- print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
511
- f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
512
- f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
513
- f"attention will still not be set.")
514
-
515
- self.attention_resolutions = attention_resolutions
516
- self.dropout = dropout
517
- self.channel_mult = channel_mult
518
- self.conv_resample = conv_resample
519
- self.num_classes = num_classes
520
- self.use_checkpoint = use_checkpoint
521
- self.dtype = th.float16 if use_fp16 else th.float32
522
- self.num_heads = num_heads
523
- self.num_head_channels = num_head_channels
524
- self.num_heads_upsample = num_heads_upsample
525
- self.predict_codebook_ids = n_embed is not None
526
-
527
- time_embed_dim = model_channels * 4
528
- self.time_embed = nn.Sequential(
529
- linear(model_channels, time_embed_dim),
530
- nn.SiLU(),
531
- linear(time_embed_dim, time_embed_dim),
532
- )
533
-
534
- if self.num_classes is not None:
535
- if isinstance(self.num_classes, int):
536
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
537
- elif self.num_classes == "continuous":
538
- print("setting up linear c_adm embedding layer")
539
- self.label_emb = nn.Linear(1, time_embed_dim)
540
- else:
541
- raise ValueError()
542
-
543
- self.input_blocks = nn.ModuleList(
544
- [
545
- TimestepEmbedSequential(
546
- conv_nd(dims, in_channels, model_channels, 3, padding=1)
547
- )
548
- ]
549
- )
550
- self._feature_size = model_channels
551
- input_block_chans = [model_channels]
552
- ch = model_channels
553
- ds = 1
554
- for level, mult in enumerate(channel_mult):
555
- for nr in range(self.num_res_blocks[level]):
556
- layers = [
557
- ResBlock(
558
- ch,
559
- time_embed_dim,
560
- dropout,
561
- out_channels=mult * model_channels,
562
- dims=dims,
563
- use_checkpoint=use_checkpoint,
564
- use_scale_shift_norm=use_scale_shift_norm,
565
- )
566
- ]
567
- ch = mult * model_channels
568
- if ds in attention_resolutions:
569
- if num_head_channels == -1:
570
- dim_head = ch // num_heads
571
- else:
572
- num_heads = ch // num_head_channels
573
- dim_head = num_head_channels
574
- if legacy:
575
- #num_heads = 1
576
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
577
- if exists(disable_self_attentions):
578
- disabled_sa = disable_self_attentions[level]
579
- else:
580
- disabled_sa = False
581
-
582
- if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
583
- layers.append(
584
- AttentionBlock(
585
- ch,
586
- use_checkpoint=use_checkpoint,
587
- num_heads=num_heads,
588
- num_head_channels=dim_head,
589
- use_new_attention_order=use_new_attention_order,
590
- ) if not use_spatial_transformer else SpatialTransformer(
591
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
592
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
593
- use_checkpoint=use_checkpoint
594
- )
595
- )
596
- self.input_blocks.append(TimestepEmbedSequential(*layers))
597
- self._feature_size += ch
598
- input_block_chans.append(ch)
599
- if level != len(channel_mult) - 1:
600
- out_ch = ch
601
- self.input_blocks.append(
602
- TimestepEmbedSequential(
603
- ResBlock(
604
- ch,
605
- time_embed_dim,
606
- dropout,
607
- out_channels=out_ch,
608
- dims=dims,
609
- use_checkpoint=use_checkpoint,
610
- use_scale_shift_norm=use_scale_shift_norm,
611
- down=True,
612
- )
613
- if resblock_updown
614
- else Downsample(
615
- ch, conv_resample, dims=dims, out_channels=out_ch
616
- )
617
- )
618
- )
619
- ch = out_ch
620
- input_block_chans.append(ch)
621
- ds *= 2
622
- self._feature_size += ch
623
-
624
- if num_head_channels == -1:
625
- dim_head = ch // num_heads
626
- else:
627
- num_heads = ch // num_head_channels
628
- dim_head = num_head_channels
629
- if legacy:
630
- #num_heads = 1
631
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
632
- self.middle_block = TimestepEmbedSequential(
633
- ResBlock(
634
- ch,
635
- time_embed_dim,
636
- dropout,
637
- dims=dims,
638
- use_checkpoint=use_checkpoint,
639
- use_scale_shift_norm=use_scale_shift_norm,
640
- ),
641
- AttentionBlock(
642
- ch,
643
- use_checkpoint=use_checkpoint,
644
- num_heads=num_heads,
645
- num_head_channels=dim_head,
646
- use_new_attention_order=use_new_attention_order,
647
- ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
648
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
649
- disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
650
- use_checkpoint=use_checkpoint
651
- ),
652
- ResBlock(
653
- ch,
654
- time_embed_dim,
655
- dropout,
656
- dims=dims,
657
- use_checkpoint=use_checkpoint,
658
- use_scale_shift_norm=use_scale_shift_norm,
659
- ),
660
- )
661
- self._feature_size += ch
662
-
663
- self.output_blocks = nn.ModuleList([])
664
- for level, mult in list(enumerate(channel_mult))[::-1]:
665
- for i in range(self.num_res_blocks[level] + 1):
666
- ich = input_block_chans.pop()
667
- layers = [
668
- ResBlock(
669
- ch + ich,
670
- time_embed_dim,
671
- dropout,
672
- out_channels=model_channels * mult,
673
- dims=dims,
674
- use_checkpoint=use_checkpoint,
675
- use_scale_shift_norm=use_scale_shift_norm,
676
- )
677
- ]
678
- ch = model_channels * mult
679
- if ds in attention_resolutions:
680
- if num_head_channels == -1:
681
- dim_head = ch // num_heads
682
- else:
683
- num_heads = ch // num_head_channels
684
- dim_head = num_head_channels
685
- if legacy:
686
- #num_heads = 1
687
- dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
688
- if exists(disable_self_attentions):
689
- disabled_sa = disable_self_attentions[level]
690
- else:
691
- disabled_sa = False
692
-
693
- if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
694
- layers.append(
695
- AttentionBlock(
696
- ch,
697
- use_checkpoint=use_checkpoint,
698
- num_heads=num_heads_upsample,
699
- num_head_channels=dim_head,
700
- use_new_attention_order=use_new_attention_order,
701
- ) if not use_spatial_transformer else SpatialTransformer(
702
- ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
703
- disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
704
- use_checkpoint=use_checkpoint
705
- )
706
- )
707
- if level and i == self.num_res_blocks[level]:
708
- out_ch = ch
709
- layers.append(
710
- ResBlock(
711
- ch,
712
- time_embed_dim,
713
- dropout,
714
- out_channels=out_ch,
715
- dims=dims,
716
- use_checkpoint=use_checkpoint,
717
- use_scale_shift_norm=use_scale_shift_norm,
718
- up=True,
719
- )
720
- if resblock_updown
721
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
722
- )
723
- ds //= 2
724
- self.output_blocks.append(TimestepEmbedSequential(*layers))
725
- self._feature_size += ch
726
-
727
- self.out = nn.Sequential(
728
- normalization(ch),
729
- nn.SiLU(),
730
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
731
- )
732
- if self.predict_codebook_ids:
733
- self.id_predictor = nn.Sequential(
734
- normalization(ch),
735
- conv_nd(dims, model_channels, n_embed, 1),
736
- #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
737
- )
738
-
739
- def convert_to_fp16(self):
740
- """
741
- Convert the torso of the model to float16.
742
- """
743
- self.input_blocks.apply(convert_module_to_f16)
744
- self.middle_block.apply(convert_module_to_f16)
745
- self.output_blocks.apply(convert_module_to_f16)
746
-
747
- def convert_to_fp32(self):
748
- """
749
- Convert the torso of the model to float32.
750
- """
751
- self.input_blocks.apply(convert_module_to_f32)
752
- self.middle_block.apply(convert_module_to_f32)
753
- self.output_blocks.apply(convert_module_to_f32)
754
-
755
- def forward(self, x, timesteps=None, context=None, y=None, features_adapter=None, append_to_context=None, **kwargs):
756
- """
757
- Apply the model to an input batch.
758
- :param x: an [N x C x ...] Tensor of inputs.
759
- :param timesteps: a 1-D batch of timesteps.
760
- :param context: conditioning plugged in via crossattn
761
- :param y: an [N] Tensor of labels, if class-conditional.
762
- :return: an [N x C x ...] Tensor of outputs.
763
- """
764
- assert (y is not None) == (
765
- self.num_classes is not None
766
- ), "must specify y if and only if the model is class-conditional"
767
- hs = []
768
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
769
- emb = self.time_embed(t_emb)
770
-
771
- if self.num_classes is not None:
772
- assert y.shape[0] == x.shape[0]
773
- emb = emb + self.label_emb(y)
774
-
775
- h = x.type(self.dtype)
776
-
777
- if append_to_context is not None:
778
- context = torch.cat([context, append_to_context], dim=1)
779
-
780
- adapter_idx = 0
781
- for id, module in enumerate(self.input_blocks):
782
- h = module(h, emb, context)
783
- if ((id+1)%3 == 0) and features_adapter is not None:
784
- h = h + features_adapter[adapter_idx]
785
- adapter_idx += 1
786
- hs.append(h)
787
- if features_adapter is not None:
788
- assert len(features_adapter)==adapter_idx, 'Wrong features_adapter'
789
-
790
- h = self.middle_block(h, emb, context)
791
- for module in self.output_blocks:
792
- h = th.cat([h, hs.pop()], dim=1)
793
- h = module(h, emb, context)
794
- h = h.type(x.dtype)
795
- if self.predict_codebook_ids:
796
- return self.id_predictor(h)
797
- else:
798
- return self.out(h)
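Editorial note: in the `forward` method above, adapter features are added to `h` after every third input block (the `(id + 1) % 3 == 0` check). A minimal sketch of that schedule is below, assuming the default `channel_mult=(1, 2, 4, 8)` and `num_res_blocks=2`, which yield 12 input blocks; those defaults are an assumption for illustration, not something stated in the diff.

```python
# Assumed defaults: 1 stem conv + 3 levels of (2 res blocks + 1 downsample)
# + 2 res blocks at the last level = 12 input blocks in total.
num_input_blocks = 12
inject_at = [i for i in range(num_input_blocks) if (i + 1) % 3 == 0]
print(inject_at)       # [2, 5, 8, 11]
print(len(inject_at))  # 4, so features_adapter would need four feature maps here
```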
 
spaces/Aditya9790/yolo7-object-tracking/utils/aws/userdata.sh DELETED
@@ -1,27 +0,0 @@
- #!/bin/bash
- # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
- # This script will run only once on first instance start (for a re-start script see mime.sh)
- # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
- # Use >300 GB SSD
-
- cd home/ubuntu
- if [ ! -d yolor ]; then
-     echo "Running first-time script." # install dependencies, download COCO, pull Docker
-     git clone -b main https://github.com/WongKinYiu/yolov7 && sudo chmod -R 777 yolov7
-     cd yolov7
-     bash data/scripts/get_coco.sh && echo "Data done." &
-     sudo docker pull nvcr.io/nvidia/pytorch:21.08-py3 && echo "Docker done." &
-     python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
-     wait && echo "All tasks done." # finish background tasks
- else
-     echo "Running re-start script." # resume interrupted runs
-     i=0
-     list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
-     while IFS= read -r id; do
-         ((i++))
-         echo "restarting container $i: $id"
-         sudo docker start $id
-         # sudo docker exec -it $id python train.py --resume # single-GPU
-         sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
-     done <<<"$list"
- fi
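Editorial note: a start-up script like the one above is handed to EC2 as user data at launch time. Below is a hedged Python sketch using boto3; the AMI id, instance type, and region are placeholders, not values taken from this repository.

```python
import boto3

# Placeholder region; adjust for your account.
ec2 = boto3.client("ec2", region_name="us-east-1")

with open("utils/aws/userdata.sh") as f:
    user_data = f.read()

ec2.run_instances(
    ImageId="ami-0123456789abcdef0",  # placeholder AMI id
    InstanceType="p3.2xlarge",        # placeholder instance type
    MinCount=1,
    MaxCount=1,
    UserData=user_data,               # boto3 base64-encodes the script automatically
)
```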
 
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/monotonic_align/core.c DELETED
The diff for this file is too large to render. See raw diff
 
spaces/AlbertoFH98/CastenaApp/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: CastenaApp
- emoji: 📈
- colorFrom: green
- colorTo: gray
- sdk: streamlit
- sdk_version: 1.27.2
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/frames_dataset.py DELETED
@@ -1,173 +0,0 @@
1
- import os
2
- from skimage import io, img_as_float32
3
- from skimage.color import gray2rgb
4
- from sklearn.model_selection import train_test_split
5
- from imageio import mimread
6
- from skimage.transform import resize
7
- import numpy as np
8
- from torch.utils.data import Dataset
9
- from augmentation import AllAugmentationTransform
10
- import glob
11
- from functools import partial
12
-
13
-
14
- def read_video(name, frame_shape):
15
- """
16
- Read video which can be:
17
- - an image of concatenated frames
18
- - '.mp4' and'.gif'
19
- - folder with videos
20
- """
21
-
22
- if os.path.isdir(name):
23
- frames = sorted(os.listdir(name))
24
- num_frames = len(frames)
25
- video_array = np.array(
26
- [img_as_float32(io.imread(os.path.join(name, frames[idx]))) for idx in range(num_frames)])
27
- elif name.lower().endswith('.png') or name.lower().endswith('.jpg'):
28
- image = io.imread(name)
29
-
30
- if len(image.shape) == 2 or image.shape[2] == 1:
31
- image = gray2rgb(image)
32
-
33
- if image.shape[2] == 4:
34
- image = image[..., :3]
35
-
36
- image = img_as_float32(image)
37
-
38
- video_array = np.moveaxis(image, 1, 0)
39
-
40
- video_array = video_array.reshape((-1,) + frame_shape)
41
- video_array = np.moveaxis(video_array, 1, 2)
42
- elif name.lower().endswith('.gif') or name.lower().endswith('.mp4') or name.lower().endswith('.mov'):
43
- video = mimread(name)
44
- if len(video[0].shape) == 2:
45
- video = [gray2rgb(frame) for frame in video]
46
- if frame_shape is not None:
47
- video = np.array([resize(frame, frame_shape) for frame in video])
48
- video = np.array(video)
49
- if video.shape[-1] == 4:
50
- video = video[..., :3]
51
- video_array = img_as_float32(video)
52
- else:
53
- raise Exception("Unknown file extensions %s" % name)
54
-
55
- return video_array
56
-
57
-
58
- class FramesDataset(Dataset):
59
- """
60
- Dataset of videos, each video can be represented as:
61
- - an image of concatenated frames
62
- - '.mp4' or '.gif'
63
- - folder with all frames
64
- """
65
-
66
- def __init__(self, root_dir, frame_shape=(256, 256, 3), id_sampling=False, is_train=True,
67
- random_seed=0, pairs_list=None, augmentation_params=None):
68
- self.root_dir = root_dir
69
- self.videos = os.listdir(root_dir)
70
- self.frame_shape = frame_shape
71
- print(self.frame_shape)
72
- self.pairs_list = pairs_list
73
- self.id_sampling = id_sampling
74
-
75
- if os.path.exists(os.path.join(root_dir, 'train')):
76
- assert os.path.exists(os.path.join(root_dir, 'test'))
77
- print("Use predefined train-test split.")
78
- if id_sampling:
79
- train_videos = {os.path.basename(video).split('#')[0] for video in
80
- os.listdir(os.path.join(root_dir, 'train'))}
81
- train_videos = list(train_videos)
82
- else:
83
- train_videos = os.listdir(os.path.join(root_dir, 'train'))
84
- test_videos = os.listdir(os.path.join(root_dir, 'test'))
85
- self.root_dir = os.path.join(self.root_dir, 'train' if is_train else 'test')
86
- else:
87
- print("Use random train-test split.")
88
- train_videos, test_videos = train_test_split(self.videos, random_state=random_seed, test_size=0.2)
89
-
90
- if is_train:
91
- self.videos = train_videos
92
- else:
93
- self.videos = test_videos
94
-
95
- self.is_train = is_train
96
-
97
- if self.is_train:
98
- self.transform = AllAugmentationTransform(**augmentation_params)
99
- else:
100
- self.transform = None
101
-
102
- def __len__(self):
103
- return len(self.videos)
104
-
105
- def __getitem__(self, idx):
106
-
107
- if self.is_train and self.id_sampling:
108
- name = self.videos[idx]
109
- path = np.random.choice(glob.glob(os.path.join(self.root_dir, name + '*.mp4')))
110
- else:
111
- name = self.videos[idx]
112
- path = os.path.join(self.root_dir, name)
113
-
114
- video_name = os.path.basename(path)
115
- if self.is_train and os.path.isdir(path):
116
-
117
- frames = os.listdir(path)
118
- num_frames = len(frames)
119
- frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2))
120
-
121
- if self.frame_shape is not None:
122
- resize_fn = partial(resize, output_shape=self.frame_shape)
123
- else:
124
- resize_fn = img_as_float32
125
-
126
- if type(frames[0]) is bytes:
127
- video_array = [resize_fn(io.imread(os.path.join(path, frames[idx].decode('utf-8')))) for idx in
128
- frame_idx]
129
- else:
130
- video_array = [resize_fn(io.imread(os.path.join(path, frames[idx]))) for idx in frame_idx]
131
- else:
132
-
133
- video_array = read_video(path, frame_shape=self.frame_shape)
134
-
135
- num_frames = len(video_array)
136
- frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2)) if self.is_train else range(
137
- num_frames)
138
- video_array = video_array[frame_idx]
139
-
140
-
141
- if self.transform is not None:
142
- video_array = self.transform(video_array)
143
-
144
- out = {}
145
- if self.is_train:
146
- source = np.array(video_array[0], dtype='float32')
147
- driving = np.array(video_array[1], dtype='float32')
148
-
149
- out['driving'] = driving.transpose((2, 0, 1))
150
- out['source'] = source.transpose((2, 0, 1))
151
- else:
152
- video = np.array(video_array, dtype='float32')
153
- out['video'] = video.transpose((3, 0, 1, 2))
154
-
155
- out['name'] = video_name
156
- return out
157
-
158
-
159
- class DatasetRepeater(Dataset):
160
- """
161
- Pass several times over the same dataset for better i/o performance
162
- """
163
-
164
- def __init__(self, dataset, num_repeats=100):
165
- self.dataset = dataset
166
- self.num_repeats = num_repeats
167
-
168
- def __len__(self):
169
- return self.num_repeats * self.dataset.__len__()
170
-
171
- def __getitem__(self, idx):
172
- return self.dataset[idx % self.dataset.__len__()]
173
-
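Editorial note: a typical way the dataset classes above were consumed is sketched below. The root directory, augmentation settings, and batch size are assumptions for illustration, and the snippet presumes the deleted module is importable as `frames_dataset`.

```python
from torch.utils.data import DataLoader
from frames_dataset import FramesDataset, DatasetRepeater  # assumes the module above is on the path

# Hypothetical data layout: one video file or frame folder per item under data/vox.
dataset = FramesDataset(root_dir="data/vox", frame_shape=(256, 256, 3),
                        is_train=True, augmentation_params={})
dataset = DatasetRepeater(dataset, num_repeats=100)  # several passes per epoch for better i/o
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

batch = next(iter(loader))
print(batch["source"].shape, batch["driving"].shape)  # [4, 3, 256, 256] each in training mode
```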
 
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/latent_mappers.py DELETED
@@ -1,81 +0,0 @@
- import torch
- from torch import nn
- from torch.nn import Module
-
- from models.StyleCLIP.models.stylegan2.model import EqualLinear, PixelNorm
-
-
- class Mapper(Module):
-
-     def __init__(self, opts):
-         super(Mapper, self).__init__()
-
-         self.opts = opts
-         layers = [PixelNorm()]
-
-         for i in range(4):
-             layers.append(
-                 EqualLinear(
-                     512, 512, lr_mul=0.01, activation='fused_lrelu'
-                 )
-             )
-
-         self.mapping = nn.Sequential(*layers)
-
-
-     def forward(self, x):
-         x = self.mapping(x)
-         return x
-
-
- class SingleMapper(Module):
-
-     def __init__(self, opts):
-         super(SingleMapper, self).__init__()
-
-         self.opts = opts
-
-         self.mapping = Mapper(opts)
-
-     def forward(self, x):
-         out = self.mapping(x)
-         return out
-
-
- class LevelsMapper(Module):
-
-     def __init__(self, opts):
-         super(LevelsMapper, self).__init__()
-
-         self.opts = opts
-
-         if not opts.no_coarse_mapper:
-             self.course_mapping = Mapper(opts)
-         if not opts.no_medium_mapper:
-             self.medium_mapping = Mapper(opts)
-         if not opts.no_fine_mapper:
-             self.fine_mapping = Mapper(opts)
-
-     def forward(self, x):
-         x_coarse = x[:, :4, :]
-         x_medium = x[:, 4:8, :]
-         x_fine = x[:, 8:, :]
-
-         if not self.opts.no_coarse_mapper:
-             x_coarse = self.course_mapping(x_coarse)
-         else:
-             x_coarse = torch.zeros_like(x_coarse)
-         if not self.opts.no_medium_mapper:
-             x_medium = self.medium_mapping(x_medium)
-         else:
-             x_medium = torch.zeros_like(x_medium)
-         if not self.opts.no_fine_mapper:
-             x_fine = self.fine_mapping(x_fine)
-         else:
-             x_fine = torch.zeros_like(x_fine)
-
-
-         out = torch.cat([x_coarse, x_medium, x_fine], dim=1)
-
-         return out
-
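Editorial note: `LevelsMapper` above splits a W+ latent of shape [N, 18, 512] into coarse (rows 0-3), medium (rows 4-7), and fine (rows 8-17) groups and maps each group separately. A small usage sketch follows; the `opts` namespace is a stand-in for the options object the real training code builds with argparse, and the import path is assumed.

```python
from types import SimpleNamespace

import torch

from latent_mappers import LevelsMapper  # assumes the module above is importable

# Stand-in options: map coarse and medium rows, leave the fine rows zeroed out.
opts = SimpleNamespace(no_coarse_mapper=False, no_medium_mapper=False, no_fine_mapper=True)
mapper = LevelsMapper(opts)

w = torch.randn(2, 18, 512)  # a batch of W+ latents
delta = mapper(w)            # same shape; rows 8: come back as zeros with these opts
print(delta.shape)           # torch.Size([2, 18, 512])
```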
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/fp16.md DELETED
@@ -1,434 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # Memory and speed
14
-
15
- We present some techniques and ideas to optimize 🤗 Diffusers _inference_ for memory or speed. As a general rule, we recommend the use of [xFormers](https://github.com/facebookresearch/xformers) for memory efficient attention, please see the recommended [installation instructions](xformers).
16
-
17
- We'll discuss how the following settings impact performance and memory.
18
-
19
- | | Latency | Speedup |
20
- | ---------------- | ------- | ------- |
21
- | original | 9.50s | x1 |
22
- | fp16 | 3.61s | x2.63 |
23
- | channels last | 3.30s | x2.88 |
24
- | traced UNet | 3.21s | x2.96 |
25
- | memory efficient attention | 2.63s | x3.61 |
26
-
27
- <em>
28
- obtained on NVIDIA TITAN RTX by generating a single image of size 512x512 from
29
- the prompt "a photo of an astronaut riding a horse on mars" with 50 DDIM
30
- steps.
31
- </em>
32
-
33
- ### Use tf32 instead of fp32 (on Ampere and later CUDA devices)
34
-
35
- On Ampere and later CUDA devices matrix multiplications and convolutions can use the TensorFloat32 (TF32) mode for faster but slightly less accurate computations. By default PyTorch enables TF32 mode for convolutions but not matrix multiplications, and unless a network requires full float32 precision we recommend enabling this setting for matrix multiplications, too. It can significantly speed up computations with typically negligible loss of numerical accuracy. You can read more about it [here](https://huggingface.co/docs/transformers/v4.18.0/en/performance#tf32). All you need to do is to add this before your inference:
36
-
37
- ```python
38
- import torch
39
-
40
- torch.backends.cuda.matmul.allow_tf32 = True
41
- ```
42
-
43
- ## Half precision weights
44
-
45
- To save more GPU memory and get more speed, you can load and run the model weights directly in half precision. This involves loading the float16 version of the weights, which was saved to a branch named `fp16`, and telling PyTorch to use the `float16` type when loading them:
46
-
47
- ```Python
48
- import torch
49
- from diffusers import DiffusionPipeline
50
-
51
- pipe = DiffusionPipeline.from_pretrained(
52
- "runwayml/stable-diffusion-v1-5",
53
- torch_dtype=torch.float16,
54
- )
55
- pipe = pipe.to("cuda")
56
-
57
- prompt = "a photo of an astronaut riding a horse on mars"
58
- image = pipe(prompt).images[0]
59
- ```
60
-
61
- <Tip warning={true}>
62
-
63
- It is strongly discouraged to make use of [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) in any of the pipelines as it can lead to black images and is always slower than using pure
64
- float16 precision.
65
-
66
- </Tip>
67
-
68
- ## Sliced attention for additional memory savings
69
-
70
- For even additional memory savings, you can use a sliced version of attention that performs the computation in steps instead of all at once.
71
-
72
- <Tip>
73
- Attention slicing is useful even if a batch size of just 1 is used - as long
74
- as the model uses more than one attention head. If there is more than one
75
- attention head the *QK^T* attention matrix can be computed sequentially for
76
- each head which can save a significant amount of memory.
77
- </Tip>
78
-
79
- To perform the attention computation sequentially over each head, you only need to invoke [`~DiffusionPipeline.enable_attention_slicing`] in your pipeline before inference, like here:
80
-
81
- ```Python
82
- import torch
83
- from diffusers import DiffusionPipeline
84
-
85
- pipe = DiffusionPipeline.from_pretrained(
86
- "runwayml/stable-diffusion-v1-5",
87
- torch_dtype=torch.float16,
88
- )
89
- pipe = pipe.to("cuda")
90
-
91
- prompt = "a photo of an astronaut riding a horse on mars"
92
- pipe.enable_attention_slicing()
93
- image = pipe(prompt).images[0]
94
- ```
95
-
96
- There's a small performance penalty of about 10% slower inference times, but this method allows you to use Stable Diffusion in as little as 3.2 GB of VRAM!
97
-
98
-
99
- ## Sliced VAE decode for larger batches
100
-
101
- To decode large batches of images with limited VRAM, or to enable batches with 32 images or more, you can use sliced VAE decode that decodes the batch latents one image at a time.
102
-
103
- You likely want to couple this with [`~StableDiffusionPipeline.enable_attention_slicing`] or [`~StableDiffusionPipeline.enable_xformers_memory_efficient_attention`] to further minimize memory use.
104
-
105
- To perform the VAE decode one image at a time, invoke [`~StableDiffusionPipeline.enable_vae_slicing`] in your pipeline before inference. For example:
106
-
107
- ```Python
108
- import torch
109
- from diffusers import StableDiffusionPipeline
110
-
111
- pipe = StableDiffusionPipeline.from_pretrained(
112
- "runwayml/stable-diffusion-v1-5",
113
- torch_dtype=torch.float16,
114
- )
115
- pipe = pipe.to("cuda")
116
-
117
- prompt = "a photo of an astronaut riding a horse on mars"
118
- pipe.enable_vae_slicing()
119
- images = pipe([prompt] * 32).images
120
- ```
121
-
122
- You may see a small performance boost in VAE decode on multi-image batches. There should be no performance impact on single-image batches.
123
-
124
-
125
- ## Tiled VAE decode and encode for large images
126
-
127
- Tiled VAE processing makes it possible to work with large images on limited VRAM. For example, generating 4k images in 8GB of VRAM. Tiled VAE decoder splits the image into overlapping tiles, decodes the tiles, and blends the outputs to make the final image.
128
-
129
- You want to couple this with [`~StableDiffusionPipeline.enable_attention_slicing`] or [`~StableDiffusionPipeline.enable_xformers_memory_efficient_attention`] to further minimize memory use.
130
-
131
- To use tiled VAE processing, invoke [`~StableDiffusionPipeline.enable_vae_tiling`] in your pipeline before inference. For example:
132
-
133
- ```python
134
- import torch
135
- from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler
136
-
137
- pipe = StableDiffusionPipeline.from_pretrained(
138
- "runwayml/stable-diffusion-v1-5",
139
- torch_dtype=torch.float16,
140
- )
141
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
142
- pipe = pipe.to("cuda")
143
- prompt = "a beautiful landscape photograph"
144
- pipe.enable_vae_tiling()
145
- pipe.enable_xformers_memory_efficient_attention()
146
-
147
- image = pipe([prompt], width=3840, height=2224, num_inference_steps=20).images[0]
148
- ```
149
-
150
- The output image will have some tile-to-tile tone variation from the tiles having separate decoders, but you shouldn't see sharp seams between the tiles. The tiling is turned off for images that are 512x512 or smaller.
151
-
152
-
153
- <a name="sequential_offloading"></a>
154
- ## Offloading to CPU with accelerate for memory savings
155
-
156
- For additional memory savings, you can offload the weights to CPU and only load them to GPU when performing the forward pass.
157
-
158
- To perform CPU offloading, all you have to do is invoke [`~StableDiffusionPipeline.enable_sequential_cpu_offload`]:
159
-
160
- ```Python
161
- import torch
162
- from diffusers import StableDiffusionPipeline
163
-
164
- pipe = StableDiffusionPipeline.from_pretrained(
165
- "runwayml/stable-diffusion-v1-5",
166
- torch_dtype=torch.float16,
167
- )
168
-
169
- prompt = "a photo of an astronaut riding a horse on mars"
170
- pipe.enable_sequential_cpu_offload()
171
- image = pipe(prompt).images[0]
172
- ```
173
-
174
- And you can get the memory consumption to < 3GB.
175
-
176
- Note that this method works at the submodule level, not on whole models. This is the best way to minimize memory consumption, but inference is much slower due to the iterative nature of the process. The UNet component of the pipeline runs several times (as many as `num_inference_steps`); each time, the different submodules of the UNet are sequentially onloaded and then offloaded as they are needed, so the number of memory transfers is large.
177
-
178
- <Tip>
179
- Consider using <a href="#model_offloading">model offloading</a> as another point in the optimization space: it will be much faster, but memory savings won't be as large.
180
- </Tip>
181
-
182
- It is also possible to chain offloading with attention slicing for minimal memory consumption (< 2GB).
183
-
184
- ```Python
185
- import torch
186
- from diffusers import StableDiffusionPipeline
187
-
188
- pipe = StableDiffusionPipeline.from_pretrained(
189
- "runwayml/stable-diffusion-v1-5",
190
- torch_dtype=torch.float16,
191
- )
192
-
193
- prompt = "a photo of an astronaut riding a horse on mars"
194
- pipe.enable_sequential_cpu_offload()
195
- pipe.enable_attention_slicing(1)
196
-
197
- image = pipe(prompt).images[0]
198
- ```
199
-
200
- **Note**: When using `enable_sequential_cpu_offload()`, it is important to **not** move the pipeline to CUDA beforehand or else the gain in memory consumption will only be minimal. See [this issue](https://github.com/huggingface/diffusers/issues/1934) for more information.
201
-
202
- **Note**: `enable_sequential_cpu_offload()` is a stateful operation that installs hooks on the models.
203
-
204
-
205
- <a name="model_offloading"></a>
206
- ## Model offloading for fast inference and memory savings
207
-
208
- [Sequential CPU offloading](#sequential_offloading), as discussed in the previous section, preserves a lot of memory but makes inference slower, because submodules are moved to GPU as needed, and immediately returned to CPU when a new module runs.
209
-
210
- Full-model offloading is an alternative that moves whole models to the GPU, instead of handling each model's constituent _modules_. This results in a negligible impact on inference time (compared with moving the pipeline to `cuda`), while still providing some memory savings.
211
-
212
- In this scenario, only one of the main components of the pipeline (typically: text encoder, unet and vae)
213
- will be in the GPU while the others wait in the CPU. Components like the UNet that run for multiple iterations will stay on GPU until they are no longer needed.
214
-
215
- This feature can be enabled by invoking `enable_model_cpu_offload()` on the pipeline, as shown below.
216
-
217
- ```Python
218
- import torch
219
- from diffusers import StableDiffusionPipeline
220
-
221
- pipe = StableDiffusionPipeline.from_pretrained(
222
- "runwayml/stable-diffusion-v1-5",
223
- torch_dtype=torch.float16,
224
- )
225
-
226
- prompt = "a photo of an astronaut riding a horse on mars"
227
- pipe.enable_model_cpu_offload()
228
- image = pipe(prompt).images[0]
229
- ```
230
-
231
- This is also compatible with attention slicing for additional memory savings.
232
-
233
- ```Python
234
- import torch
235
- from diffusers import StableDiffusionPipeline
236
-
237
- pipe = StableDiffusionPipeline.from_pretrained(
238
- "runwayml/stable-diffusion-v1-5",
239
- torch_dtype=torch.float16,
240
- )
241
-
242
- prompt = "a photo of an astronaut riding a horse on mars"
243
- pipe.enable_model_cpu_offload()
244
- pipe.enable_attention_slicing(1)
245
-
246
- image = pipe(prompt).images[0]
247
- ```
248
-
249
- <Tip>
250
- This feature requires `accelerate` version 0.17.0 or larger.
251
- </Tip>
252
-
253
- **Note**: `enable_model_cpu_offload()` is a stateful operation that installs hooks on the models and state on the pipeline. In order to properly offload
254
- models after they are called, it is required that the entire pipeline is run and models are called in the order the pipeline expects them to be. Exercise caution
255
- if models are re-used outside the context of the pipeline after hooks have been installed. See [accelerate](https://huggingface.co/docs/accelerate/v0.18.0/en/package_reference/big_modeling#accelerate.hooks.remove_hook_from_module)
256
- for further docs on removing hooks.
257
-
258
- ## Using Channels Last memory format
259
-
260
- Channels last memory format is an alternative way of ordering NCHW tensors in memory that preserves the dimension ordering. Channels last tensors are ordered so that the channels become the densest dimension (i.e. images are stored pixel-per-pixel). Since not all operators currently support the channels last format, it may result in worse performance, so it's better to try it and see if it works for your model.
261
-
262
- For example, in order to set the UNet model in our pipeline to use channels last format, we can use the following:
263
-
264
- ```python
265
- print(pipe.unet.conv_out.state_dict()["weight"].stride()) # (2880, 9, 3, 1)
266
- pipe.unet.to(memory_format=torch.channels_last) # in-place operation
267
- print(
268
- pipe.unet.conv_out.state_dict()["weight"].stride()
269
- ) # (2880, 1, 960, 320) having a stride of 1 for the 2nd dimension proves that it works
270
- ```
271
-
272
- ## Tracing
273
-
274
- Tracing runs an example input tensor through your model, and captures the operations that are invoked as that input makes its way through the model's layers so that an executable or `ScriptFunction` is returned that will be optimized using just-in-time compilation.
275
-
276
- To trace our UNet model, we can use the following:
277
-
278
- ```python
279
- import time
280
- import torch
281
- from diffusers import StableDiffusionPipeline
282
- import functools
283
-
284
- # torch disable grad
285
- torch.set_grad_enabled(False)
286
-
287
- # set variables
288
- n_experiments = 2
289
- unet_runs_per_experiment = 50
290
-
291
-
292
- # load inputs
293
- def generate_inputs():
294
- sample = torch.randn(2, 4, 64, 64).half().cuda()
295
- timestep = torch.rand(1).half().cuda() * 999
296
- encoder_hidden_states = torch.randn(2, 77, 768).half().cuda()
297
- return sample, timestep, encoder_hidden_states
298
-
299
-
300
- pipe = StableDiffusionPipeline.from_pretrained(
301
- "runwayml/stable-diffusion-v1-5",
302
- torch_dtype=torch.float16,
303
- ).to("cuda")
304
- unet = pipe.unet
305
- unet.eval()
306
- unet.to(memory_format=torch.channels_last) # use channels_last memory format
307
- unet.forward = functools.partial(unet.forward, return_dict=False) # set return_dict=False as default
308
-
309
- # warmup
310
- for _ in range(3):
311
- with torch.inference_mode():
312
- inputs = generate_inputs()
313
- orig_output = unet(*inputs)
314
-
315
- # trace
316
- print("tracing..")
317
- unet_traced = torch.jit.trace(unet, inputs)
318
- unet_traced.eval()
319
- print("done tracing")
320
-
321
-
322
- # warmup and optimize graph
323
- for _ in range(5):
324
- with torch.inference_mode():
325
- inputs = generate_inputs()
326
- orig_output = unet_traced(*inputs)
327
-
328
-
329
- # benchmarking
330
- with torch.inference_mode():
331
- for _ in range(n_experiments):
332
- torch.cuda.synchronize()
333
- start_time = time.time()
334
- for _ in range(unet_runs_per_experiment):
335
- orig_output = unet_traced(*inputs)
336
- torch.cuda.synchronize()
337
- print(f"unet traced inference took {time.time() - start_time:.2f} seconds")
338
- for _ in range(n_experiments):
339
- torch.cuda.synchronize()
340
- start_time = time.time()
341
- for _ in range(unet_runs_per_experiment):
342
- orig_output = unet(*inputs)
343
- torch.cuda.synchronize()
344
- print(f"unet inference took {time.time() - start_time:.2f} seconds")
345
-
346
- # save the model
347
- unet_traced.save("unet_traced.pt")
348
- ```
349
-
350
- Then we can replace the `unet` attribute of the pipeline with the traced model like the following
351
-
352
- ```python
353
- from diffusers import StableDiffusionPipeline
354
- import torch
355
- from dataclasses import dataclass
356
-
357
-
358
- @dataclass
359
- class UNet2DConditionOutput:
360
- sample: torch.FloatTensor
361
-
362
-
363
- pipe = StableDiffusionPipeline.from_pretrained(
364
- "runwayml/stable-diffusion-v1-5",
365
- torch_dtype=torch.float16,
366
- ).to("cuda")
367
-
368
- # use jitted unet
369
- unet_traced = torch.jit.load("unet_traced.pt")
370
-
371
-
372
- # del pipe.unet
373
- class TracedUNet(torch.nn.Module):
374
- def __init__(self):
375
- super().__init__()
376
- self.in_channels = pipe.unet.in_channels
377
- self.device = pipe.unet.device
378
-
379
- def forward(self, latent_model_input, t, encoder_hidden_states):
380
- sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0]
381
- return UNet2DConditionOutput(sample=sample)
382
-
383
-
384
- pipe.unet = TracedUNet()
385
-
386
- with torch.inference_mode():
387
- image = pipe([prompt] * 1, num_inference_steps=50).images[0]
388
- ```
389
-
390
-
391
- ## Memory Efficient Attention
392
-
393
- Recent work on optimizing the bandwidth in the attention block has generated huge speed ups and gains in GPU memory usage. The most recent is Flash Attention from @tridao: [code](https://github.com/HazyResearch/flash-attention), [paper](https://arxiv.org/pdf/2205.14135.pdf).
394
-
395
- Here are the speedups we obtain on a few Nvidia GPUs when running the inference at 512x512 with a batch size of 1 (one prompt):
396
-
397
- | GPU | Base Attention FP16 | Memory Efficient Attention FP16 |
398
- |------------------ |--------------------- |--------------------------------- |
399
- | NVIDIA Tesla T4 | 3.5it/s | 5.5it/s |
400
- | NVIDIA 3060 RTX | 4.6it/s | 7.8it/s |
401
- | NVIDIA A10G | 8.88it/s | 15.6it/s |
402
- | NVIDIA RTX A6000 | 11.7it/s | 21.09it/s |
403
- | NVIDIA TITAN RTX | 12.51it/s | 18.22it/s |
404
- | A100-SXM4-40GB | 18.6it/s | 29.it/s |
405
- | A100-SXM-80GB | 18.7it/s | 29.5it/s |
406
-
407
- To leverage it just make sure you have:
408
-
409
- <Tip warning={true}>
410
-
411
- If you have PyTorch 2.0 installed, you shouldn't use xFormers!
412
-
413
- </Tip>
414
-
415
- - PyTorch > 1.12
416
- - Cuda available
417
- - [Installed the xformers library](xformers).
418
- ```python
419
- from diffusers import DiffusionPipeline
420
- import torch
421
-
422
- pipe = DiffusionPipeline.from_pretrained(
423
- "runwayml/stable-diffusion-v1-5",
424
- torch_dtype=torch.float16,
425
- ).to("cuda")
426
-
427
- pipe.enable_xformers_memory_efficient_attention()
428
-
429
- with torch.inference_mode():
430
- sample = pipe("a small cat")
431
-
432
- # optional: You can disable it via
433
- # pipe.disable_xformers_memory_efficient_attention()
434
- ```
 
spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
- _base_ = [
-     '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
-     '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
- ]
 
spaces/Andy1621/uniformer_light/imagenet_class_index.py DELETED
@@ -1,1002 +0,0 @@
1
- imagenet_classnames = {
2
- "0": ["n01440764", "tench"],
3
- "1": ["n01443537", "goldfish"],
4
- "2": ["n01484850", "great_white_shark"],
5
- "3": ["n01491361", "tiger_shark"],
6
- "4": ["n01494475", "hammerhead"],
7
- "5": ["n01496331", "electric_ray"],
8
- "6": ["n01498041", "stingray"],
9
- "7": ["n01514668", "cock"],
10
- "8": ["n01514859", "hen"],
11
- "9": ["n01518878", "ostrich"],
12
- "10": ["n01530575", "brambling"],
13
- "11": ["n01531178", "goldfinch"],
14
- "12": ["n01532829", "house_finch"],
15
- "13": ["n01534433", "junco"],
16
- "14": ["n01537544", "indigo_bunting"],
17
- "15": ["n01558993", "robin"],
18
- "16": ["n01560419", "bulbul"],
19
- "17": ["n01580077", "jay"],
20
- "18": ["n01582220", "magpie"],
21
- "19": ["n01592084", "chickadee"],
22
- "20": ["n01601694", "water_ouzel"],
23
- "21": ["n01608432", "kite"],
24
- "22": ["n01614925", "bald_eagle"],
25
- "23": ["n01616318", "vulture"],
26
- "24": ["n01622779", "great_grey_owl"],
27
- "25": ["n01629819", "European_fire_salamander"],
28
- "26": ["n01630670", "common_newt"],
29
- "27": ["n01631663", "eft"],
30
- "28": ["n01632458", "spotted_salamander"],
31
- "29": ["n01632777", "axolotl"],
32
- "30": ["n01641577", "bullfrog"],
33
- "31": ["n01644373", "tree_frog"],
34
- "32": ["n01644900", "tailed_frog"],
35
- "33": ["n01664065", "loggerhead"],
36
- "34": ["n01665541", "leatherback_turtle"],
37
- "35": ["n01667114", "mud_turtle"],
38
- "36": ["n01667778", "terrapin"],
39
- "37": ["n01669191", "box_turtle"],
40
- "38": ["n01675722", "banded_gecko"],
41
- "39": ["n01677366", "common_iguana"],
42
- "40": ["n01682714", "American_chameleon"],
43
- "41": ["n01685808", "whiptail"],
44
- "42": ["n01687978", "agama"],
45
- "43": ["n01688243", "frilled_lizard"],
46
- "44": ["n01689811", "alligator_lizard"],
47
- "45": ["n01692333", "Gila_monster"],
48
- "46": ["n01693334", "green_lizard"],
49
- "47": ["n01694178", "African_chameleon"],
50
- "48": ["n01695060", "Komodo_dragon"],
51
- "49": ["n01697457", "African_crocodile"],
52
- "50": ["n01698640", "American_alligator"],
53
- "51": ["n01704323", "triceratops"],
54
- "52": ["n01728572", "thunder_snake"],
55
- "53": ["n01728920", "ringneck_snake"],
56
- "54": ["n01729322", "hognose_snake"],
57
- "55": ["n01729977", "green_snake"],
58
- "56": ["n01734418", "king_snake"],
59
- "57": ["n01735189", "garter_snake"],
60
- "58": ["n01737021", "water_snake"],
61
- "59": ["n01739381", "vine_snake"],
62
- "60": ["n01740131", "night_snake"],
63
- "61": ["n01742172", "boa_constrictor"],
64
- "62": ["n01744401", "rock_python"],
65
- "63": ["n01748264", "Indian_cobra"],
66
- "64": ["n01749939", "green_mamba"],
67
- "65": ["n01751748", "sea_snake"],
68
- "66": ["n01753488", "horned_viper"],
69
- "67": ["n01755581", "diamondback"],
70
- "68": ["n01756291", "sidewinder"],
71
- "69": ["n01768244", "trilobite"],
72
- "70": ["n01770081", "harvestman"],
73
- "71": ["n01770393", "scorpion"],
74
- "72": ["n01773157", "black_and_gold_garden_spider"],
75
- "73": ["n01773549", "barn_spider"],
76
- "74": ["n01773797", "garden_spider"],
77
- "75": ["n01774384", "black_widow"],
78
- "76": ["n01774750", "tarantula"],
79
- "77": ["n01775062", "wolf_spider"],
80
- "78": ["n01776313", "tick"],
81
- "79": ["n01784675", "centipede"],
82
- "80": ["n01795545", "black_grouse"],
83
- "81": ["n01796340", "ptarmigan"],
84
- "82": ["n01797886", "ruffed_grouse"],
85
- "83": ["n01798484", "prairie_chicken"],
86
- "84": ["n01806143", "peacock"],
87
- "85": ["n01806567", "quail"],
88
- "86": ["n01807496", "partridge"],
89
- "87": ["n01817953", "African_grey"],
90
- "88": ["n01818515", "macaw"],
91
- "89": ["n01819313", "sulphur-crested_cockatoo"],
92
- "90": ["n01820546", "lorikeet"],
93
- "91": ["n01824575", "coucal"],
94
- "92": ["n01828970", "bee_eater"],
95
- "93": ["n01829413", "hornbill"],
96
- "94": ["n01833805", "hummingbird"],
97
- "95": ["n01843065", "jacamar"],
98
- "96": ["n01843383", "toucan"],
99
- "97": ["n01847000", "drake"],
100
- "98": ["n01855032", "red-breasted_merganser"],
101
- "99": ["n01855672", "goose"],
102
- "100": ["n01860187", "black_swan"],
103
- "101": ["n01871265", "tusker"],
104
- "102": ["n01872401", "echidna"],
105
- "103": ["n01873310", "platypus"],
106
- "104": ["n01877812", "wallaby"],
107
- "105": ["n01882714", "koala"],
108
- "106": ["n01883070", "wombat"],
109
- "107": ["n01910747", "jellyfish"],
110
- "108": ["n01914609", "sea_anemone"],
111
- "109": ["n01917289", "brain_coral"],
112
- "110": ["n01924916", "flatworm"],
113
- "111": ["n01930112", "nematode"],
114
- "112": ["n01943899", "conch"],
115
- "113": ["n01944390", "snail"],
116
- "114": ["n01945685", "slug"],
117
- "115": ["n01950731", "sea_slug"],
118
- "116": ["n01955084", "chiton"],
119
- "117": ["n01968897", "chambered_nautilus"],
120
- "118": ["n01978287", "Dungeness_crab"],
121
- "119": ["n01978455", "rock_crab"],
122
- "120": ["n01980166", "fiddler_crab"],
123
- "121": ["n01981276", "king_crab"],
124
- "122": ["n01983481", "American_lobster"],
125
- "123": ["n01984695", "spiny_lobster"],
126
- "124": ["n01985128", "crayfish"],
127
- "125": ["n01986214", "hermit_crab"],
128
- "126": ["n01990800", "isopod"],
129
- "127": ["n02002556", "white_stork"],
130
- "128": ["n02002724", "black_stork"],
131
- "129": ["n02006656", "spoonbill"],
132
- "130": ["n02007558", "flamingo"],
133
- "131": ["n02009229", "little_blue_heron"],
134
- "132": ["n02009912", "American_egret"],
135
- "133": ["n02011460", "bittern"],
136
- "134": ["n02012849", "crane"],
137
- "135": ["n02013706", "limpkin"],
138
- "136": ["n02017213", "European_gallinule"],
139
- "137": ["n02018207", "American_coot"],
140
- "138": ["n02018795", "bustard"],
141
- "139": ["n02025239", "ruddy_turnstone"],
142
- "140": ["n02027492", "red-backed_sandpiper"],
143
- "141": ["n02028035", "redshank"],
144
- "142": ["n02033041", "dowitcher"],
145
- "143": ["n02037110", "oystercatcher"],
146
- "144": ["n02051845", "pelican"],
147
- "145": ["n02056570", "king_penguin"],
148
- "146": ["n02058221", "albatross"],
149
- "147": ["n02066245", "grey_whale"],
150
- "148": ["n02071294", "killer_whale"],
151
- "149": ["n02074367", "dugong"],
152
- "150": ["n02077923", "sea_lion"],
153
- "151": ["n02085620", "Chihuahua"],
154
- "152": ["n02085782", "Japanese_spaniel"],
155
- "153": ["n02085936", "Maltese_dog"],
156
- "154": ["n02086079", "Pekinese"],
157
- "155": ["n02086240", "Shih-Tzu"],
158
- "156": ["n02086646", "Blenheim_spaniel"],
159
- "157": ["n02086910", "papillon"],
160
- "158": ["n02087046", "toy_terrier"],
161
- "159": ["n02087394", "Rhodesian_ridgeback"],
162
- "160": ["n02088094", "Afghan_hound"],
163
- "161": ["n02088238", "basset"],
164
- "162": ["n02088364", "beagle"],
165
- "163": ["n02088466", "bloodhound"],
166
- "164": ["n02088632", "bluetick"],
167
- "165": ["n02089078", "black-and-tan_coonhound"],
168
- "166": ["n02089867", "Walker_hound"],
169
- "167": ["n02089973", "English_foxhound"],
170
- "168": ["n02090379", "redbone"],
171
- "169": ["n02090622", "borzoi"],
172
- "170": ["n02090721", "Irish_wolfhound"],
173
- "171": ["n02091032", "Italian_greyhound"],
174
- "172": ["n02091134", "whippet"],
175
- "173": ["n02091244", "Ibizan_hound"],
176
- "174": ["n02091467", "Norwegian_elkhound"],
177
- "175": ["n02091635", "otterhound"],
178
- "176": ["n02091831", "Saluki"],
179
- "177": ["n02092002", "Scottish_deerhound"],
180
- "178": ["n02092339", "Weimaraner"],
181
- "179": ["n02093256", "Staffordshire_bullterrier"],
182
- "180": ["n02093428", "American_Staffordshire_terrier"],
183
- "181": ["n02093647", "Bedlington_terrier"],
184
- "182": ["n02093754", "Border_terrier"],
185
- "183": ["n02093859", "Kerry_blue_terrier"],
186
- "184": ["n02093991", "Irish_terrier"],
187
- "185": ["n02094114", "Norfolk_terrier"],
188
- "186": ["n02094258", "Norwich_terrier"],
189
- "187": ["n02094433", "Yorkshire_terrier"],
190
- "188": ["n02095314", "wire-haired_fox_terrier"],
191
- "189": ["n02095570", "Lakeland_terrier"],
192
- "190": ["n02095889", "Sealyham_terrier"],
193
- "191": ["n02096051", "Airedale"],
194
- "192": ["n02096177", "cairn"],
195
- "193": ["n02096294", "Australian_terrier"],
196
- "194": ["n02096437", "Dandie_Dinmont"],
197
- "195": ["n02096585", "Boston_bull"],
198
- "196": ["n02097047", "miniature_schnauzer"],
199
- "197": ["n02097130", "giant_schnauzer"],
200
- "198": ["n02097209", "standard_schnauzer"],
201
- "199": ["n02097298", "Scotch_terrier"],
202
- "200": ["n02097474", "Tibetan_terrier"],
203
- "201": ["n02097658", "silky_terrier"],
204
- "202": ["n02098105", "soft-coated_wheaten_terrier"],
205
- "203": ["n02098286", "West_Highland_white_terrier"],
206
- "204": ["n02098413", "Lhasa"],
207
- "205": ["n02099267", "flat-coated_retriever"],
208
- "206": ["n02099429", "curly-coated_retriever"],
209
- "207": ["n02099601", "golden_retriever"],
210
- "208": ["n02099712", "Labrador_retriever"],
211
- "209": ["n02099849", "Chesapeake_Bay_retriever"],
212
- "210": ["n02100236", "German_short-haired_pointer"],
213
- "211": ["n02100583", "vizsla"],
214
- "212": ["n02100735", "English_setter"],
215
- "213": ["n02100877", "Irish_setter"],
216
- "214": ["n02101006", "Gordon_setter"],
217
- "215": ["n02101388", "Brittany_spaniel"],
218
- "216": ["n02101556", "clumber"],
219
- "217": ["n02102040", "English_springer"],
220
- "218": ["n02102177", "Welsh_springer_spaniel"],
221
- "219": ["n02102318", "cocker_spaniel"],
222
- "220": ["n02102480", "Sussex_spaniel"],
223
- "221": ["n02102973", "Irish_water_spaniel"],
224
- "222": ["n02104029", "kuvasz"],
225
- "223": ["n02104365", "schipperke"],
226
- "224": ["n02105056", "groenendael"],
227
- "225": ["n02105162", "malinois"],
228
- "226": ["n02105251", "briard"],
229
- "227": ["n02105412", "kelpie"],
230
- "228": ["n02105505", "komondor"],
231
- "229": ["n02105641", "Old_English_sheepdog"],
232
- "230": ["n02105855", "Shetland_sheepdog"],
233
- "231": ["n02106030", "collie"],
234
- "232": ["n02106166", "Border_collie"],
235
- "233": ["n02106382", "Bouvier_des_Flandres"],
236
- "234": ["n02106550", "Rottweiler"],
237
- "235": ["n02106662", "German_shepherd"],
238
- "236": ["n02107142", "Doberman"],
239
- "237": ["n02107312", "miniature_pinscher"],
240
- "238": ["n02107574", "Greater_Swiss_Mountain_dog"],
241
- "239": ["n02107683", "Bernese_mountain_dog"],
242
- "240": ["n02107908", "Appenzeller"],
243
- "241": ["n02108000", "EntleBucher"],
244
- "242": ["n02108089", "boxer"],
245
- "243": ["n02108422", "bull_mastiff"],
246
- "244": ["n02108551", "Tibetan_mastiff"],
247
- "245": ["n02108915", "French_bulldog"],
248
- "246": ["n02109047", "Great_Dane"],
249
- "247": ["n02109525", "Saint_Bernard"],
250
- "248": ["n02109961", "Eskimo_dog"],
251
- "249": ["n02110063", "malamute"],
252
- "250": ["n02110185", "Siberian_husky"],
253
- "251": ["n02110341", "dalmatian"],
254
- "252": ["n02110627", "affenpinscher"],
255
- "253": ["n02110806", "basenji"],
256
- "254": ["n02110958", "pug"],
257
- "255": ["n02111129", "Leonberg"],
258
- "256": ["n02111277", "Newfoundland"],
259
- "257": ["n02111500", "Great_Pyrenees"],
260
- "258": ["n02111889", "Samoyed"],
261
- "259": ["n02112018", "Pomeranian"],
262
- "260": ["n02112137", "chow"],
263
- "261": ["n02112350", "keeshond"],
264
- "262": ["n02112706", "Brabancon_griffon"],
265
- "263": ["n02113023", "Pembroke"],
266
- "264": ["n02113186", "Cardigan"],
267
- "265": ["n02113624", "toy_poodle"],
268
- "266": ["n02113712", "miniature_poodle"],
269
- "267": ["n02113799", "standard_poodle"],
270
- "268": ["n02113978", "Mexican_hairless"],
271
- "269": ["n02114367", "timber_wolf"],
272
- "270": ["n02114548", "white_wolf"],
273
- "271": ["n02114712", "red_wolf"],
274
- "272": ["n02114855", "coyote"],
275
- "273": ["n02115641", "dingo"],
276
- "274": ["n02115913", "dhole"],
277
- "275": ["n02116738", "African_hunting_dog"],
278
- "276": ["n02117135", "hyena"],
279
- "277": ["n02119022", "red_fox"],
280
- "278": ["n02119789", "kit_fox"],
281
- "279": ["n02120079", "Arctic_fox"],
282
- "280": ["n02120505", "grey_fox"],
283
- "281": ["n02123045", "tabby"],
284
- "282": ["n02123159", "tiger_cat"],
285
- "283": ["n02123394", "Persian_cat"],
286
- "284": ["n02123597", "Siamese_cat"],
287
- "285": ["n02124075", "Egyptian_cat"],
288
- "286": ["n02125311", "cougar"],
289
- "287": ["n02127052", "lynx"],
290
- "288": ["n02128385", "leopard"],
291
- "289": ["n02128757", "snow_leopard"],
292
- "290": ["n02128925", "jaguar"],
293
- "291": ["n02129165", "lion"],
294
- "292": ["n02129604", "tiger"],
295
- "293": ["n02130308", "cheetah"],
296
- "294": ["n02132136", "brown_bear"],
297
- "295": ["n02133161", "American_black_bear"],
298
- "296": ["n02134084", "ice_bear"],
299
- "297": ["n02134418", "sloth_bear"],
300
- "298": ["n02137549", "mongoose"],
301
- "299": ["n02138441", "meerkat"],
302
- "300": ["n02165105", "tiger_beetle"],
303
- "301": ["n02165456", "ladybug"],
304
- "302": ["n02167151", "ground_beetle"],
305
- "303": ["n02168699", "long-horned_beetle"],
306
- "304": ["n02169497", "leaf_beetle"],
307
- "305": ["n02172182", "dung_beetle"],
308
- "306": ["n02174001", "rhinoceros_beetle"],
309
- "307": ["n02177972", "weevil"],
310
- "308": ["n02190166", "fly"],
311
- "309": ["n02206856", "bee"],
312
- "310": ["n02219486", "ant"],
313
- "311": ["n02226429", "grasshopper"],
314
- "312": ["n02229544", "cricket"],
315
- "313": ["n02231487", "walking_stick"],
316
- "314": ["n02233338", "cockroach"],
317
- "315": ["n02236044", "mantis"],
318
- "316": ["n02256656", "cicada"],
319
- "317": ["n02259212", "leafhopper"],
320
- "318": ["n02264363", "lacewing"],
321
- "319": ["n02268443", "dragonfly"],
322
- "320": ["n02268853", "damselfly"],
323
- "321": ["n02276258", "admiral"],
324
- "322": ["n02277742", "ringlet"],
325
- "323": ["n02279972", "monarch"],
326
- "324": ["n02280649", "cabbage_butterfly"],
327
- "325": ["n02281406", "sulphur_butterfly"],
328
- "326": ["n02281787", "lycaenid"],
329
- "327": ["n02317335", "starfish"],
330
- "328": ["n02319095", "sea_urchin"],
331
- "329": ["n02321529", "sea_cucumber"],
332
- "330": ["n02325366", "wood_rabbit"],
333
- "331": ["n02326432", "hare"],
334
- "332": ["n02328150", "Angora"],
335
- "333": ["n02342885", "hamster"],
336
- "334": ["n02346627", "porcupine"],
337
- "335": ["n02356798", "fox_squirrel"],
338
- "336": ["n02361337", "marmot"],
339
- "337": ["n02363005", "beaver"],
340
- "338": ["n02364673", "guinea_pig"],
341
- "339": ["n02389026", "sorrel"],
342
- "340": ["n02391049", "zebra"],
343
- "341": ["n02395406", "hog"],
344
- "342": ["n02396427", "wild_boar"],
345
- "343": ["n02397096", "warthog"],
346
- "344": ["n02398521", "hippopotamus"],
347
- "345": ["n02403003", "ox"],
348
- "346": ["n02408429", "water_buffalo"],
349
- "347": ["n02410509", "bison"],
350
- "348": ["n02412080", "ram"],
351
- "349": ["n02415577", "bighorn"],
352
- "350": ["n02417914", "ibex"],
353
- "351": ["n02422106", "hartebeest"],
354
- "352": ["n02422699", "impala"],
355
- "353": ["n02423022", "gazelle"],
356
- "354": ["n02437312", "Arabian_camel"],
357
- "355": ["n02437616", "llama"],
358
- "356": ["n02441942", "weasel"],
359
- "357": ["n02442845", "mink"],
360
- "358": ["n02443114", "polecat"],
361
- "359": ["n02443484", "black-footed_ferret"],
362
- "360": ["n02444819", "otter"],
363
- "361": ["n02445715", "skunk"],
364
- "362": ["n02447366", "badger"],
365
- "363": ["n02454379", "armadillo"],
366
- "364": ["n02457408", "three-toed_sloth"],
367
- "365": ["n02480495", "orangutan"],
368
- "366": ["n02480855", "gorilla"],
369
- "367": ["n02481823", "chimpanzee"],
370
- "368": ["n02483362", "gibbon"],
371
- "369": ["n02483708", "siamang"],
372
- "370": ["n02484975", "guenon"],
373
- "371": ["n02486261", "patas"],
374
- "372": ["n02486410", "baboon"],
375
- "373": ["n02487347", "macaque"],
376
- "374": ["n02488291", "langur"],
377
- "375": ["n02488702", "colobus"],
378
- "376": ["n02489166", "proboscis_monkey"],
379
- "377": ["n02490219", "marmoset"],
380
- "378": ["n02492035", "capuchin"],
381
- "379": ["n02492660", "howler_monkey"],
382
- "380": ["n02493509", "titi"],
383
- "381": ["n02493793", "spider_monkey"],
384
- "382": ["n02494079", "squirrel_monkey"],
385
- "383": ["n02497673", "Madagascar_cat"],
386
- "384": ["n02500267", "indri"],
387
- "385": ["n02504013", "Indian_elephant"],
388
- "386": ["n02504458", "African_elephant"],
389
- "387": ["n02509815", "lesser_panda"],
390
- "388": ["n02510455", "giant_panda"],
391
- "389": ["n02514041", "barracouta"],
392
- "390": ["n02526121", "eel"],
393
- "391": ["n02536864", "coho"],
394
- "392": ["n02606052", "rock_beauty"],
395
- "393": ["n02607072", "anemone_fish"],
396
- "394": ["n02640242", "sturgeon"],
397
- "395": ["n02641379", "gar"],
398
- "396": ["n02643566", "lionfish"],
399
- "397": ["n02655020", "puffer"],
400
- "398": ["n02666196", "abacus"],
401
- "399": ["n02667093", "abaya"],
402
- "400": ["n02669723", "academic_gown"],
403
- "401": ["n02672831", "accordion"],
404
- "402": ["n02676566", "acoustic_guitar"],
405
- "403": ["n02687172", "aircraft_carrier"],
406
- "404": ["n02690373", "airliner"],
407
- "405": ["n02692877", "airship"],
408
- "406": ["n02699494", "altar"],
409
- "407": ["n02701002", "ambulance"],
410
- "408": ["n02704792", "amphibian"],
411
- "409": ["n02708093", "analog_clock"],
412
- "410": ["n02727426", "apiary"],
413
- "411": ["n02730930", "apron"],
414
- "412": ["n02747177", "ashcan"],
415
- "413": ["n02749479", "assault_rifle"],
416
- "414": ["n02769748", "backpack"],
417
- "415": ["n02776631", "bakery"],
418
- "416": ["n02777292", "balance_beam"],
419
- "417": ["n02782093", "balloon"],
420
- "418": ["n02783161", "ballpoint"],
421
- "419": ["n02786058", "Band_Aid"],
422
- "420": ["n02787622", "banjo"],
423
- "421": ["n02788148", "bannister"],
424
- "422": ["n02790996", "barbell"],
425
- "423": ["n02791124", "barber_chair"],
426
- "424": ["n02791270", "barbershop"],
427
- "425": ["n02793495", "barn"],
428
- "426": ["n02794156", "barometer"],
429
- "427": ["n02795169", "barrel"],
430
- "428": ["n02797295", "barrow"],
431
- "429": ["n02799071", "baseball"],
432
- "430": ["n02802426", "basketball"],
433
- "431": ["n02804414", "bassinet"],
434
- "432": ["n02804610", "bassoon"],
435
- "433": ["n02807133", "bathing_cap"],
436
- "434": ["n02808304", "bath_towel"],
437
- "435": ["n02808440", "bathtub"],
438
- "436": ["n02814533", "beach_wagon"],
439
- "437": ["n02814860", "beacon"],
440
- "438": ["n02815834", "beaker"],
441
- "439": ["n02817516", "bearskin"],
442
- "440": ["n02823428", "beer_bottle"],
443
- "441": ["n02823750", "beer_glass"],
444
- "442": ["n02825657", "bell_cote"],
445
- "443": ["n02834397", "bib"],
446
- "444": ["n02835271", "bicycle-built-for-two"],
447
- "445": ["n02837789", "bikini"],
448
- "446": ["n02840245", "binder"],
449
- "447": ["n02841315", "binoculars"],
450
- "448": ["n02843684", "birdhouse"],
451
- "449": ["n02859443", "boathouse"],
452
- "450": ["n02860847", "bobsled"],
453
- "451": ["n02865351", "bolo_tie"],
454
- "452": ["n02869837", "bonnet"],
455
- "453": ["n02870880", "bookcase"],
456
- "454": ["n02871525", "bookshop"],
457
- "455": ["n02877765", "bottlecap"],
458
- "456": ["n02879718", "bow"],
459
- "457": ["n02883205", "bow_tie"],
460
- "458": ["n02892201", "brass"],
461
- "459": ["n02892767", "brassiere"],
462
- "460": ["n02894605", "breakwater"],
463
- "461": ["n02895154", "breastplate"],
464
- "462": ["n02906734", "broom"],
465
- "463": ["n02909870", "bucket"],
466
- "464": ["n02910353", "buckle"],
467
- "465": ["n02916936", "bulletproof_vest"],
468
- "466": ["n02917067", "bullet_train"],
469
- "467": ["n02927161", "butcher_shop"],
470
- "468": ["n02930766", "cab"],
471
- "469": ["n02939185", "caldron"],
472
- "470": ["n02948072", "candle"],
473
- "471": ["n02950826", "cannon"],
474
- "472": ["n02951358", "canoe"],
475
- "473": ["n02951585", "can_opener"],
476
- "474": ["n02963159", "cardigan"],
477
- "475": ["n02965783", "car_mirror"],
478
- "476": ["n02966193", "carousel"],
479
- "477": ["n02966687", "carpenter's_kit"],
480
- "478": ["n02971356", "carton"],
481
- "479": ["n02974003", "car_wheel"],
482
- "480": ["n02977058", "cash_machine"],
483
- "481": ["n02978881", "cassette"],
484
- "482": ["n02979186", "cassette_player"],
485
- "483": ["n02980441", "castle"],
486
- "484": ["n02981792", "catamaran"],
487
- "485": ["n02988304", "CD_player"],
488
- "486": ["n02992211", "cello"],
489
- "487": ["n02992529", "cellular_telephone"],
490
- "488": ["n02999410", "chain"],
491
- "489": ["n03000134", "chainlink_fence"],
492
- "490": ["n03000247", "chain_mail"],
493
- "491": ["n03000684", "chain_saw"],
494
- "492": ["n03014705", "chest"],
495
- "493": ["n03016953", "chiffonier"],
496
- "494": ["n03017168", "chime"],
497
- "495": ["n03018349", "china_cabinet"],
498
- "496": ["n03026506", "Christmas_stocking"],
499
- "497": ["n03028079", "church"],
500
- "498": ["n03032252", "cinema"],
501
- "499": ["n03041632", "cleaver"],
502
- "500": ["n03042490", "cliff_dwelling"],
503
- "501": ["n03045698", "cloak"],
504
- "502": ["n03047690", "clog"],
505
- "503": ["n03062245", "cocktail_shaker"],
506
- "504": ["n03063599", "coffee_mug"],
507
- "505": ["n03063689", "coffeepot"],
508
- "506": ["n03065424", "coil"],
509
- "507": ["n03075370", "combination_lock"],
510
- "508": ["n03085013", "computer_keyboard"],
511
- "509": ["n03089624", "confectionery"],
512
- "510": ["n03095699", "container_ship"],
513
- "511": ["n03100240", "convertible"],
514
- "512": ["n03109150", "corkscrew"],
515
- "513": ["n03110669", "cornet"],
516
- "514": ["n03124043", "cowboy_boot"],
517
- "515": ["n03124170", "cowboy_hat"],
518
- "516": ["n03125729", "cradle"],
519
- "517": ["n03126707", "crane"],
520
- "518": ["n03127747", "crash_helmet"],
521
- "519": ["n03127925", "crate"],
522
- "520": ["n03131574", "crib"],
523
- "521": ["n03133878", "Crock_Pot"],
524
- "522": ["n03134739", "croquet_ball"],
525
- "523": ["n03141823", "crutch"],
526
- "524": ["n03146219", "cuirass"],
527
- "525": ["n03160309", "dam"],
528
- "526": ["n03179701", "desk"],
529
- "527": ["n03180011", "desktop_computer"],
530
- "528": ["n03187595", "dial_telephone"],
531
- "529": ["n03188531", "diaper"],
532
- "530": ["n03196217", "digital_clock"],
533
- "531": ["n03197337", "digital_watch"],
534
- "532": ["n03201208", "dining_table"],
535
- "533": ["n03207743", "dishrag"],
536
- "534": ["n03207941", "dishwasher"],
537
- "535": ["n03208938", "disk_brake"],
538
- "536": ["n03216828", "dock"],
539
- "537": ["n03218198", "dogsled"],
540
- "538": ["n03220513", "dome"],
541
- "539": ["n03223299", "doormat"],
542
- "540": ["n03240683", "drilling_platform"],
543
- "541": ["n03249569", "drum"],
544
- "542": ["n03250847", "drumstick"],
545
- "543": ["n03255030", "dumbbell"],
546
- "544": ["n03259280", "Dutch_oven"],
547
- "545": ["n03271574", "electric_fan"],
548
- "546": ["n03272010", "electric_guitar"],
549
- "547": ["n03272562", "electric_locomotive"],
550
- "548": ["n03290653", "entertainment_center"],
551
- "549": ["n03291819", "envelope"],
552
- "550": ["n03297495", "espresso_maker"],
553
- "551": ["n03314780", "face_powder"],
554
- "552": ["n03325584", "feather_boa"],
555
- "553": ["n03337140", "file"],
556
- "554": ["n03344393", "fireboat"],
557
- "555": ["n03345487", "fire_engine"],
558
- "556": ["n03347037", "fire_screen"],
559
- "557": ["n03355925", "flagpole"],
560
- "558": ["n03372029", "flute"],
561
- "559": ["n03376595", "folding_chair"],
562
- "560": ["n03379051", "football_helmet"],
563
- "561": ["n03384352", "forklift"],
564
- "562": ["n03388043", "fountain"],
565
- "563": ["n03388183", "fountain_pen"],
566
- "564": ["n03388549", "four-poster"],
567
- "565": ["n03393912", "freight_car"],
568
- "566": ["n03394916", "French_horn"],
569
- "567": ["n03400231", "frying_pan"],
570
- "568": ["n03404251", "fur_coat"],
571
- "569": ["n03417042", "garbage_truck"],
572
- "570": ["n03424325", "gasmask"],
573
- "571": ["n03425413", "gas_pump"],
574
- "572": ["n03443371", "goblet"],
575
- "573": ["n03444034", "go-kart"],
576
- "574": ["n03445777", "golf_ball"],
577
- "575": ["n03445924", "golfcart"],
578
- "576": ["n03447447", "gondola"],
579
- "577": ["n03447721", "gong"],
580
- "578": ["n03450230", "gown"],
581
- "579": ["n03452741", "grand_piano"],
582
- "580": ["n03457902", "greenhouse"],
583
- "581": ["n03459775", "grille"],
584
- "582": ["n03461385", "grocery_store"],
585
- "583": ["n03467068", "guillotine"],
586
- "584": ["n03476684", "hair_slide"],
587
- "585": ["n03476991", "hair_spray"],
588
- "586": ["n03478589", "half_track"],
589
- "587": ["n03481172", "hammer"],
590
- "588": ["n03482405", "hamper"],
591
- "589": ["n03483316", "hand_blower"],
592
- "590": ["n03485407", "hand-held_computer"],
593
- "591": ["n03485794", "handkerchief"],
594
- "592": ["n03492542", "hard_disc"],
595
- "593": ["n03494278", "harmonica"],
596
- "594": ["n03495258", "harp"],
597
- "595": ["n03496892", "harvester"],
598
- "596": ["n03498962", "hatchet"],
599
- "597": ["n03527444", "holster"],
600
- "598": ["n03529860", "home_theater"],
601
- "599": ["n03530642", "honeycomb"],
602
- "600": ["n03532672", "hook"],
603
- "601": ["n03534580", "hoopskirt"],
604
- "602": ["n03535780", "horizontal_bar"],
605
- "603": ["n03538406", "horse_cart"],
606
- "604": ["n03544143", "hourglass"],
607
- "605": ["n03584254", "iPod"],
608
- "606": ["n03584829", "iron"],
609
- "607": ["n03590841", "jack-o'-lantern"],
610
- "608": ["n03594734", "jean"],
611
- "609": ["n03594945", "jeep"],
612
- "610": ["n03595614", "jersey"],
613
- "611": ["n03598930", "jigsaw_puzzle"],
614
- "612": ["n03599486", "jinrikisha"],
615
- "613": ["n03602883", "joystick"],
616
- "614": ["n03617480", "kimono"],
617
- "615": ["n03623198", "knee_pad"],
618
- "616": ["n03627232", "knot"],
619
- "617": ["n03630383", "lab_coat"],
620
- "618": ["n03633091", "ladle"],
621
- "619": ["n03637318", "lampshade"],
622
- "620": ["n03642806", "laptop"],
623
- "621": ["n03649909", "lawn_mower"],
624
- "622": ["n03657121", "lens_cap"],
625
- "623": ["n03658185", "letter_opener"],
626
- "624": ["n03661043", "library"],
627
- "625": ["n03662601", "lifeboat"],
628
- "626": ["n03666591", "lighter"],
629
- "627": ["n03670208", "limousine"],
630
- "628": ["n03673027", "liner"],
631
- "629": ["n03676483", "lipstick"],
632
- "630": ["n03680355", "Loafer"],
633
- "631": ["n03690938", "lotion"],
634
- "632": ["n03691459", "loudspeaker"],
635
- "633": ["n03692522", "loupe"],
636
- "634": ["n03697007", "lumbermill"],
637
- "635": ["n03706229", "magnetic_compass"],
638
- "636": ["n03709823", "mailbag"],
639
- "637": ["n03710193", "mailbox"],
640
- "638": ["n03710637", "maillot"],
641
- "639": ["n03710721", "maillot"],
642
- "640": ["n03717622", "manhole_cover"],
643
- "641": ["n03720891", "maraca"],
644
- "642": ["n03721384", "marimba"],
645
- "643": ["n03724870", "mask"],
646
- "644": ["n03729826", "matchstick"],
647
- "645": ["n03733131", "maypole"],
648
- "646": ["n03733281", "maze"],
649
- "647": ["n03733805", "measuring_cup"],
650
- "648": ["n03742115", "medicine_chest"],
651
- "649": ["n03743016", "megalith"],
652
- "650": ["n03759954", "microphone"],
653
- "651": ["n03761084", "microwave"],
654
- "652": ["n03763968", "military_uniform"],
655
- "653": ["n03764736", "milk_can"],
656
- "654": ["n03769881", "minibus"],
657
- "655": ["n03770439", "miniskirt"],
658
- "656": ["n03770679", "minivan"],
659
- "657": ["n03773504", "missile"],
660
- "658": ["n03775071", "mitten"],
661
- "659": ["n03775546", "mixing_bowl"],
662
- "660": ["n03776460", "mobile_home"],
663
- "661": ["n03777568", "Model_T"],
664
- "662": ["n03777754", "modem"],
665
- "663": ["n03781244", "monastery"],
666
- "664": ["n03782006", "monitor"],
667
- "665": ["n03785016", "moped"],
668
- "666": ["n03786901", "mortar"],
669
- "667": ["n03787032", "mortarboard"],
670
- "668": ["n03788195", "mosque"],
671
- "669": ["n03788365", "mosquito_net"],
672
- "670": ["n03791053", "motor_scooter"],
673
- "671": ["n03792782", "mountain_bike"],
674
- "672": ["n03792972", "mountain_tent"],
675
- "673": ["n03793489", "mouse"],
676
- "674": ["n03794056", "mousetrap"],
677
- "675": ["n03796401", "moving_van"],
678
- "676": ["n03803284", "muzzle"],
679
- "677": ["n03804744", "nail"],
680
- "678": ["n03814639", "neck_brace"],
681
- "679": ["n03814906", "necklace"],
682
- "680": ["n03825788", "nipple"],
683
- "681": ["n03832673", "notebook"],
684
- "682": ["n03837869", "obelisk"],
685
- "683": ["n03838899", "oboe"],
686
- "684": ["n03840681", "ocarina"],
687
- "685": ["n03841143", "odometer"],
688
- "686": ["n03843555", "oil_filter"],
689
- "687": ["n03854065", "organ"],
690
- "688": ["n03857828", "oscilloscope"],
691
- "689": ["n03866082", "overskirt"],
692
- "690": ["n03868242", "oxcart"],
693
- "691": ["n03868863", "oxygen_mask"],
694
- "692": ["n03871628", "packet"],
695
- "693": ["n03873416", "paddle"],
696
- "694": ["n03874293", "paddlewheel"],
697
- "695": ["n03874599", "padlock"],
698
- "696": ["n03876231", "paintbrush"],
699
- "697": ["n03877472", "pajama"],
700
- "698": ["n03877845", "palace"],
701
- "699": ["n03884397", "panpipe"],
702
- "700": ["n03887697", "paper_towel"],
703
- "701": ["n03888257", "parachute"],
704
- "702": ["n03888605", "parallel_bars"],
705
- "703": ["n03891251", "park_bench"],
706
- "704": ["n03891332", "parking_meter"],
707
- "705": ["n03895866", "passenger_car"],
708
- "706": ["n03899768", "patio"],
709
- "707": ["n03902125", "pay-phone"],
710
- "708": ["n03903868", "pedestal"],
711
- "709": ["n03908618", "pencil_box"],
712
- "710": ["n03908714", "pencil_sharpener"],
713
- "711": ["n03916031", "perfume"],
714
- "712": ["n03920288", "Petri_dish"],
715
- "713": ["n03924679", "photocopier"],
716
- "714": ["n03929660", "pick"],
717
- "715": ["n03929855", "pickelhaube"],
718
- "716": ["n03930313", "picket_fence"],
719
- "717": ["n03930630", "pickup"],
720
- "718": ["n03933933", "pier"],
721
- "719": ["n03935335", "piggy_bank"],
722
- "720": ["n03937543", "pill_bottle"],
723
- "721": ["n03938244", "pillow"],
724
- "722": ["n03942813", "ping-pong_ball"],
725
- "723": ["n03944341", "pinwheel"],
726
- "724": ["n03947888", "pirate"],
727
- "725": ["n03950228", "pitcher"],
728
- "726": ["n03954731", "plane"],
729
- "727": ["n03956157", "planetarium"],
730
- "728": ["n03958227", "plastic_bag"],
731
- "729": ["n03961711", "plate_rack"],
732
- "730": ["n03967562", "plow"],
733
- "731": ["n03970156", "plunger"],
734
- "732": ["n03976467", "Polaroid_camera"],
735
- "733": ["n03976657", "pole"],
736
- "734": ["n03977966", "police_van"],
737
- "735": ["n03980874", "poncho"],
738
- "736": ["n03982430", "pool_table"],
739
- "737": ["n03983396", "pop_bottle"],
740
- "738": ["n03991062", "pot"],
741
- "739": ["n03992509", "potter's_wheel"],
742
- "740": ["n03995372", "power_drill"],
743
- "741": ["n03998194", "prayer_rug"],
744
- "742": ["n04004767", "printer"],
745
- "743": ["n04005630", "prison"],
746
- "744": ["n04008634", "projectile"],
747
- "745": ["n04009552", "projector"],
748
- "746": ["n04019541", "puck"],
749
- "747": ["n04023962", "punching_bag"],
750
- "748": ["n04026417", "purse"],
751
- "749": ["n04033901", "quill"],
752
- "750": ["n04033995", "quilt"],
753
- "751": ["n04037443", "racer"],
754
- "752": ["n04039381", "racket"],
755
- "753": ["n04040759", "radiator"],
756
- "754": ["n04041544", "radio"],
757
- "755": ["n04044716", "radio_telescope"],
758
- "756": ["n04049303", "rain_barrel"],
759
- "757": ["n04065272", "recreational_vehicle"],
760
- "758": ["n04067472", "reel"],
761
- "759": ["n04069434", "reflex_camera"],
762
- "760": ["n04070727", "refrigerator"],
763
- "761": ["n04074963", "remote_control"],
764
- "762": ["n04081281", "restaurant"],
765
- "763": ["n04086273", "revolver"],
766
- "764": ["n04090263", "rifle"],
767
- "765": ["n04099969", "rocking_chair"],
768
- "766": ["n04111531", "rotisserie"],
769
- "767": ["n04116512", "rubber_eraser"],
770
- "768": ["n04118538", "rugby_ball"],
771
- "769": ["n04118776", "rule"],
772
- "770": ["n04120489", "running_shoe"],
773
- "771": ["n04125021", "safe"],
774
- "772": ["n04127249", "safety_pin"],
775
- "773": ["n04131690", "saltshaker"],
776
- "774": ["n04133789", "sandal"],
777
- "775": ["n04136333", "sarong"],
778
- "776": ["n04141076", "sax"],
779
- "777": ["n04141327", "scabbard"],
780
- "778": ["n04141975", "scale"],
781
- "779": ["n04146614", "school_bus"],
782
- "780": ["n04147183", "schooner"],
783
- "781": ["n04149813", "scoreboard"],
784
- "782": ["n04152593", "screen"],
785
- "783": ["n04153751", "screw"],
786
- "784": ["n04154565", "screwdriver"],
787
- "785": ["n04162706", "seat_belt"],
788
- "786": ["n04179913", "sewing_machine"],
789
- "787": ["n04192698", "shield"],
790
- "788": ["n04200800", "shoe_shop"],
791
- "789": ["n04201297", "shoji"],
792
- "790": ["n04204238", "shopping_basket"],
793
- "791": ["n04204347", "shopping_cart"],
794
- "792": ["n04208210", "shovel"],
795
- "793": ["n04209133", "shower_cap"],
796
- "794": ["n04209239", "shower_curtain"],
797
- "795": ["n04228054", "ski"],
798
- "796": ["n04229816", "ski_mask"],
799
- "797": ["n04235860", "sleeping_bag"],
800
- "798": ["n04238763", "slide_rule"],
801
- "799": ["n04239074", "sliding_door"],
802
- "800": ["n04243546", "slot"],
803
- "801": ["n04251144", "snorkel"],
804
- "802": ["n04252077", "snowmobile"],
805
- "803": ["n04252225", "snowplow"],
806
- "804": ["n04254120", "soap_dispenser"],
807
- "805": ["n04254680", "soccer_ball"],
808
- "806": ["n04254777", "sock"],
809
- "807": ["n04258138", "solar_dish"],
810
- "808": ["n04259630", "sombrero"],
811
- "809": ["n04263257", "soup_bowl"],
812
- "810": ["n04264628", "space_bar"],
813
- "811": ["n04265275", "space_heater"],
814
- "812": ["n04266014", "space_shuttle"],
815
- "813": ["n04270147", "spatula"],
816
- "814": ["n04273569", "speedboat"],
817
- "815": ["n04275548", "spider_web"],
818
- "816": ["n04277352", "spindle"],
819
- "817": ["n04285008", "sports_car"],
820
- "818": ["n04286575", "spotlight"],
821
- "819": ["n04296562", "stage"],
822
- "820": ["n04310018", "steam_locomotive"],
823
- "821": ["n04311004", "steel_arch_bridge"],
824
- "822": ["n04311174", "steel_drum"],
825
- "823": ["n04317175", "stethoscope"],
826
- "824": ["n04325704", "stole"],
827
- "825": ["n04326547", "stone_wall"],
828
- "826": ["n04328186", "stopwatch"],
829
- "827": ["n04330267", "stove"],
830
- "828": ["n04332243", "strainer"],
831
- "829": ["n04335435", "streetcar"],
832
- "830": ["n04336792", "stretcher"],
833
- "831": ["n04344873", "studio_couch"],
834
- "832": ["n04346328", "stupa"],
835
- "833": ["n04347754", "submarine"],
836
- "834": ["n04350905", "suit"],
837
- "835": ["n04355338", "sundial"],
838
- "836": ["n04355933", "sunglass"],
839
- "837": ["n04356056", "sunglasses"],
840
- "838": ["n04357314", "sunscreen"],
841
- "839": ["n04366367", "suspension_bridge"],
842
- "840": ["n04367480", "swab"],
843
- "841": ["n04370456", "sweatshirt"],
844
- "842": ["n04371430", "swimming_trunks"],
845
- "843": ["n04371774", "swing"],
846
- "844": ["n04372370", "switch"],
847
- "845": ["n04376876", "syringe"],
848
- "846": ["n04380533", "table_lamp"],
849
- "847": ["n04389033", "tank"],
850
- "848": ["n04392985", "tape_player"],
851
- "849": ["n04398044", "teapot"],
852
- "850": ["n04399382", "teddy"],
853
- "851": ["n04404412", "television"],
854
- "852": ["n04409515", "tennis_ball"],
855
- "853": ["n04417672", "thatch"],
856
- "854": ["n04418357", "theater_curtain"],
857
- "855": ["n04423845", "thimble"],
858
- "856": ["n04428191", "thresher"],
859
- "857": ["n04429376", "throne"],
860
- "858": ["n04435653", "tile_roof"],
861
- "859": ["n04442312", "toaster"],
862
- "860": ["n04443257", "tobacco_shop"],
863
- "861": ["n04447861", "toilet_seat"],
864
- "862": ["n04456115", "torch"],
865
- "863": ["n04458633", "totem_pole"],
866
- "864": ["n04461696", "tow_truck"],
867
- "865": ["n04462240", "toyshop"],
868
- "866": ["n04465501", "tractor"],
869
- "867": ["n04467665", "trailer_truck"],
870
- "868": ["n04476259", "tray"],
871
- "869": ["n04479046", "trench_coat"],
872
- "870": ["n04482393", "tricycle"],
873
- "871": ["n04483307", "trimaran"],
874
- "872": ["n04485082", "tripod"],
875
- "873": ["n04486054", "triumphal_arch"],
876
- "874": ["n04487081", "trolleybus"],
877
- "875": ["n04487394", "trombone"],
878
- "876": ["n04493381", "tub"],
879
- "877": ["n04501370", "turnstile"],
880
- "878": ["n04505470", "typewriter_keyboard"],
881
- "879": ["n04507155", "umbrella"],
882
- "880": ["n04509417", "unicycle"],
883
- "881": ["n04515003", "upright"],
884
- "882": ["n04517823", "vacuum"],
885
- "883": ["n04522168", "vase"],
886
- "884": ["n04523525", "vault"],
887
- "885": ["n04525038", "velvet"],
888
- "886": ["n04525305", "vending_machine"],
889
- "887": ["n04532106", "vestment"],
890
- "888": ["n04532670", "viaduct"],
891
- "889": ["n04536866", "violin"],
892
- "890": ["n04540053", "volleyball"],
893
- "891": ["n04542943", "waffle_iron"],
894
- "892": ["n04548280", "wall_clock"],
895
- "893": ["n04548362", "wallet"],
896
- "894": ["n04550184", "wardrobe"],
897
- "895": ["n04552348", "warplane"],
898
- "896": ["n04553703", "washbasin"],
899
- "897": ["n04554684", "washer"],
900
- "898": ["n04557648", "water_bottle"],
901
- "899": ["n04560804", "water_jug"],
902
- "900": ["n04562935", "water_tower"],
903
- "901": ["n04579145", "whiskey_jug"],
904
- "902": ["n04579432", "whistle"],
905
- "903": ["n04584207", "wig"],
906
- "904": ["n04589890", "window_screen"],
907
- "905": ["n04590129", "window_shade"],
908
- "906": ["n04591157", "Windsor_tie"],
909
- "907": ["n04591713", "wine_bottle"],
910
- "908": ["n04592741", "wing"],
911
- "909": ["n04596742", "wok"],
912
- "910": ["n04597913", "wooden_spoon"],
913
- "911": ["n04599235", "wool"],
914
- "912": ["n04604644", "worm_fence"],
915
- "913": ["n04606251", "wreck"],
916
- "914": ["n04612504", "yawl"],
917
- "915": ["n04613696", "yurt"],
918
- "916": ["n06359193", "web_site"],
919
- "917": ["n06596364", "comic_book"],
920
- "918": ["n06785654", "crossword_puzzle"],
921
- "919": ["n06794110", "street_sign"],
922
- "920": ["n06874185", "traffic_light"],
923
- "921": ["n07248320", "book_jacket"],
924
- "922": ["n07565083", "menu"],
925
- "923": ["n07579787", "plate"],
926
- "924": ["n07583066", "guacamole"],
927
- "925": ["n07584110", "consomme"],
928
- "926": ["n07590611", "hot_pot"],
929
- "927": ["n07613480", "trifle"],
930
- "928": ["n07614500", "ice_cream"],
931
- "929": ["n07615774", "ice_lolly"],
932
- "930": ["n07684084", "French_loaf"],
933
- "931": ["n07693725", "bagel"],
934
- "932": ["n07695742", "pretzel"],
935
- "933": ["n07697313", "cheeseburger"],
936
- "934": ["n07697537", "hotdog"],
937
- "935": ["n07711569", "mashed_potato"],
938
- "936": ["n07714571", "head_cabbage"],
939
- "937": ["n07714990", "broccoli"],
940
- "938": ["n07715103", "cauliflower"],
941
- "939": ["n07716358", "zucchini"],
942
- "940": ["n07716906", "spaghetti_squash"],
943
- "941": ["n07717410", "acorn_squash"],
944
- "942": ["n07717556", "butternut_squash"],
945
- "943": ["n07718472", "cucumber"],
946
- "944": ["n07718747", "artichoke"],
947
- "945": ["n07720875", "bell_pepper"],
948
- "946": ["n07730033", "cardoon"],
949
- "947": ["n07734744", "mushroom"],
950
- "948": ["n07742313", "Granny_Smith"],
951
- "949": ["n07745940", "strawberry"],
952
- "950": ["n07747607", "orange"],
953
- "951": ["n07749582", "lemon"],
954
- "952": ["n07753113", "fig"],
955
- "953": ["n07753275", "pineapple"],
956
- "954": ["n07753592", "banana"],
957
- "955": ["n07754684", "jackfruit"],
958
- "956": ["n07760859", "custard_apple"],
959
- "957": ["n07768694", "pomegranate"],
960
- "958": ["n07802026", "hay"],
961
- "959": ["n07831146", "carbonara"],
962
- "960": ["n07836838", "chocolate_sauce"],
963
- "961": ["n07860988", "dough"],
964
- "962": ["n07871810", "meat_loaf"],
965
- "963": ["n07873807", "pizza"],
966
- "964": ["n07875152", "potpie"],
967
- "965": ["n07880968", "burrito"],
968
- "966": ["n07892512", "red_wine"],
969
- "967": ["n07920052", "espresso"],
970
- "968": ["n07930864", "cup"],
971
- "969": ["n07932039", "eggnog"],
972
- "970": ["n09193705", "alp"],
973
- "971": ["n09229709", "bubble"],
974
- "972": ["n09246464", "cliff"],
975
- "973": ["n09256479", "coral_reef"],
976
- "974": ["n09288635", "geyser"],
977
- "975": ["n09332890", "lakeside"],
978
- "976": ["n09399592", "promontory"],
979
- "977": ["n09421951", "sandbar"],
980
- "978": ["n09428293", "seashore"],
981
- "979": ["n09468604", "valley"],
982
- "980": ["n09472597", "volcano"],
983
- "981": ["n09835506", "ballplayer"],
984
- "982": ["n10148035", "groom"],
985
- "983": ["n10565667", "scuba_diver"],
986
- "984": ["n11879895", "rapeseed"],
987
- "985": ["n11939491", "daisy"],
988
- "986": ["n12057211", "yellow_lady's_slipper"],
989
- "987": ["n12144580", "corn"],
990
- "988": ["n12267677", "acorn"],
991
- "989": ["n12620546", "hip"],
992
- "990": ["n12768682", "buckeye"],
993
- "991": ["n12985857", "coral_fungus"],
994
- "992": ["n12998815", "agaric"],
995
- "993": ["n13037406", "gyromitra"],
996
- "994": ["n13040303", "stinkhorn"],
997
- "995": ["n13044778", "earthstar"],
998
- "996": ["n13052670", "hen-of-the-woods"],
999
- "997": ["n13054560", "bolete"],
1000
- "998": ["n13133613", "ear"],
1001
- "999": ["n15075141", "toilet_tissue"]
1002
- }
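
For reference, a mapping like the one deleted above (class index to [WordNet synset ID, human-readable label]) is normally consumed by decoding a classifier's predicted index. The following is a minimal sketch in Python; the names imagenet_class_index and logits are assumptions made for illustration and are not taken from this repository.

import torch

# Hypothetical helper: decode a single logits vector into (WordNet ID, label)
# using a dict shaped like the deleted mapping above (string keys "0".."999",
# values of the form ["n02099601", "golden_retriever"]).
def decode_top1(logits: torch.Tensor, imagenet_class_index: dict) -> tuple:
    idx = int(logits.argmax(dim=-1))              # top-1 class index for one sample
    wnid, label = imagenet_class_index[str(idx)]  # keys are strings in this mapping
    return wnid, label

# Example: an argmax of 207 would decode to ("n02099601", "golden_retriever").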
 
 
spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-search/fuse.min.js DELETED
@@ -1,9 +0,0 @@
1
- /**
2
- * Fuse.js v6.6.2 - Lightweight fuzzy-search (http://fusejs.io)
3
- *
4
- * Copyright (c) 2022 Kiro Risk (http://kiro.me)
5
- * All Rights Reserved. Apache Software License 2.0
6
- *
7
- * http://www.apache.org/licenses/LICENSE-2.0
8
- */
9
- var e,t;e=this,t=function(){"use strict";function e(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function t(t){for(var n=1;n<arguments.length;n++){var r=null!=arguments[n]?arguments[n]:{};n%2?e(Object(r),!0).forEach((function(e){c(t,e,r[e])})):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(r)):e(Object(r)).forEach((function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(r,e))}))}return t}function n(e){return n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},n(e)}function r(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function i(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}function o(e,t,n){return t&&i(e.prototype,t),n&&i(e,n),Object.defineProperty(e,"prototype",{writable:!1}),e}function c(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");Object.defineProperty(e,"prototype",{value:Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),writable:!1}),t&&u(e,t)}function s(e){return s=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},s(e)}function u(e,t){return u=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},u(e,t)}function h(e,t){if(t&&("object"==typeof t||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return function(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}(e)}function l(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var n,r=s(e);if(t){var i=s(this).constructor;n=Reflect.construct(r,arguments,i)}else n=r.apply(this,arguments);return h(this,n)}}function f(e){return function(e){if(Array.isArray(e))return d(e)}(e)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(e)||function(e,t){if(e){if("string"==typeof e)return d(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);return"Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n?Array.from(e):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?d(e,t):void 0}}(e)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function d(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n<t;n++)r[n]=e[n];return r}function v(e){return Array.isArray?Array.isArray(e):"[object Array]"===b(e)}function g(e){return"string"==typeof e}function y(e){return"number"==typeof e}function p(e){return!0===e||!1===e||function(e){return m(e)&&null!==e}(e)&&"[object 
Boolean]"==b(e)}function m(e){return"object"===n(e)}function k(e){return null!=e}function M(e){return!e.trim().length}function b(e){return null==e?void 0===e?"[object Undefined]":"[object Null]":Object.prototype.toString.call(e)}var x=function(e){return"Invalid value for key ".concat(e)},w=function(e){return"Pattern length exceeds max of ".concat(e,".")},L=Object.prototype.hasOwnProperty,S=function(){function e(t){var n=this;r(this,e),this._keys=[],this._keyMap={};var i=0;t.forEach((function(e){var t=_(e);i+=t.weight,n._keys.push(t),n._keyMap[t.id]=t,i+=t.weight})),this._keys.forEach((function(e){e.weight/=i}))}return o(e,[{key:"get",value:function(e){return this._keyMap[e]}},{key:"keys",value:function(){return this._keys}},{key:"toJSON",value:function(){return JSON.stringify(this._keys)}}]),e}();function _(e){var t=null,n=null,r=null,i=1,o=null;if(g(e)||v(e))r=e,t=O(e),n=j(e);else{if(!L.call(e,"name"))throw new Error(function(e){return"Missing ".concat(e," property in key")}("name"));var c=e.name;if(r=c,L.call(e,"weight")&&(i=e.weight)<=0)throw new Error(function(e){return"Property 'weight' in key '".concat(e,"' must be a positive integer")}(c));t=O(c),n=j(c),o=e.getFn}return{path:t,id:n,weight:i,src:r,getFn:o}}function O(e){return v(e)?e:e.split(".")}function j(e){return v(e)?e.join("."):e}var A={useExtendedSearch:!1,getFn:function(e,t){var n=[],r=!1;return function e(t,i,o){if(k(t))if(i[o]){var c=t[i[o]];if(!k(c))return;if(o===i.length-1&&(g(c)||y(c)||p(c)))n.push(function(e){return null==e?"":function(e){if("string"==typeof e)return e;var t=e+"";return"0"==t&&1/e==-1/0?"-0":t}(e)}(c));else if(v(c)){r=!0;for(var a=0,s=c.length;a<s;a+=1)e(c[a],i,o+1)}else i.length&&e(c,i,o+1)}else n.push(t)}(e,g(t)?t.split("."):t,0),r?n:n[0]},ignoreLocation:!1,ignoreFieldNorm:!1,fieldNormWeight:1},I=t(t(t(t({},{isCaseSensitive:!1,includeScore:!1,keys:[],shouldSort:!0,sortFn:function(e,t){return e.score===t.score?e.idx<t.idx?-1:1:e.score<t.score?-1:1}}),{includeMatches:!1,findAllMatches:!1,minMatchCharLength:1}),{location:0,threshold:.6,distance:100}),A),C=/[^ ]+/g;function E(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:3,n=new Map,r=Math.pow(10,t);return{get:function(t){var i=t.match(C).length;if(n.has(i))return n.get(i);var o=1/Math.pow(i,.5*e),c=parseFloat(Math.round(o*r)/r);return n.set(i,c),c},clear:function(){n.clear()}}}var $=function(){function e(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=t.getFn,i=void 0===n?I.getFn:n,o=t.fieldNormWeight,c=void 0===o?I.fieldNormWeight:o;r(this,e),this.norm=E(c,3),this.getFn=i,this.isCreated=!1,this.setIndexRecords()}return o(e,[{key:"setSources",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.docs=e}},{key:"setIndexRecords",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.records=e}},{key:"setKeys",value:function(){var e=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];this.keys=t,this._keysMap={},t.forEach((function(t,n){e._keysMap[t.id]=n}))}},{key:"create",value:function(){var e=this;!this.isCreated&&this.docs.length&&(this.isCreated=!0,g(this.docs[0])?this.docs.forEach((function(t,n){e._addString(t,n)})):this.docs.forEach((function(t,n){e._addObject(t,n)})),this.norm.clear())}},{key:"add",value:function(e){var t=this.size();g(e)?this._addString(e,t):this._addObject(e,t)}},{key:"removeAt",value:function(e){this.records.splice(e,1);for(var 
t=e,n=this.size();t<n;t+=1)this.records[t].i-=1}},{key:"getValueForItemAtKeyId",value:function(e,t){return e[this._keysMap[t]]}},{key:"size",value:function(){return this.records.length}},{key:"_addString",value:function(e,t){if(k(e)&&!M(e)){var n={v:e,i:t,n:this.norm.get(e)};this.records.push(n)}}},{key:"_addObject",value:function(e,t){var n=this,r={i:t,$:{}};this.keys.forEach((function(t,i){var o=t.getFn?t.getFn(e):n.getFn(e,t.path);if(k(o))if(v(o))!function(){for(var e=[],t=[{nestedArrIndex:-1,value:o}];t.length;){var c=t.pop(),a=c.nestedArrIndex,s=c.value;if(k(s))if(g(s)&&!M(s)){var u={v:s,i:a,n:n.norm.get(s)};e.push(u)}else v(s)&&s.forEach((function(e,n){t.push({nestedArrIndex:n,value:e})}))}r.$[i]=e}();else if(g(o)&&!M(o)){var c={v:o,n:n.norm.get(o)};r.$[i]=c}})),this.records.push(r)}},{key:"toJSON",value:function(){return{keys:this.keys,records:this.records}}}]),e}();function F(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.getFn,i=void 0===r?I.getFn:r,o=n.fieldNormWeight,c=void 0===o?I.fieldNormWeight:o,a=new $({getFn:i,fieldNormWeight:c});return a.setKeys(e.map(_)),a.setSources(t),a.create(),a}function R(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.errors,r=void 0===n?0:n,i=t.currentLocation,o=void 0===i?0:i,c=t.expectedLocation,a=void 0===c?0:c,s=t.distance,u=void 0===s?I.distance:s,h=t.ignoreLocation,l=void 0===h?I.ignoreLocation:h,f=r/e.length;if(l)return f;var d=Math.abs(a-o);return u?f+d/u:d?1:f}function N(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:I.minMatchCharLength,n=[],r=-1,i=-1,o=0,c=e.length;o<c;o+=1){var a=e[o];a&&-1===r?r=o:a||-1===r||((i=o-1)-r+1>=t&&n.push([r,i]),r=-1)}return e[o-1]&&o-r>=t&&n.push([r,o-1]),n}var P=32;function W(e){for(var t={},n=0,r=e.length;n<r;n+=1){var i=e.charAt(n);t[i]=(t[i]||0)|1<<r-n-1}return t}var T=function(){function e(t){var n=this,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=i.location,c=void 0===o?I.location:o,a=i.threshold,s=void 0===a?I.threshold:a,u=i.distance,h=void 0===u?I.distance:u,l=i.includeMatches,f=void 0===l?I.includeMatches:l,d=i.findAllMatches,v=void 0===d?I.findAllMatches:d,g=i.minMatchCharLength,y=void 0===g?I.minMatchCharLength:g,p=i.isCaseSensitive,m=void 0===p?I.isCaseSensitive:p,k=i.ignoreLocation,M=void 0===k?I.ignoreLocation:k;if(r(this,e),this.options={location:c,threshold:s,distance:h,includeMatches:f,findAllMatches:v,minMatchCharLength:y,isCaseSensitive:m,ignoreLocation:M},this.pattern=m?t:t.toLowerCase(),this.chunks=[],this.pattern.length){var b=function(e,t){n.chunks.push({pattern:e,alphabet:W(e),startIndex:t})},x=this.pattern.length;if(x>P){for(var w=0,L=x%P,S=x-L;w<S;)b(this.pattern.substr(w,P),w),w+=P;if(L){var _=x-P;b(this.pattern.substr(_),_)}}else b(this.pattern,0)}}return o(e,[{key:"searchIn",value:function(e){var t=this.options,n=t.isCaseSensitive,r=t.includeMatches;if(n||(e=e.toLowerCase()),this.pattern===e){var i={isMatch:!0,score:0};return r&&(i.indices=[[0,e.length-1]]),i}var o=this.options,c=o.location,a=o.distance,s=o.threshold,u=o.findAllMatches,h=o.minMatchCharLength,l=o.ignoreLocation,d=[],v=0,g=!1;this.chunks.forEach((function(t){var n=t.pattern,i=t.alphabet,o=t.startIndex,y=function(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},i=r.location,o=void 0===i?I.location:i,c=r.distance,a=void 0===c?I.distance:c,s=r.threshold,u=void 0===s?I.threshold:s,h=r.findAllMatches,l=void 
0===h?I.findAllMatches:h,f=r.minMatchCharLength,d=void 0===f?I.minMatchCharLength:f,v=r.includeMatches,g=void 0===v?I.includeMatches:v,y=r.ignoreLocation,p=void 0===y?I.ignoreLocation:y;if(t.length>P)throw new Error(w(P));for(var m,k=t.length,M=e.length,b=Math.max(0,Math.min(o,M)),x=u,L=b,S=d>1||g,_=S?Array(M):[];(m=e.indexOf(t,L))>-1;){var O=R(t,{currentLocation:m,expectedLocation:b,distance:a,ignoreLocation:p});if(x=Math.min(O,x),L=m+k,S)for(var j=0;j<k;)_[m+j]=1,j+=1}L=-1;for(var A=[],C=1,E=k+M,$=1<<k-1,F=0;F<k;F+=1){for(var W=0,T=E;W<T;)R(t,{errors:F,currentLocation:b+T,expectedLocation:b,distance:a,ignoreLocation:p})<=x?W=T:E=T,T=Math.floor((E-W)/2+W);E=T;var z=Math.max(1,b-T+1),D=l?M:Math.min(b+T,M)+k,K=Array(D+2);K[D+1]=(1<<F)-1;for(var q=D;q>=z;q-=1){var B=q-1,J=n[e.charAt(B)];if(S&&(_[B]=+!!J),K[q]=(K[q+1]<<1|1)&J,F&&(K[q]|=(A[q+1]|A[q])<<1|1|A[q+1]),K[q]&$&&(C=R(t,{errors:F,currentLocation:B,expectedLocation:b,distance:a,ignoreLocation:p}))<=x){if(x=C,(L=B)<=b)break;z=Math.max(1,2*b-L)}}if(R(t,{errors:F+1,currentLocation:b,expectedLocation:b,distance:a,ignoreLocation:p})>x)break;A=K}var U={isMatch:L>=0,score:Math.max(.001,C)};if(S){var V=N(_,d);V.length?g&&(U.indices=V):U.isMatch=!1}return U}(e,n,i,{location:c+o,distance:a,threshold:s,findAllMatches:u,minMatchCharLength:h,includeMatches:r,ignoreLocation:l}),p=y.isMatch,m=y.score,k=y.indices;p&&(g=!0),v+=m,p&&k&&(d=[].concat(f(d),f(k)))}));var y={isMatch:g,score:g?v/this.chunks.length:1};return g&&r&&(y.indices=d),y}}]),e}(),z=function(){function e(t){r(this,e),this.pattern=t}return o(e,[{key:"search",value:function(){}}],[{key:"isMultiMatch",value:function(e){return D(e,this.multiRegex)}},{key:"isSingleMatch",value:function(e){return D(e,this.singleRegex)}}]),e}();function D(e,t){var n=e.match(t);return n?n[1]:null}var K=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=e===this.pattern;return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}}],[{key:"type",get:function(){return"exact"}},{key:"multiRegex",get:function(){return/^="(.*)"$/}},{key:"singleRegex",get:function(){return/^=(.*)$/}}]),n}(z),q=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=-1===e.indexOf(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-exact"}},{key:"multiRegex",get:function(){return/^!"(.*)"$/}},{key:"singleRegex",get:function(){return/^!(.*)$/}}]),n}(z),B=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,this.pattern.length-1]}}}],[{key:"type",get:function(){return"prefix-exact"}},{key:"multiRegex",get:function(){return/^\^"(.*)"$/}},{key:"singleRegex",get:function(){return/^\^(.*)$/}}]),n}(z),J=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=!e.startsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-prefix-exact"}},{key:"multiRegex",get:function(){return/^!\^"(.*)"$/}},{key:"singleRegex",get:function(){return/^!\^(.*)$/}}]),n}(z),U=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var 
t=e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[e.length-this.pattern.length,e.length-1]}}}],[{key:"type",get:function(){return"suffix-exact"}},{key:"multiRegex",get:function(){return/^"(.*)"\$$/}},{key:"singleRegex",get:function(){return/^(.*)\$$/}}]),n}(z),V=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){var t=!e.endsWith(this.pattern);return{isMatch:t,score:t?0:1,indices:[0,e.length-1]}}}],[{key:"type",get:function(){return"inverse-suffix-exact"}},{key:"multiRegex",get:function(){return/^!"(.*)"\$$/}},{key:"singleRegex",get:function(){return/^!(.*)\$$/}}]),n}(z),G=function(e){a(n,e);var t=l(n);function n(e){var i,o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},c=o.location,a=void 0===c?I.location:c,s=o.threshold,u=void 0===s?I.threshold:s,h=o.distance,l=void 0===h?I.distance:h,f=o.includeMatches,d=void 0===f?I.includeMatches:f,v=o.findAllMatches,g=void 0===v?I.findAllMatches:v,y=o.minMatchCharLength,p=void 0===y?I.minMatchCharLength:y,m=o.isCaseSensitive,k=void 0===m?I.isCaseSensitive:m,M=o.ignoreLocation,b=void 0===M?I.ignoreLocation:M;return r(this,n),(i=t.call(this,e))._bitapSearch=new T(e,{location:a,threshold:u,distance:l,includeMatches:d,findAllMatches:g,minMatchCharLength:p,isCaseSensitive:k,ignoreLocation:b}),i}return o(n,[{key:"search",value:function(e){return this._bitapSearch.searchIn(e)}}],[{key:"type",get:function(){return"fuzzy"}},{key:"multiRegex",get:function(){return/^"(.*)"$/}},{key:"singleRegex",get:function(){return/^(.*)$/}}]),n}(z),H=function(e){a(n,e);var t=l(n);function n(e){return r(this,n),t.call(this,e)}return o(n,[{key:"search",value:function(e){for(var t,n=0,r=[],i=this.pattern.length;(t=e.indexOf(this.pattern,n))>-1;)n=t+i,r.push([t,n-1]);var o=!!r.length;return{isMatch:o,score:o?0:1,indices:r}}}],[{key:"type",get:function(){return"include"}},{key:"multiRegex",get:function(){return/^'"(.*)"$/}},{key:"singleRegex",get:function(){return/^'(.*)$/}}]),n}(z),Q=[K,H,B,J,V,U,q,G],X=Q.length,Y=/ +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)/;function Z(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return e.split("|").map((function(e){for(var n=e.trim().split(Y).filter((function(e){return e&&!!e.trim()})),r=[],i=0,o=n.length;i<o;i+=1){for(var c=n[i],a=!1,s=-1;!a&&++s<X;){var u=Q[s],h=u.isMultiMatch(c);h&&(r.push(new u(h,t)),a=!0)}if(!a)for(s=-1;++s<X;){var l=Q[s],f=l.isSingleMatch(c);if(f){r.push(new l(f,t));break}}}return r}))}var ee=new Set([G.type,H.type]),te=function(){function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=n.isCaseSensitive,o=void 0===i?I.isCaseSensitive:i,c=n.includeMatches,a=void 0===c?I.includeMatches:c,s=n.minMatchCharLength,u=void 0===s?I.minMatchCharLength:s,h=n.ignoreLocation,l=void 0===h?I.ignoreLocation:h,f=n.findAllMatches,d=void 0===f?I.findAllMatches:f,v=n.location,g=void 0===v?I.location:v,y=n.threshold,p=void 0===y?I.threshold:y,m=n.distance,k=void 0===m?I.distance:m;r(this,e),this.query=null,this.options={isCaseSensitive:o,includeMatches:a,minMatchCharLength:u,findAllMatches:d,ignoreLocation:l,location:g,threshold:p,distance:k},this.pattern=o?t:t.toLowerCase(),this.query=Z(this.pattern,this.options)}return o(e,[{key:"searchIn",value:function(e){var t=this.query;if(!t)return{isMatch:!1,score:1};var n=this.options,r=n.includeMatches;e=n.isCaseSensitive?e:e.toLowerCase();for(var i=0,o=[],c=0,a=0,s=t.length;a<s;a+=1){var u=t[a];o.length=0,i=0;for(var h=0,l=u.length;h<l;h+=1){var 
d=u[h],v=d.search(e),g=v.isMatch,y=v.indices,p=v.score;if(!g){c=0,i=0,o.length=0;break}if(i+=1,c+=p,r){var m=d.constructor.type;ee.has(m)?o=[].concat(f(o),f(y)):o.push(y)}}if(i){var k={isMatch:!0,score:c/i};return r&&(k.indices=o),k}}return{isMatch:!1,score:1}}}],[{key:"condition",value:function(e,t){return t.useExtendedSearch}}]),e}(),ne=[];function re(e,t){for(var n=0,r=ne.length;n<r;n+=1){var i=ne[n];if(i.condition(e,t))return new i(e,t)}return new T(e,t)}var ie="$and",oe="$or",ce="$path",ae="$val",se=function(e){return!(!e[ie]&&!e[oe])},ue=function(e){return!!e[ce]},he=function(e){return!v(e)&&m(e)&&!se(e)},le=function(e){return c({},ie,Object.keys(e).map((function(t){return c({},t,e[t])})))};function fe(e,t){var n=t.ignoreFieldNorm,r=void 0===n?I.ignoreFieldNorm:n;e.forEach((function(e){var t=1;e.matches.forEach((function(e){var n=e.key,i=e.norm,o=e.score,c=n?n.weight:null;t*=Math.pow(0===o&&c?Number.EPSILON:o,(c||1)*(r?1:i))})),e.score=t}))}function de(e,t){var n=e.matches;t.matches=[],k(n)&&n.forEach((function(e){if(k(e.indices)&&e.indices.length){var n={indices:e.indices,value:e.value};e.key&&(n.key=e.key.src),e.idx>-1&&(n.refIndex=e.idx),t.matches.push(n)}}))}function ve(e,t){t.score=e.score}function ge(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=n.includeMatches,i=void 0===r?I.includeMatches:r,o=n.includeScore,c=void 0===o?I.includeScore:o,a=[];return i&&a.push(de),c&&a.push(ve),e.map((function(e){var n=e.idx,r={item:t[n],refIndex:n};return a.length&&a.forEach((function(t){t(e,r)})),r}))}var ye=function(){function e(n){var i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=arguments.length>2?arguments[2]:void 0;r(this,e),this.options=t(t({},I),i),this.options.useExtendedSearch,this._keyStore=new S(this.options.keys),this.setCollection(n,o)}return o(e,[{key:"setCollection",value:function(e,t){if(this._docs=e,t&&!(t instanceof $))throw new Error("Incorrect 'index' type");this._myIndex=t||F(this.options.keys,this._docs,{getFn:this.options.getFn,fieldNormWeight:this.options.fieldNormWeight})}},{key:"add",value:function(e){k(e)&&(this._docs.push(e),this._myIndex.add(e))}},{key:"remove",value:function(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:function(){return!1},t=[],n=0,r=this._docs.length;n<r;n+=1){var i=this._docs[n];e(i,n)&&(this.removeAt(n),n-=1,r-=1,t.push(i))}return t}},{key:"removeAt",value:function(e){this._docs.splice(e,1),this._myIndex.removeAt(e)}},{key:"getIndex",value:function(){return this._myIndex}},{key:"search",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.limit,r=void 0===n?-1:n,i=this.options,o=i.includeMatches,c=i.includeScore,a=i.shouldSort,s=i.sortFn,u=i.ignoreFieldNorm,h=g(e)?g(this._docs[0])?this._searchStringList(e):this._searchObjectList(e):this._searchLogical(e);return fe(h,{ignoreFieldNorm:u}),a&&h.sort(s),y(r)&&r>-1&&(h=h.slice(0,r)),ge(h,this._docs,{includeMatches:o,includeScore:c})}},{key:"_searchStringList",value:function(e){var t=re(e,this.options),n=this._myIndex.records,r=[];return n.forEach((function(e){var n=e.v,i=e.i,o=e.n;if(k(n)){var c=t.searchIn(n),a=c.isMatch,s=c.score,u=c.indices;a&&r.push({item:n,idx:i,matches:[{score:s,value:n,norm:o,indices:u}]})}})),r}},{key:"_searchLogical",value:function(e){var t=this,n=function(e,t){var n=(arguments.length>2&&void 0!==arguments[2]?arguments[2]:{}).auto,r=void 0===n||n,i=function e(n){var i=Object.keys(n),o=ue(n);if(!o&&i.length>1&&!se(n))return e(le(n));if(he(n)){var 
c=o?n[ce]:i[0],a=o?n[ae]:n[c];if(!g(a))throw new Error(x(c));var s={keyId:j(c),pattern:a};return r&&(s.searcher=re(a,t)),s}var u={children:[],operator:i[0]};return i.forEach((function(t){var r=n[t];v(r)&&r.forEach((function(t){u.children.push(e(t))}))})),u};return se(e)||(e=le(e)),i(e)}(e,this.options),r=function e(n,r,i){if(!n.children){var o=n.keyId,c=n.searcher,a=t._findMatches({key:t._keyStore.get(o),value:t._myIndex.getValueForItemAtKeyId(r,o),searcher:c});return a&&a.length?[{idx:i,item:r,matches:a}]:[]}for(var s=[],u=0,h=n.children.length;u<h;u+=1){var l=e(n.children[u],r,i);if(l.length)s.push.apply(s,f(l));else if(n.operator===ie)return[]}return s},i=this._myIndex.records,o={},c=[];return i.forEach((function(e){var t=e.$,i=e.i;if(k(t)){var a=r(n,t,i);a.length&&(o[i]||(o[i]={idx:i,item:t,matches:[]},c.push(o[i])),a.forEach((function(e){var t,n=e.matches;(t=o[i].matches).push.apply(t,f(n))})))}})),c}},{key:"_searchObjectList",value:function(e){var t=this,n=re(e,this.options),r=this._myIndex,i=r.keys,o=r.records,c=[];return o.forEach((function(e){var r=e.$,o=e.i;if(k(r)){var a=[];i.forEach((function(e,i){a.push.apply(a,f(t._findMatches({key:e,value:r[i],searcher:n})))})),a.length&&c.push({idx:o,item:r,matches:a})}})),c}},{key:"_findMatches",value:function(e){var t=e.key,n=e.value,r=e.searcher;if(!k(n))return[];var i=[];if(v(n))n.forEach((function(e){var n=e.v,o=e.i,c=e.n;if(k(n)){var a=r.searchIn(n),s=a.isMatch,u=a.score,h=a.indices;s&&i.push({score:u,key:t,value:n,idx:o,norm:c,indices:h})}}));else{var o=n.v,c=n.n,a=r.searchIn(o),s=a.isMatch,u=a.score,h=a.indices;s&&i.push({score:u,key:t,value:o,norm:c,indices:h})}return i}}]),e}();return ye.version="6.6.2",ye.createIndex=F,ye.parseIndex=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.getFn,r=void 0===n?I.getFn:n,i=t.fieldNormWeight,o=void 0===i?I.fieldNormWeight:i,c=e.keys,a=e.records,s=new $({getFn:r,fieldNormWeight:o});return s.setKeys(c),s.setIndexRecords(a),s},ye.config=I,function(){ne.push.apply(ne,arguments)}(te),ye},"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).Fuse=t();
 
 
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/losses.py DELETED
@@ -1,364 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- import torchvision.models as models
5
-
6
-
7
- ####################################################################################################
8
- # adversarial loss for different gan mode
9
- ####################################################################################################
10
- class GANLoss(nn.Module):
11
- """Define different GAN objectives.
12
-
13
- The GANLoss class abstracts away the need to create the target label tensor
14
- that has the same size as the input.
15
- """
16
-
17
- def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
18
- """ Initialize the GANLoss class.
19
-
20
- Parameters:
21
- gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, hinge, wgangp, and nonsaturating.
22
- target_real_label (bool) - - label for a real image
23
- target_fake_label (bool) - - label of a fake image
24
-
25
- Note: Do not use sigmoid as the last layer of Discriminator.
26
- LSGAN needs no sigmoid. Vanilla GANs will handle it with BCEWithLogitsLoss.
27
- """
28
- super(GANLoss, self).__init__()
29
- self.register_buffer('real_label', torch.tensor(target_real_label))
30
- self.register_buffer('fake_label', torch.tensor(target_fake_label))
31
- self.gan_mode = gan_mode
32
- if gan_mode == 'lsgan':
33
- self.loss = nn.MSELoss()
34
- elif gan_mode == 'vanilla':
35
- self.loss = nn.BCEWithLogitsLoss()
36
- elif gan_mode == 'hinge':
37
- self.loss = nn.ReLU()
38
- elif gan_mode in ['wgangp', 'nonsaturating']:
39
- self.loss = None
40
- else:
41
- raise NotImplementedError('gan mode %s not implemented' % gan_mode)
42
-
43
- def get_target_tensor(self, prediction, target_is_real):
44
- """Create label tensors with the same size as the input.
45
-
46
- Parameters:
47
- prediction (tensor) - - typically the prediction from a discriminator
48
- target_is_real (bool) - - if the ground truth label is for real examples or fake examples
49
-
50
- Returns:
51
- A label tensor filled with ground truth label, and with the size of the input
52
- """
53
-
54
- if target_is_real:
55
- target_tensor = self.real_label
56
- else:
57
- target_tensor = self.fake_label
58
- return target_tensor.expand_as(prediction)
59
-
60
- def calculate_loss(self, prediction, target_is_real, is_dis=False):
61
- """Calculate loss given Discriminator's output and grount truth labels.
62
-
63
- Parameters:
64
- prediction (tensor) - - typically the prediction output from a discriminator
65
- target_is_real (bool) - - if the ground truth label is for real examples or fake examples
66
-
67
- Returns:
68
- the calculated loss.
69
- """
70
- if self.gan_mode in ['lsgan', 'vanilla']:
71
- target_tensor = self.get_target_tensor(prediction, target_is_real)
72
- loss = self.loss(prediction, target_tensor)
73
- if self.gan_mode == 'lsgan':
74
- loss = loss * 0.5
75
- else:
76
- if is_dis:
77
- if target_is_real:
78
- prediction = -prediction
79
- if self.gan_mode == 'wgangp':
80
- loss = prediction.mean()
81
- elif self.gan_mode == 'nonsaturating':
82
- loss = F.softplus(prediction).mean()
83
- elif self.gan_mode == 'hinge':
84
- loss = self.loss(1+prediction).mean()
85
- else:
86
- if self.gan_mode == 'nonsaturating':
87
- loss = F.softplus(-prediction).mean()
88
- else:
89
- loss = -prediction.mean()
90
- return loss
91
-
92
- def __call__(self, predictions, target_is_real, is_dis=False):
93
- """Calculate loss for multi-scales gan"""
94
- if isinstance(predictions, list):
95
- losses = []
96
- for prediction in predictions:
97
- losses.append(self.calculate_loss(prediction, target_is_real, is_dis))
98
- loss = sum(losses)
99
- else:
100
- loss = self.calculate_loss(predictions, target_is_real, is_dis)
101
-
102
- return loss
103
-
104
-
105
- def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
106
- """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
107
-
108
- Arguments:
109
- netD (network) -- discriminator network
110
- real_data (tensor array) -- real examples
111
- fake_data (tensor array) -- generated examples from the generator
112
- device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
113
- type (str) -- if we mix real and fake data or not [real | fake | mixed].
114
- constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2
115
- lambda_gp (float) -- weight for this loss
116
-
117
- Returns the gradient penalty loss
118
- """
119
- if lambda_gp > 0.0:
120
- if type == 'real': # either use real examples, fake examples, or a linear interpolation of two.
121
- interpolatesv = real_data
122
- elif type == 'fake':
123
- interpolatesv = fake_data
124
- elif type == 'mixed':
125
- alpha = torch.rand(real_data.shape[0], 1, device=device)
126
- alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
127
- interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
128
- else:
129
- raise NotImplementedError('{} not implemented'.format(type))
130
- interpolatesv.requires_grad_(True)
131
- disc_interpolates = netD(interpolatesv)
132
- if isinstance(disc_interpolates, list):
133
- gradients = 0
134
- for disc_interpolate in disc_interpolates:
135
- gradients += torch.autograd.grad(outputs=disc_interpolate, inputs=interpolatesv,
136
- grad_outputs=torch.ones(disc_interpolate.size()).to(device),
137
- create_graph=True, retain_graph=True, only_inputs=True)[0]
138
- else:
139
- gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
140
- grad_outputs=torch.ones(disc_interpolates.size()).to(device),
141
- create_graph=True, retain_graph=True, only_inputs=True)[0]
142
- gradients = gradients.view(real_data.size(0), -1) # flatten the data
143
- gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
144
- return gradient_penalty, gradients
145
- else:
146
- return 0.0, None
147
-
148
-
149
- ####################################################################################################
150
- # trained LPIPS loss
151
- ####################################################################################################
152
- def normalize_tensor(x, eps=1e-10):
153
- norm_factor = torch.sqrt(torch.sum(x**2, dim=1, keepdim=True))
154
- return x/(norm_factor+eps)
155
-
156
-
157
- def spatial_average(x, keepdim=True):
158
- return x.mean([2, 3], keepdim=keepdim)
159
-
160
-
161
- class NetLinLayer(nn.Module):
162
- """ A single linear layer which does a 1x1 conv """
163
- def __init__(self, chn_in, chn_out=1, use_dropout=False):
164
- super(NetLinLayer, self).__init__()
165
- layers = [nn.Dropout(), ] if (use_dropout) else []
166
- layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
167
- self.model = nn.Sequential(*layers)
168
-
169
-
170
- class LPIPSLoss(nn.Module):
171
- """
172
- Learned perceptual metric
173
- https://github.com/richzhang/PerceptualSimilarity
174
- """
175
- def __init__(self, use_dropout=True, ckpt_path=None):
176
- super(LPIPSLoss, self).__init__()
177
- self.path = ckpt_path
178
- self.net = VGG16()
179
- self.chns = [64, 128, 256, 512, 512] # vgg16 feature channels
180
- self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
181
- self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
182
- self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
183
- self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
184
- self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
185
- self.load_from_pretrained()
186
- for param in self.parameters():
187
- param.requires_grad = False
188
-
189
- def load_from_pretrained(self):
190
- self.load_state_dict(torch.load(self.path, map_location=torch.device("cpu")), strict=False)
191
- print("loaded pretrained LPIPS loss from {}".format(self.path))
192
-
193
- def _get_features(self, vgg_f):
194
- names = ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']
195
- feats = []
196
- for i in range(len(names)):
197
- name = names[i]
198
- feat = vgg_f[name]
199
- feats.append(feat)
200
- return feats
201
-
202
- def forward(self, x, y):
203
- x_vgg, y_vgg = self._get_features(self.net(x)), self._get_features(self.net(y))
204
- lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
205
- reses = []
206
- loss = 0
207
-
208
- for i in range(len(self.chns)):
209
- x_feats, y_feats = normalize_tensor(x_vgg[i]), normalize_tensor(y_vgg[i])
210
- diffs = (x_feats - y_feats) ** 2
211
- res = spatial_average(lins[i].model(diffs))
212
- loss += res
213
- reses.append(res)
214
-
215
- return loss
216
-
217
-
218
- class PerceptualLoss(nn.Module):
219
- r"""
220
- Perceptual loss, VGG-based
221
- https://arxiv.org/abs/1603.08155
222
- https://github.com/dxyang/StyleTransfer/blob/master/utils.py
223
- """
224
-
225
- def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 0.0]):
226
- super(PerceptualLoss, self).__init__()
227
- self.add_module('vgg', VGG16())
228
- self.criterion = nn.L1Loss()
229
- self.weights = weights
230
-
231
- def __call__(self, x, y):
232
- # Compute features
233
- x_vgg, y_vgg = self.vgg(x), self.vgg(y)
234
-
235
- content_loss = 0.0
236
- content_loss += self.weights[0] * self.criterion(x_vgg['relu1_2'], y_vgg['relu1_2']) if self.weights[0] > 0 else 0
237
- content_loss += self.weights[1] * self.criterion(x_vgg['relu2_2'], y_vgg['relu2_2']) if self.weights[1] > 0 else 0
238
- content_loss += self.weights[2] * self.criterion(x_vgg['relu3_3'], y_vgg['relu3_3']) if self.weights[2] > 0 else 0
239
- content_loss += self.weights[3] * self.criterion(x_vgg['relu4_3'], y_vgg['relu4_3']) if self.weights[3] > 0 else 0
240
- content_loss += self.weights[4] * self.criterion(x_vgg['relu5_3'], y_vgg['relu5_3']) if self.weights[4] > 0 else 0
241
-
242
- return content_loss
243
-
244
-
245
- class Normalization(nn.Module):
246
- def __init__(self, device):
247
- super(Normalization, self).__init__()
248
- # .view the mean and std to make them [C x 1 x 1] so that they can
249
- # directly work with image Tensor of shape [B x C x H x W].
250
- # B is batch size. C is number of channels. H is height and W is width.
251
- mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
252
- std = torch.tensor([0.229, 0.224, 0.225]).to(device)
253
- self.mean = mean.view(-1, 1, 1)
254
- self.std = std.view(-1, 1, 1)
255
-
256
- def forward(self, img):
257
- # normalize img
258
- return (img - self.mean) / self.std
259
-
260
-
261
- class VGG16(nn.Module):
262
- def __init__(self):
263
- super(VGG16, self).__init__()
264
- features = models.vgg16(pretrained=True).features
265
- self.relu1_1 = torch.nn.Sequential()
266
- self.relu1_2 = torch.nn.Sequential()
267
-
268
- self.relu2_1 = torch.nn.Sequential()
269
- self.relu2_2 = torch.nn.Sequential()
270
-
271
- self.relu3_1 = torch.nn.Sequential()
272
- self.relu3_2 = torch.nn.Sequential()
273
- self.relu3_3 = torch.nn.Sequential()
274
-
275
- self.relu4_1 = torch.nn.Sequential()
276
- self.relu4_2 = torch.nn.Sequential()
277
- self.relu4_3 = torch.nn.Sequential()
278
-
279
- self.relu5_1 = torch.nn.Sequential()
280
- self.relu5_2 = torch.nn.Sequential()
281
- self.relu5_3 = torch.nn.Sequential()
282
-
283
- for x in range(2):
284
- self.relu1_1.add_module(str(x), features[x])
285
-
286
- for x in range(2, 4):
287
- self.relu1_2.add_module(str(x), features[x])
288
-
289
- for x in range(4, 7):
290
- self.relu2_1.add_module(str(x), features[x])
291
-
292
- for x in range(7, 9):
293
- self.relu2_2.add_module(str(x), features[x])
294
-
295
- for x in range(9, 12):
296
- self.relu3_1.add_module(str(x), features[x])
297
-
298
- for x in range(12, 14):
299
- self.relu3_2.add_module(str(x), features[x])
300
-
301
- for x in range(14, 16):
302
- self.relu3_3.add_module(str(x), features[x])
303
-
304
- for x in range(16, 18):
305
- self.relu4_1.add_module(str(x), features[x])
306
-
307
- for x in range(18, 21):
308
- self.relu4_2.add_module(str(x), features[x])
309
-
310
- for x in range(21, 23):
311
- self.relu4_3.add_module(str(x), features[x])
312
-
313
- for x in range(23, 26):
314
- self.relu5_1.add_module(str(x), features[x])
315
-
316
- for x in range(26, 28):
317
- self.relu5_2.add_module(str(x), features[x])
318
-
319
- for x in range(28, 30):
320
- self.relu5_3.add_module(str(x), features[x])
321
-
322
- # don't need the gradients, just want the features
323
- for param in self.parameters():
324
- param.requires_grad = False
325
-
326
- def forward(self, x,):
327
- relu1_1 = self.relu1_1(x)
328
- relu1_2 = self.relu1_2(relu1_1)
329
-
330
- relu2_1 = self.relu2_1(relu1_2)
331
- relu2_2 = self.relu2_2(relu2_1)
332
-
333
- relu3_1 = self.relu3_1(relu2_2)
334
- relu3_2 = self.relu3_2(relu3_1)
335
- relu3_3 = self.relu3_3(relu3_2)
336
-
337
- relu4_1 = self.relu4_1(relu3_3)
338
- relu4_2 = self.relu4_2(relu4_1)
339
- relu4_3 = self.relu4_3(relu4_2)
340
-
341
- relu5_1 = self.relu5_1(relu4_3)
342
- relu5_2 = self.relu5_2(relu5_1)
343
- relu5_3 = self.relu5_3(relu5_2)
344
-
345
- out = {
346
- 'relu1_1': relu1_1,
347
- 'relu1_2': relu1_2,
348
-
349
- 'relu2_1': relu2_1,
350
- 'relu2_2': relu2_2,
351
-
352
- 'relu3_1': relu3_1,
353
- 'relu3_2': relu3_2,
354
- 'relu3_3': relu3_3,
355
-
356
- 'relu4_1': relu4_1,
357
- 'relu4_2': relu4_2,
358
- 'relu4_3': relu4_3,
359
-
360
- 'relu5_1': relu5_1,
361
- 'relu5_2': relu5_2,
362
- 'relu5_3': relu5_3,
363
- }
364
- return out
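
Note: a minimal, hypothetical usage sketch for the loss module deleted above (not part of the original file). The import path, the placeholder image tensors and the random "discriminator logits" are assumptions made only to keep the snippet self-contained; PerceptualLoss fetches torchvision's pretrained VGG16 weights on first use.

import torch
from model.losses import GANLoss, PerceptualLoss  # import path assumed from the TFill layout above

# Placeholder tensors standing in for a generator batch and the matching real images.
fake_images = torch.randn(2, 3, 256, 256)
real_images = torch.randn(2, 3, 256, 256)

adv_loss = GANLoss(gan_mode='nonsaturating')
content_loss = PerceptualLoss()  # downloads torchvision's pretrained VGG16 on first use

# Discriminator logits would normally come from a real netD; random values are used here for illustration.
pred_real = torch.randn(2, 1)
pred_fake = torch.randn(2, 1)

# Discriminator step: real samples labelled real, generated samples labelled fake.
d_loss = adv_loss(pred_real, True, is_dis=True) + adv_loss(pred_fake, False, is_dis=True)

# Generator step: fool the discriminator while staying perceptually close to the target.
g_loss = adv_loss(pred_fake, True) + content_loss(fake_images, real_images)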
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/io.py DELETED
@@ -1,258 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import io
3
- import os.path as osp
4
- from pathlib import Path
5
-
6
- import cv2
7
- import numpy as np
8
- from cv2 import (IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_IGNORE_ORIENTATION,
9
- IMREAD_UNCHANGED)
10
-
11
- from annotator.uniformer.mmcv.utils import check_file_exist, is_str, mkdir_or_exist
12
-
13
- try:
14
- from turbojpeg import TJCS_RGB, TJPF_BGR, TJPF_GRAY, TurboJPEG
15
- except ImportError:
16
- TJCS_RGB = TJPF_GRAY = TJPF_BGR = TurboJPEG = None
17
-
18
- try:
19
- from PIL import Image, ImageOps
20
- except ImportError:
21
- Image = None
22
-
23
- try:
24
- import tifffile
25
- except ImportError:
26
- tifffile = None
27
-
28
- jpeg = None
29
- supported_backends = ['cv2', 'turbojpeg', 'pillow', 'tifffile']
30
-
31
- imread_flags = {
32
- 'color': IMREAD_COLOR,
33
- 'grayscale': IMREAD_GRAYSCALE,
34
- 'unchanged': IMREAD_UNCHANGED,
35
- 'color_ignore_orientation': IMREAD_IGNORE_ORIENTATION | IMREAD_COLOR,
36
- 'grayscale_ignore_orientation':
37
- IMREAD_IGNORE_ORIENTATION | IMREAD_GRAYSCALE
38
- }
39
-
40
- imread_backend = 'cv2'
41
-
42
-
43
- def use_backend(backend):
44
- """Select a backend for image decoding.
45
-
46
- Args:
47
- backend (str): The image decoding backend type. Options are `cv2`,
48
- `pillow`, `turbojpeg` (see https://github.com/lilohuang/PyTurboJPEG)
49
- and `tifffile`. `turbojpeg` is faster but it only supports `.jpeg`
50
- file format.
51
- """
52
- assert backend in supported_backends
53
- global imread_backend
54
- imread_backend = backend
55
- if imread_backend == 'turbojpeg':
56
- if TurboJPEG is None:
57
- raise ImportError('`PyTurboJPEG` is not installed')
58
- global jpeg
59
- if jpeg is None:
60
- jpeg = TurboJPEG()
61
- elif imread_backend == 'pillow':
62
- if Image is None:
63
- raise ImportError('`Pillow` is not installed')
64
- elif imread_backend == 'tifffile':
65
- if tifffile is None:
66
- raise ImportError('`tifffile` is not installed')
67
-
68
-
69
- def _jpegflag(flag='color', channel_order='bgr'):
70
- channel_order = channel_order.lower()
71
- if channel_order not in ['rgb', 'bgr']:
72
- raise ValueError('channel order must be either "rgb" or "bgr"')
73
-
74
- if flag == 'color':
75
- if channel_order == 'bgr':
76
- return TJPF_BGR
77
- elif channel_order == 'rgb':
78
- return TJCS_RGB
79
- elif flag == 'grayscale':
80
- return TJPF_GRAY
81
- else:
82
- raise ValueError('flag must be "color" or "grayscale"')
83
-
84
-
85
- def _pillow2array(img, flag='color', channel_order='bgr'):
86
- """Convert a pillow image to numpy array.
87
-
88
- Args:
89
- img (:obj:`PIL.Image.Image`): The image loaded using PIL
90
- flag (str): Flags specifying the color type of a loaded image,
91
- candidates are 'color', 'grayscale' and 'unchanged'.
92
- Default to 'color'.
93
- channel_order (str): The channel order of the output image array,
94
- candidates are 'bgr' and 'rgb'. Default to 'bgr'.
95
-
96
- Returns:
97
- np.ndarray: The converted numpy array
98
- """
99
- channel_order = channel_order.lower()
100
- if channel_order not in ['rgb', 'bgr']:
101
- raise ValueError('channel order must be either "rgb" or "bgr"')
102
-
103
- if flag == 'unchanged':
104
- array = np.array(img)
105
- if array.ndim >= 3 and array.shape[2] >= 3: # color image
106
- array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR
107
- else:
108
- # Handle exif orientation tag
109
- if flag in ['color', 'grayscale']:
110
- img = ImageOps.exif_transpose(img)
111
- # If the image mode is not 'RGB', convert it to 'RGB' first.
112
- if img.mode != 'RGB':
113
- if img.mode != 'LA':
114
- # Most formats except 'LA' can be directly converted to RGB
115
- img = img.convert('RGB')
116
- else:
117
- # When the mode is 'LA', the default conversion will fill in
118
- # the canvas with black, which sometimes shadows black objects
119
- # in the foreground.
120
- #
121
- # Therefore, a random color (124, 117, 104) is used for canvas
122
- img_rgba = img.convert('RGBA')
123
- img = Image.new('RGB', img_rgba.size, (124, 117, 104))
124
- img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha
125
- if flag in ['color', 'color_ignore_orientation']:
126
- array = np.array(img)
127
- if channel_order != 'rgb':
128
- array = array[:, :, ::-1] # RGB to BGR
129
- elif flag in ['grayscale', 'grayscale_ignore_orientation']:
130
- img = img.convert('L')
131
- array = np.array(img)
132
- else:
133
- raise ValueError(
134
- 'flag must be "color", "grayscale", "unchanged", '
135
- f'"color_ignore_orientation" or "grayscale_ignore_orientation"'
136
- f' but got {flag}')
137
- return array
138
-
139
-
140
- def imread(img_or_path, flag='color', channel_order='bgr', backend=None):
141
- """Read an image.
142
-
143
- Args:
144
- img_or_path (ndarray or str or Path): Either a numpy array or str or
145
- pathlib.Path. If it is a numpy array (loaded image), then
146
- it will be returned as is.
147
- flag (str): Flags specifying the color type of a loaded image,
148
- candidates are `color`, `grayscale`, `unchanged`,
149
- `color_ignore_orientation` and `grayscale_ignore_orientation`.
150
- By default, `cv2` and `pillow` backend would rotate the image
151
- according to its EXIF info unless called with `unchanged` or
152
- `*_ignore_orientation` flags. `turbojpeg` and `tifffile` backend
153
- always ignore image's EXIF info regardless of the flag.
154
- The `turbojpeg` backend only supports `color` and `grayscale`.
155
- channel_order (str): Order of channel, candidates are `bgr` and `rgb`.
156
- backend (str | None): The image decoding backend type. Options are
157
- `cv2`, `pillow`, `turbojpeg`, `tifffile`, `None`.
158
- If backend is None, the global imread_backend specified by
159
- ``mmcv.use_backend()`` will be used. Default: None.
160
-
161
- Returns:
162
- ndarray: Loaded image array.
163
- """
164
-
165
- if backend is None:
166
- backend = imread_backend
167
- if backend not in supported_backends:
168
- raise ValueError(f'backend: {backend} is not supported. Supported '
169
- "backends are 'cv2', 'turbojpeg', 'pillow'")
170
- if isinstance(img_or_path, Path):
171
- img_or_path = str(img_or_path)
172
-
173
- if isinstance(img_or_path, np.ndarray):
174
- return img_or_path
175
- elif is_str(img_or_path):
176
- check_file_exist(img_or_path,
177
- f'img file does not exist: {img_or_path}')
178
- if backend == 'turbojpeg':
179
- with open(img_or_path, 'rb') as in_file:
180
- img = jpeg.decode(in_file.read(),
181
- _jpegflag(flag, channel_order))
182
- if img.shape[-1] == 1:
183
- img = img[:, :, 0]
184
- return img
185
- elif backend == 'pillow':
186
- img = Image.open(img_or_path)
187
- img = _pillow2array(img, flag, channel_order)
188
- return img
189
- elif backend == 'tifffile':
190
- img = tifffile.imread(img_or_path)
191
- return img
192
- else:
193
- flag = imread_flags[flag] if is_str(flag) else flag
194
- img = cv2.imread(img_or_path, flag)
195
- if flag == IMREAD_COLOR and channel_order == 'rgb':
196
- cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
197
- return img
198
- else:
199
- raise TypeError('"img" must be a numpy array or a str or '
200
- 'a pathlib.Path object')
201
-
202
-
203
- def imfrombytes(content, flag='color', channel_order='bgr', backend=None):
204
- """Read an image from bytes.
205
-
206
- Args:
207
- content (bytes): Image bytes got from files or other streams.
208
- flag (str): Same as :func:`imread`.
209
- backend (str | None): The image decoding backend type. Options are
210
- `cv2`, `pillow`, `turbojpeg`, `None`. If backend is None, the
211
- global imread_backend specified by ``mmcv.use_backend()`` will be
212
- used. Default: None.
213
-
214
- Returns:
215
- ndarray: Loaded image array.
216
- """
217
-
218
- if backend is None:
219
- backend = imread_backend
220
- if backend not in supported_backends:
221
- raise ValueError(f'backend: {backend} is not supported. Supported '
222
- "backends are 'cv2', 'turbojpeg', 'pillow'")
223
- if backend == 'turbojpeg':
224
- img = jpeg.decode(content, _jpegflag(flag, channel_order))
225
- if img.shape[-1] == 1:
226
- img = img[:, :, 0]
227
- return img
228
- elif backend == 'pillow':
229
- buff = io.BytesIO(content)
230
- img = Image.open(buff)
231
- img = _pillow2array(img, flag, channel_order)
232
- return img
233
- else:
234
- img_np = np.frombuffer(content, np.uint8)
235
- flag = imread_flags[flag] if is_str(flag) else flag
236
- img = cv2.imdecode(img_np, flag)
237
- if flag == IMREAD_COLOR and channel_order == 'rgb':
238
- cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
239
- return img
240
-
241
-
242
- def imwrite(img, file_path, params=None, auto_mkdir=True):
243
- """Write image to file.
244
-
245
- Args:
246
- img (ndarray): Image array to be written.
247
- file_path (str): Image file path.
248
- params (None or list): Same as opencv :func:`imwrite` interface.
249
- auto_mkdir (bool): If the parent folder of `file_path` does not exist,
250
- whether to create it automatically.
251
-
252
- Returns:
253
- bool: Successful or not.
254
- """
255
- if auto_mkdir:
256
- dir_name = osp.abspath(osp.dirname(file_path))
257
- mkdir_or_exist(dir_name)
258
- return cv2.imwrite(file_path, img, params)
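
Note: a short, hypothetical usage sketch for the mmcv-style image I/O module deleted above (not part of the original file). The import path and the image paths are placeholders.

import numpy as np
from annotator.uniformer.mmcv.image.io import imread, imwrite, use_backend  # import path assumed

use_backend('pillow')  # switch from the default cv2 decoder to Pillow

img = imread('example.jpg', flag='color')  # placeholder path; returns a BGR ndarray by default
assert isinstance(img, np.ndarray)

imwrite(img, 'out/example_copy.jpg')  # the parent directory is created automatically (auto_mkdir=True)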
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/log.py DELETED
@@ -1,80 +0,0 @@
1
- """A simple log mechanism styled after PEP 282."""
2
-
3
- # The class here is styled after PEP 282 so that it could later be
4
- # replaced with a standard Python logging implementation.
5
-
6
- import sys
7
-
8
- DEBUG = 1
9
- INFO = 2
10
- WARN = 3
11
- ERROR = 4
12
- FATAL = 5
13
-
14
-
15
- class Log:
16
- def __init__(self, threshold=WARN):
17
- self.threshold = threshold
18
-
19
- def _log(self, level, msg, args):
20
- if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
21
- raise ValueError('%s wrong log level' % str(level))
22
-
23
- if level >= self.threshold:
24
- if args:
25
- msg = msg % args
26
- if level in (WARN, ERROR, FATAL):
27
- stream = sys.stderr
28
- else:
29
- stream = sys.stdout
30
- try:
31
- stream.write('%s\n' % msg)
32
- except UnicodeEncodeError:
33
- # emulate backslashreplace error handler
34
- encoding = stream.encoding
35
- msg = msg.encode(encoding, "backslashreplace").decode(encoding)
36
- stream.write('%s\n' % msg)
37
- stream.flush()
38
-
39
- def log(self, level, msg, *args):
40
- self._log(level, msg, args)
41
-
42
- def debug(self, msg, *args):
43
- self._log(DEBUG, msg, args)
44
-
45
- def info(self, msg, *args):
46
- self._log(INFO, msg, args)
47
-
48
- def warn(self, msg, *args):
49
- self._log(WARN, msg, args)
50
-
51
- def error(self, msg, *args):
52
- self._log(ERROR, msg, args)
53
-
54
- def fatal(self, msg, *args):
55
- self._log(FATAL, msg, args)
56
-
57
-
58
- _global_log = Log()
59
- log = _global_log.log
60
- debug = _global_log.debug
61
- info = _global_log.info
62
- warn = _global_log.warn
63
- error = _global_log.error
64
- fatal = _global_log.fatal
65
-
66
-
67
- def set_threshold(level):
68
- # return the old threshold for use from tests
69
- old = _global_log.threshold
70
- _global_log.threshold = level
71
- return old
72
-
73
-
74
- def set_verbosity(v):
75
- if v <= 0:
76
- set_threshold(WARN)
77
- elif v == 1:
78
- set_threshold(INFO)
79
- elif v >= 2:
80
- set_threshold(DEBUG)
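
Note: a brief, hypothetical usage sketch for the PEP 282-style logger deleted above (not part of the original file). The import path assumes the vendored setuptools copy; message contents are placeholders.

from setuptools._distutils import log

log.set_verbosity(1)  # the threshold defaults to WARN; verbosity 1 lets INFO messages through

log.info("copying %s -> %s", "src/pkg", "build/lib/pkg")  # INFO and DEBUG go to stdout
log.warn("deprecated option %r ignored", "--old-flag")    # WARN, ERROR and FATAL go to stderr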
spaces/Audio-AGI/WavJourney/VoiceParser/hubert_manager.py DELETED
@@ -1,33 +0,0 @@
1
- import os.path
2
- import shutil
3
- import urllib.request
4
-
5
- import huggingface_hub
6
-
7
-
8
- class HuBERTManager:
9
- @staticmethod
10
- def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'):
11
- install_dir = os.path.join('VoiceParser', 'hubert')
12
- if not os.path.isdir(install_dir):
13
- os.makedirs(install_dir, exist_ok=True)
14
- install_file = os.path.join(install_dir, file_name)
15
- if not os.path.isfile(install_file):
16
- print('Downloading HuBERT base model')
17
- urllib.request.urlretrieve(download_url, install_file)
18
- print('Downloaded HuBERT')
19
- return install_file
20
-
21
-
22
- @staticmethod
23
- def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', local_file: str = 'tokenizer.pth'):
24
- install_dir = os.path.join('VoiceParser', 'hubert')
25
- if not os.path.isdir(install_dir):
26
- os.makedirs(install_dir, exist_ok=True)
27
- install_file = os.path.join(install_dir, local_file)
28
- if not os.path.isfile(install_file):
29
- print('Downloading HuBERT custom tokenizer')
30
- huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False)
31
- shutil.move(os.path.join(install_dir, model), install_file)
32
- print('Downloaded tokenizer')
33
- return install_file
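
Note: a minimal, hypothetical usage sketch for the manager deleted above (not part of the original file). The import path is assumed from the WavJourney layout.

from VoiceParser.hubert_manager import HuBERTManager

# Both helpers are idempotent: they download only when the file is missing and always return the local path.
hubert_ckpt = HuBERTManager.make_sure_hubert_installed()        # -> VoiceParser/hubert/hubert.pt
tokenizer_ckpt = HuBERTManager.make_sure_tokenizer_installed()  # -> VoiceParser/hubert/tokenizer.pth
print(hubert_ckpt, tokenizer_ckpt)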
spaces/Audio-AGI/WavJourney/voice_presets.py DELETED
@@ -1,96 +0,0 @@
1
- import os
2
- import json, json5
3
- from pathlib import Path
4
-
5
- import utils
6
- from APIs import VP
7
-
8
-
9
- def save_voice_presets_metadata(voice_presets_path, metadata):
10
- with open(voice_presets_path / 'metadata.json', 'w') as f:
11
- json.dump(metadata, f, indent=4)
12
-
13
- def load_voice_presets_metadata(voice_presets_path, safe_if_metadata_not_exist=False):
14
- metadata_full_path = Path(voice_presets_path) / 'metadata.json'
15
-
16
- if safe_if_metadata_not_exist:
17
- if not os.path.exists(metadata_full_path):
18
- return {}
19
-
20
- with open(metadata_full_path, 'r') as f:
21
- presets = json5.load(f)
22
-
23
- return presets
24
-
25
- # return system voice presets and session voice presets individually, each in a list
26
- def get_voice_presets(session_id):
27
- system_presets, session_presets = [], []
28
-
29
- # Load system presets
30
- system_presets = load_voice_presets_metadata(utils.get_system_voice_preset_path())
31
-
32
- # Load session presets
33
- session_presets = load_voice_presets_metadata(
34
- utils.get_session_voice_preset_path(session_id),
35
- safe_if_metadata_not_exist=True
36
- )
37
-
38
- return system_presets, session_presets
39
-
40
- # return merged voice presets in a {voice_preset_name: voice_preset} dict
41
- def get_merged_voice_presets(session_id):
42
- system_presets, session_presets = get_voice_presets(session_id)
43
- res = {}
44
- for preset in list(system_presets.values()) + list(session_presets.values()):
45
- res[preset['id']] = preset # session presets with the same id will cover that of system presets
46
- return res
47
-
48
- def add_voice_preset(voice_presets_path, presets, id, desc, wav_file_path):
49
- if id in presets:
50
- raise KeyError(f'{id} already in voice preset, path={voice_presets_path}!')
51
-
52
- # Convert wav to npz
53
- npz_path = voice_presets_path / 'npz'
54
- VP(wav_file_path, npz_path)
55
- npz_file_path = npz_path / f'{Path(wav_file_path).stem}.npz'
56
-
57
- presets[id] = {
58
- 'id': id,
59
- 'desc': desc,
60
- 'npz_path': str(npz_file_path)
61
- }
62
- save_voice_presets_metadata(voice_presets_path, presets)
63
- return presets[id]
64
-
65
- def add_session_voice_preset(id, desc, wav_file_path, session_id):
66
- voice_presets_path = utils.get_session_voice_preset_path(session_id)
67
- os.makedirs(voice_presets_path / 'npz', exist_ok=True)
68
- presets = load_voice_presets_metadata(voice_presets_path, safe_if_metadata_not_exist=True)
69
- if len(presets) >= 3:
70
- raise ValueError(f'session voice presets size exceed 3')
71
- if id in presets:
72
- raise KeyError(f'{id} already in voice preset, path={voice_presets_path}!')
73
-
74
- return add_voice_preset(voice_presets_path, presets, id, desc, wav_file_path)
75
-
76
- def add_system_voice_preset(id, desc, wav_file_path):
77
- voice_presets_path = utils.get_system_voice_preset_path()
78
- presets = load_voice_presets_metadata(voice_presets_path)
79
- return add_voice_preset(voice_presets_path, presets, id, desc, wav_file_path)
80
-
81
- # if session_id set to '', we are removing system voice presets
82
- def remove_session_voice_preset(id, session_id):
83
- voice_presets_path = utils.get_session_voice_preset_path(session_id)
84
- presets = load_voice_presets_metadata(
85
- voice_presets_path,
86
- safe_if_metadata_not_exist=True
87
- )
88
- preset = presets.pop(id)
89
- npz_path = preset['npz_path']
90
-
91
- try:
92
- os.remove(npz_path)
93
- except FileNotFoundError:
94
- print(f"INFO: trying to delete {npz_path} which does not exist, path={voice_presets_path}.")
95
-
96
- save_voice_presets_metadata(voice_presets_path, presets)
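
Note: a rough, hypothetical sketch of the intended call pattern for the preset helpers deleted above (not part of the original file). The session id and WAV path are placeholders, and `utils` and `APIs.VP` come from the surrounding WavJourney codebase.

import voice_presets

session_id = 'demo-session'            # placeholder session identifier
wav_path = 'examples/narrator.wav'     # placeholder reference recording

# Register a per-session preset; the module converts the WAV into an .npz voice prompt via APIs.VP
# and refuses to store more than three presets per session.
voice_presets.add_session_voice_preset(
    id='narrator',
    desc='calm male narrator voice',
    wav_file_path=wav_path,
    session_id=session_id,
)

# System-wide and session presets merged into a single {preset_id: preset} dict.
presets = voice_presets.get_merged_voice_presets(session_id)
print(list(presets))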
spaces/B10915003/B10915003-autotrain-jimmy-test-face-identification-53251125423/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: B10915003 Autotrain Jimmy Test Face Identification 53251125423
3
- emoji: 👁
4
- colorFrom: green
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.27.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/BAAI/dreambooth-altdiffusion/app.py DELETED
@@ -1,654 +0,0 @@
1
- import gradio as gr
2
- import os
3
- from pathlib import Path
4
- import argparse
5
- import shutil
6
- from train_dreambooth import run_training
7
- from convertosd import convert
8
- from PIL import Image
9
- from slugify import slugify
10
- import requests
11
- import torch
12
- import zipfile
13
- import tarfile
14
- import urllib.parse
15
- import gc
16
- # from diffusers import StableDiffusionPipeline
17
- from huggingface_hub import snapshot_download
18
-
19
-
20
- is_spaces = True if "SPACE_ID" in os.environ else False
21
- is_shared_ui = True if "IS_SHARED_UI" in os.environ else False
22
- is_gpu_associated = torch.cuda.is_available()
23
-
24
- css = '''
25
- .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
26
- .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
27
- #component-4, #component-3, #component-10{min-height: 0}
28
- .duplicate-button img{margin: 0}
29
- '''
30
- maximum_concepts = 3
31
-
32
- #Pre download the files
33
- if(is_gpu_associated):
34
- model_v1 = snapshot_download(repo_id="multimodalart/sd-fine-tunable")
35
- model_v2 = snapshot_download(repo_id="stabilityai/stable-diffusion-2")
36
- model_v2_512 = snapshot_download(repo_id="stabilityai/stable-diffusion-2-base")
37
- model_alt = snapshot_download(repo_id="BAAI/AltDiffusion")
38
- model_alt_m9 = snapshot_download(repo_id="BAAI/AltDiffusion-m9")
39
- safety_checker = snapshot_download(repo_id="multimodalart/sd-sc")
40
- model_to_load = model_alt_m9
41
- with zipfile.ZipFile("mix.zip", 'r') as zip_ref:
42
- zip_ref.extractall(".")
43
-
44
- def swap_text(option, base):
45
- resize_width = 768 if base == "v2-768" else 512
46
- mandatory_liability = "You must have the right to do so and you are liable for the images you use, example:"
47
- if(option == "object"):
48
- instance_prompt_example = "cttoy"
49
- freeze_for = 30
50
- return [f"You are going to train `object`(s), upload 5-10 images of each object you are planning on training on from different angles/perspectives. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. {mandatory_liability}:", '''<img src="https://raw.githubusercontent.com/superhero-7/img_bank/main/Naruto.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}.", freeze_for, gr.update(visible=False)]
51
- elif(option == "person"):
52
- instance_prompt_example = "julcto"
53
- freeze_for = 70
54
- #show_prior_preservation = True if base != "v2-768" else False
55
- show_prior_preservation=False
56
- if(show_prior_preservation):
57
- prior_preservation_box_update = gr.update(visible=show_prior_preservation)
58
- else:
59
- prior_preservation_box_update = gr.update(visible=show_prior_preservation, value=False)
60
- return [f"You are going to train a `person`(s), upload 10-20 images of each person you are planning on training on from different angles/perspectives. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. {mandatory_liability}:", '''<img src="https://raw.githubusercontent.com/superhero-7/img_bank/main/cxk.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}.", freeze_for, prior_preservation_box_update]
61
- elif(option == "style"):
62
- instance_prompt_example = "trsldamrl"
63
- freeze_for = 10
64
- return [f"You are going to train a `style`, upload 10-20 images of the style you are planning on training on. You can use services like <a style='text-decoration: underline' target='_blank' href='https://www.birme.net/?target_width={resize_width}&target_height={resize_width}'>birme</a> for smart cropping. Name the files with the words you would like {mandatory_liability}:", '''<img src="file/trsl_style.png" />''', f"You should name your concept with a unique made up word that has low chance of the model already knowing it (e.g.: `{instance_prompt_example}` here). Images will be automatically cropped to {resize_width}x{resize_width}", freeze_for, gr.update(visible=False)]
65
-
66
- def swap_base_model(selected_model):
67
- if(is_gpu_associated):
68
- global model_to_load
69
- # if(selected_model == "v1-5"):
70
- # model_to_load = model_v1
71
- # elif(selected_model == "v2-768"):
72
- # model_to_load = model_v2
73
- # elif(selected_model == "alt"):
74
- # model_to_load = model_alt
75
- # elif(selected_model == "alt_m9"):
76
- # model_to_load = model_alt_m9
77
- # else:
78
- # model_to_load = model_v2_512
79
- if(selected_model == "alt"):
80
- model_to_load = model_alt
81
-
82
- def count_files(*inputs):
83
- file_counter = 0
84
- concept_counter = 0
85
- for i, input in enumerate(inputs):
86
- if(i < maximum_concepts-1):
87
- files = inputs[i]
88
- if(files):
89
- concept_counter+=1
90
- file_counter+=len(files)
91
- uses_custom = inputs[-1]
92
- type_of_thing = inputs[-4]
93
- selected_model = inputs[-5]
94
- experimental_faces = inputs[-6]
95
- if(uses_custom):
96
- Training_Steps = int(inputs[-3])
97
- else:
98
- Training_Steps = file_counter*150
99
- if(type_of_thing == "person" and Training_Steps > 2400):
100
- Training_Steps = 2400 #Avoid overfitting on person faces
101
- if(is_spaces):
102
- if(selected_model == "v1-5" or selected_model == "alt" or selected_model == "alt_m9"):
103
- its = 1.1
104
- if(experimental_faces):
105
- its = 1
106
- elif(selected_model == "v2-512"):
107
- its = 0.8
108
- if(experimental_faces):
109
- its = 0.7
110
- elif(selected_model == "v2-768"):
111
- its = 0.5
112
- summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/its, 2)} seconds, or {round((Training_Steps/its)/60, 2)} minutes.
113
- The setup, compression and upload of the model can take up to 20 minutes.<br>As the T4-Small GPU costs US$0.60 for 1h, <span style="font-size: 120%"><b>the estimated cost for this training is below US${round((((Training_Steps/its)/3600)+0.3+0.1)*0.60, 2)}.</b></span><br><br>
114
- If you check the box below, the GPU attribution will automatically be removed after training is done and the model is uploaded. If not, don't forget to come back here and swap the hardware back to CPU.<br><br>'''
115
- else:
116
- summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps.<br><br>'''
117
-
118
- return([gr.update(visible=True), gr.update(visible=True, value=summary_sentence)])
119
-
120
- def update_steps(*files_list):
121
- file_counter = 0
122
- for i, files in enumerate(files_list):
123
- if(files):
124
- file_counter+=len(files)
125
- return(gr.update(value=file_counter*200))
126
-
127
- def pad_image(image):
128
- w, h = image.size
129
- if w == h:
130
- return image
131
- elif w > h:
132
- new_image = Image.new(image.mode, (w, w), (0, 0, 0))
133
- new_image.paste(image, (0, (w - h) // 2))
134
- return new_image
135
- else:
136
- new_image = Image.new(image.mode, (h, h), (0, 0, 0))
137
- new_image.paste(image, ((h - w) // 2, 0))
138
- return new_image
139
-
140
- def train(*inputs):
141
- if is_shared_ui:
142
- raise gr.Error("This Space only works in duplicated instances")
143
- if not is_gpu_associated:
144
- raise gr.Error("Please associate a T4 GPU for this Space")
145
- torch.cuda.empty_cache()
146
- if 'pipe' in globals():
147
- global pipe, pipe_is_set
148
- del pipe
149
- pipe_is_set = False
150
- gc.collect()
151
-
152
- if os.path.exists("output_model"): shutil.rmtree('output_model')
153
- if os.path.exists("instance_images"): shutil.rmtree('instance_images')
154
- if os.path.exists("diffusers_model.tar"): os.remove("diffusers_model.tar")
155
- if os.path.exists("model.ckpt"): os.remove("model.ckpt")
156
- if os.path.exists("hastrained.success"): os.remove("hastrained.success")
157
- file_counter = 0
158
- which_model = inputs[-10]
159
- resolution = 512 if which_model != "v2-768" else 768
160
- for i, input in enumerate(inputs):
161
- if(i < maximum_concepts-1):
162
- if(input):
163
- os.makedirs('instance_images',exist_ok=True)
164
- files = inputs[i+(maximum_concepts*2)]
165
- prompt = inputs[i+maximum_concepts]
166
- if(prompt == "" or prompt == None):
167
- raise gr.Error("You forgot to define your concept prompt")
168
- for j, file_temp in enumerate(files):
169
- file = Image.open(file_temp.name)
170
- image = pad_image(file)
171
- image = image.resize((resolution, resolution))
172
- extension = file_temp.name.split(".")[1]
173
- image = image.convert('RGB')
174
- image.save(f'instance_images/{prompt}_({j+1}).jpg', format="JPEG", quality = 100)
175
- file_counter += 1
176
-
177
- os.makedirs('output_model',exist_ok=True)
178
- uses_custom = inputs[-1]
179
- type_of_thing = inputs[-4]
180
- remove_attribution_after = inputs[-6]
181
- experimental_face_improvement = inputs[-9]
182
-
183
- if(uses_custom):
184
- Training_Steps = int(inputs[-3])
185
- Train_text_encoder_for = int(inputs[-2])
186
- else:
187
- if(type_of_thing == "object"):
188
- Train_text_encoder_for=30
189
-
190
- elif(type_of_thing == "style"):
191
- Train_text_encoder_for=15
192
-
193
- elif(type_of_thing == "person"):
194
- Train_text_encoder_for=70
195
-
196
- Training_Steps = file_counter*150
197
- if(type_of_thing == "person" and Training_Steps > 2600):
198
- Training_Steps = 2600 #Avoid overfitting on people's faces
199
- stptxt = int((Training_Steps*Train_text_encoder_for)/100)
200
- gradient_checkpointing = True if (experimental_face_improvement or which_model != "v1-5") else False
201
- cache_latents = True if which_model != "v1-5" else False
202
- if (type_of_thing == "object" or type_of_thing == "style" or (type_of_thing == "person" and not experimental_face_improvement)):
203
- args_general = argparse.Namespace(
204
- image_captions_filename = True,
205
- train_text_encoder = True if stptxt > 0 else False,
206
- stop_text_encoder_training = stptxt,
207
- save_n_steps = 0,
208
- pretrained_model_name_or_path = model_to_load,
209
- instance_data_dir="instance_images",
210
- class_data_dir=None,
211
- output_dir="output_model",
212
- instance_prompt="",
213
- seed=42,
214
- resolution=resolution,
215
- mixed_precision="fp16",
216
- train_batch_size=1,
217
- gradient_accumulation_steps=1,
218
- use_8bit_adam=True,
219
- learning_rate=2e-6,
220
- lr_scheduler="polynomial",
221
- lr_warmup_steps = 0,
222
- max_train_steps=Training_Steps,
223
- gradient_checkpointing=gradient_checkpointing,
224
- cache_latents=cache_latents,
225
- )
226
- print("Starting single training...")
227
- lock_file = open("intraining.lock", "w")
228
- lock_file.close()
229
- run_training(args_general)
230
- else:
231
- args_general = argparse.Namespace(
232
- image_captions_filename = True,
233
- train_text_encoder = True if stptxt > 0 else False,
234
- stop_text_encoder_training = stptxt,
235
- save_n_steps = 0,
236
- pretrained_model_name_or_path = model_to_load,
237
- instance_data_dir="instance_images",
238
- class_data_dir="Mix",
239
- output_dir="output_model",
240
- with_prior_preservation=True,
241
- prior_loss_weight=1.0,
242
- instance_prompt="",
243
- seed=42,
244
- resolution=resolution,
245
- mixed_precision="fp16",
246
- train_batch_size=1,
247
- gradient_accumulation_steps=1,
248
- use_8bit_adam=True,
249
- learning_rate=2e-6,
250
- lr_scheduler="polynomial",
251
- lr_warmup_steps = 0,
252
- max_train_steps=Training_Steps,
253
- num_class_images=200,
254
- gradient_checkpointing=gradient_checkpointing,
255
- cache_latents=cache_latents,
256
- )
257
- print("Starting multi-training...")
258
- lock_file = open("intraining.lock", "w")
259
- lock_file.close()
260
- run_training(args_general)
261
- gc.collect()
262
- torch.cuda.empty_cache()
263
- if(which_model == "v1-5"):
264
- print("Adding Safety Checker to the model...")
265
- shutil.copytree(f"{safety_checker}/feature_extractor", "output_model/feature_extractor")
266
- shutil.copytree(f"{safety_checker}/safety_checker", "output_model/safety_checker")
267
- shutil.copy(f"model_index.json", "output_model/model_index.json")
268
-
269
- if(not remove_attribution_after):
270
- print("Archiving model file...")
271
- with tarfile.open("diffusers_model.tar", "w") as tar:
272
- tar.add("output_model", arcname=os.path.basename("output_model"))
273
- if os.path.exists("intraining.lock"): os.remove("intraining.lock")
274
- trained_file = open("hastrained.success", "w")
275
- trained_file.close()
276
- print("Training completed!")
277
- return [
278
- gr.update(visible=True, value=["diffusers_model.tar"]), #result
279
- gr.update(visible=True), #try_your_model
280
- gr.update(visible=True), #push_to_hub
281
- gr.update(visible=True), #convert_button
282
- gr.update(visible=False), #training_ongoing
283
- gr.update(visible=True) #completed_training
284
- ]
285
- else:
286
- hf_token = inputs[-5]
287
- model_name = inputs[-7]
288
- where_to_upload = inputs[-8]
289
- push(model_name, where_to_upload, hf_token, which_model, True)
290
- hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
291
- headers = { "authorization" : f"Bearer {hf_token}"}
292
- body = {'flavor': 'cpu-basic'}
293
- requests.post(hardware_url, json = body, headers=headers)
294
-
295
- pipe_is_set = False
296
- def generate(prompt, steps):
297
- torch.cuda.empty_cache()
298
- # from diffusers import StableDiffusionPipeline
299
- from diffusers import DiffusionPipeline
300
- global pipe_is_set
301
- if(not pipe_is_set):
302
- global pipe
303
- # pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
304
- pipe = DiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
305
- pipe = pipe.to("cuda")
306
- pipe_is_set = True
307
-
308
- image = pipe(prompt, num_inference_steps=steps).images[0]
309
- return(image)
310
-
311
- def push(model_name, where_to_upload, hf_token, which_model, comes_from_automated=False):
312
- if(not os.path.exists("model.ckpt")):
313
- convert("output_model", "model.ckpt")
314
- from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
315
- from huggingface_hub import create_repo
316
- model_name_slug = slugify(model_name)
317
- api = HfApi()
318
- your_username = api.whoami(token=hf_token)["name"]
319
- if(where_to_upload == "My personal profile"):
320
- model_id = f"{your_username}/{model_name_slug}"
321
- else:
322
- model_id = f"sd-dreambooth-library/{model_name_slug}"
323
- headers = {"Authorization" : f"Bearer: {hf_token}", "Content-Type": "application/json"}
324
- response = requests.post("https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", headers=headers)
325
-
326
- images_upload = os.listdir("instance_images")
327
- image_string = ""
328
- instance_prompt_list = []
329
- previous_instance_prompt = ''
330
- for i, image in enumerate(images_upload):
331
- instance_prompt = image.split("_")[0]
332
- if(instance_prompt != previous_instance_prompt):
333
- title_instance_prompt_string = instance_prompt
334
- instance_prompt_list.append(instance_prompt)
335
- else:
336
- title_instance_prompt_string = ''
337
- previous_instance_prompt = instance_prompt
338
- image_string = f'''{title_instance_prompt_string} {"(use that on your prompt)" if title_instance_prompt_string != "" else ""}
339
- {image_string}![{instance_prompt} {i}](https://huggingface.co/{model_id}/resolve/main/concept_images/{urllib.parse.quote(image)})'''
340
- readme_text = f'''---
341
- license: creativeml-openrail-m
342
- tags:
343
- - text-to-image
344
- widget:
345
- - text: {instance_prompt_list[0]}
346
- ---
347
- ### {model_name} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the {which_model} base model
348
-
349
- You can run your new concept via the `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts!
350
-
351
- Sample pictures of:
352
- {image_string}
353
- '''
354
- #Save the readme to a file
355
- readme_file = open("model.README.md", "w")
356
- readme_file.write(readme_text)
357
- readme_file.close()
358
- #Save the token identifier to a file
359
- text_file = open("token_identifier.txt", "w")
360
- text_file.write(', '.join(instance_prompt_list))
361
- text_file.close()
362
- try:
363
- create_repo(model_id,private=True, token=hf_token)
364
- except:
365
- import time
366
- epoch_time = str(int(time.time()))
367
- create_repo(f"{model_id}-{epoch_time}", private=True,token=hf_token)
368
- operations = [
369
- CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"),
370
- CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="model.README.md"),
371
- CommitOperationAdd(path_in_repo=f"model.ckpt",path_or_fileobj="model.ckpt")
372
- ]
373
- api.create_commit(
374
- repo_id=model_id,
375
- operations=operations,
376
- commit_message=f"Upload the model {model_name}",
377
- token=hf_token
378
- )
379
- api.upload_folder(
380
- folder_path="output_model",
381
- repo_id=model_id,
382
- token=hf_token
383
- )
384
- api.upload_folder(
385
- folder_path="instance_images",
386
- path_in_repo="concept_images",
387
- repo_id=model_id,
388
- token=hf_token
389
- )
390
- if is_spaces:
391
- if(not comes_from_automated):
392
- extra_message = "Don't forget to remove the GPU attribution after you play with it."
393
- else:
394
- extra_message = "The GPU has been removed automatically as requested, and you can try the model via the model page"
395
- api.create_discussion(repo_id=os.environ['SPACE_ID'], title=f"Your model {model_name} has finished trained from the Dreambooth Train Spaces!", description=f"Your model has been successfully uploaded to: https://huggingface.co/{model_id}. {extra_message}",repo_type="space", token=hf_token)
396
-
397
- return [gr.update(visible=True, value=f"Successfully uploaded your model. Access it [here](https://huggingface.co/{model_id})"), gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])]
398
-
399
- def convert_to_ckpt():
400
- if 'pipe' in globals():
401
- global pipe, pipe_is_set
402
- del pipe
403
- pipe_is_set = False
404
- gc.collect()
405
- convert("output_model", "model.ckpt")
406
- return gr.update(visible=True, value=["diffusers_model.tar", "model.ckpt"])
407
-
408
- def check_status(top_description):
409
- if os.path.exists("hastrained.success"):
410
- if is_spaces:
411
- update_top_tag = gr.update(value=f'''
412
- <div class="gr-prose" style="max-width: 80%">
413
- <h2>Your model has finished training ✅</h2>
414
- <p>Yay, congratulations on training your model. Scroll down to play with with it, save it (either downloading it or on the Hugging Face Hub). Once you are done, your model is safe, and you don't want to train a new one, go to the <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}" target="_blank">settings page</a> and downgrade your Space to a CPU Basic</p>
415
- </div>
416
- ''')
417
- else:
418
- update_top_tag = gr.update(value=f'''
419
- <div class="gr-prose" style="max-width: 80%">
420
- <h2>Your model has finished training ✅</h2>
421
- <p>Yay, congratulations on training your model. Scroll down to play with with it, save it (either downloading it or on the Hugging Face Hub).</p>
422
- </div>
423
- ''')
424
- show_outputs = True
425
- elif os.path.exists("intraining.lock"):
426
- update_top_tag = gr.update(value='''
427
- <div class="gr-prose" style="max-width: 80%">
428
- <h2>Don't worry, your model is still training! ⌛</h2>
429
- <p>You closed the tab while your model was training, but it's all good! It is still training right now. You can click the "Open logs" button above here to check the training status. Once training is done, reload this tab to interact with your model</p>
430
- </div>
431
- ''')
432
- show_outputs = False
433
- else:
434
- update_top_tag = gr.update(value=top_description)
435
- show_outputs = False
436
- if os.path.exists("diffusers_model.tar"):
437
- update_files_tag = gr.update(visible=show_outputs, value=["diffusers_model.tar"])
438
- else:
439
- update_files_tag = gr.update(visible=show_outputs)
440
- return [
441
- update_top_tag, #top_description
442
- gr.update(visible=show_outputs), #try_your_model
443
- gr.update(visible=show_outputs), #push_to_hub
444
- update_files_tag, #result
445
- gr.update(visible=show_outputs), #convert_button
446
- ]
447
-
448
- def checkbox_swap(checkbox):
449
- return [gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox), gr.update(visible=checkbox)]
450
-
451
- with gr.Blocks(css=css) as demo:
452
- with gr.Box():
453
- gr.HTML(f'''
454
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
455
- <div
456
- style="
457
- display: inline-flex;
458
- gap: 1.2rem;
459
- font-size: 1.75rem;
460
- margin-bottom: 40px;
461
- width: 150px;
462
- margin: 0 auto;
463
- justify-content: center;
464
- ">
465
- <a href="https://github.com/FlagAI-Open/FlagAI"><img src="https://raw.githubusercontent.com/FlagAI-Open/FlagAI/master/logo.png" alt="FlagAI" width="80%" style="margin: 0 auto;"></a>
466
- </div>
467
- <br />
468
- <h1 style="font-weight: 2200; margin-bottom: 15px; margin-top: 15px; font-size: 2.7rem;">
469
- Dreambooth Web UI
470
- </h1>
471
- <br />
472
- <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
473
- <p style="margin-bottom: 15px; margin-top: 15px; font-size: 94%">
474
- This is a Dreambooth training UI for the <a href="https://huggingface.co/BAAI/AltDiffusion-m9" style="text-decoration: underline;">AltDiffusion-m9 model</a>, which is a multilingual text-to-image model supporting 9 languages.
476
- You can duplicate this Space to your own profile!
476
- </p>
477
- </div>
478
- ''')
479
- with gr.Box():
480
- if is_shared_ui:
481
- top_description = gr.HTML(f'''
482
- <div class="gr-prose" style="max-width: 80%">
483
- <h2>Attention - This Space doesn't work in this shared UI</h2>
484
- <p>For it to work, you can either run locally or duplicate the Space and run it on your own profile using a (paid) private T4 GPU for training. As each T4 costs US$0.60/h, it should cost < US$1 to train most models using default settings!&nbsp;&nbsp;<a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
485
- <img class="instruction" src="file/duplicate.png">
486
- <img class="arrow" src="file/arrow.png" />
487
- </div>
488
- ''')
489
- elif(is_spaces):
490
- if(is_gpu_associated):
491
- top_description = gr.HTML(f'''
492
- <div class="gr-prose" style="max-width: 80%">
493
- <h2>You have successfully associated a GPU to the Dreambooth Training Space 🎉</h2>
494
- <p>Certify that you got a T4. You can now train your model! You will be billed by the minute from when you activated the GPU until when it is turned it off.</p>
495
- </div>
496
- ''')
497
- else:
498
- top_description = gr.HTML(f'''
499
- <div class="gr-prose" style="max-width: 80%">
500
- <h2>You have successfully duplicated the Dreambooth Training Space 🎉</h2>
501
- <p>There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a <b>T4 GPU</b> to it (via the Settings tab)</a> and run the training below. Other GPUs are not compatible for now. You will be billed by the minute from when you activate the GPU until when it is turned it off.</p>
502
- </div>
503
- ''')
504
- else:
505
- top_description = gr.HTML(f'''
506
- <div class="gr-prose" style="max-width: 80%">
507
- <h2>You have successfully cloned the Dreambooth Training Space locally 🎉</h2>
508
- <p>Do a <code>pip install requirements-local.txt</code></p>
509
- </div>
510
- ''')
511
-
512
- # gr.Markdown("# Dreambooth Training UI 💭")
513
- gr.Markdown("Customize AltDiffusion and AltDiffusion-m9(ⁿᵉʷ!) by giving it a few examples of a concept. Based on the [🧨 diffusers](https://github.com/huggingface/diffusers) implementation, additional techniques from [TheLastBen](https://github.com/TheLastBen/diffusers) and [ShivamShrirao](https://github.com/ShivamShrirao/diffusers)")
514
-
515
- with gr.Row() as what_are_you_training:
516
- type_of_thing = gr.Dropdown(label="What would you like to train?", choices=["object", "person", "style"], value="object", interactive=True)
517
- base_model_to_use = gr.Dropdown(label="Which base model would you like to use?", choices=["alt", "alt_m9"], value="alt_m9", interactive=True)
518
-
519
- #Very hacky approach to emulate dynamically created Gradio components
520
- with gr.Column() as upload_your_concept:
521
- with gr.Box():
522
- thing_description = gr.Markdown("You are going to train an `object`; please upload 5-10 images of the object you plan to train on, taken from different angles/perspectives. You must have the right to use them and you are liable for the images you use. For example:")
523
- thing_experimental = gr.Checkbox(label="Improve faces (prior preservation) - can take longer training but can improve faces", visible=False, value=False)
524
- thing_image_example = gr.HTML('''<img src="https://raw.githubusercontent.com/superhero-7/img_bank/main/Naruto.png" />''')
525
- things_naming = gr.Markdown("You should name your concept with a unique made-up word that the model is unlikely to already know (e.g. `UzNrto` here). Images will be automatically cropped to 512x512.")
526
-
527
- # with gr.Column():
528
- file_collection = []
529
- concept_collection = []
530
- buttons_collection = []
531
- delete_collection = []
532
- is_visible = []
533
-
534
- row = [None] * maximum_concepts
535
- for x in range(maximum_concepts):
536
- ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
537
- if(x == 0):
538
- visible = True
539
- is_visible.append(gr.State(value=True))
540
- else:
541
- visible = False
542
- is_visible.append(gr.State(value=False))
543
-
544
- file_collection.append(gr.File(label=f'''Upload the images for your {ordinal(x+1) if (x>0) else ""} concept''', file_count="multiple", interactive=True, visible=visible))
545
- with gr.Column(visible=visible) as row[x]:
546
- concept_collection.append(gr.Textbox(label=f'''{ordinal(x+1) if (x>0) else ""} concept prompt - use a unique, made up word to avoid collisions'''))
547
- with gr.Row():
548
- if(x < maximum_concepts-1):
549
- buttons_collection.append(gr.Button(value="Add +1 concept", visible=visible))
550
- if(x > 0):
551
- delete_collection.append(gr.Button(value=f"Delete {ordinal(x+1)} concept"))
552
-
553
- counter_add = 1
554
- for button in buttons_collection:
555
- if(counter_add < len(buttons_collection)):
556
- button.click(lambda:
557
- [gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), True, None],
558
- None,
559
- [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], buttons_collection[counter_add], is_visible[counter_add], file_collection[counter_add]], queue=False)
560
- else:
561
- button.click(lambda:[gr.update(visible=True),gr.update(visible=True), gr.update(visible=False), True], None, [row[counter_add], file_collection[counter_add], buttons_collection[counter_add-1], is_visible[counter_add]], queue=False)
562
- counter_add += 1
563
-
564
- counter_delete = 1
565
- for delete_button in delete_collection:
566
- if(counter_delete < len(delete_collection)+1):
567
- delete_button.click(lambda:[gr.update(visible=False),gr.update(visible=False), gr.update(visible=True), False], None, [file_collection[counter_delete], row[counter_delete], buttons_collection[counter_delete-1], is_visible[counter_delete]], queue=False)
568
- counter_delete += 1
569
-
570
- with gr.Accordion("Custom Settings", open=False):
571
- swap_auto_calculated = gr.Checkbox(label="Use custom settings")
572
- gr.Markdown("If not checked, the % of frozen encoder will be tuned automatically according to whether you are training an `object`, `person` or `style`. The text-encoder is frozen after 10% of the steps for a style, 30% of the steps for an object and 75% of the steps for a person. The number of steps varies between 1400 and 2400 depending on how many images are uploaded. If you see too many artifacts in your output, the model may have overfit and you need fewer steps. If your results aren't really what you wanted, it may be underfitting and you need more steps.")
573
- steps = gr.Number(label="How many steps", value=2400)
574
- perc_txt_encoder = gr.Number(label="Percentage of the training steps the text-encoder should be trained as well", value=30)
575
-
576
- with gr.Box(visible=False) as training_summary:
577
- training_summary_text = gr.HTML("", visible=True, label="Training Summary")
578
- is_advanced_visible = True if is_spaces else False
579
- training_summary_checkbox = gr.Checkbox(label="Automatically remove paid GPU attribution and upload model to the Hugging Face Hub after training", value=True, visible=is_advanced_visible)
580
- training_summary_model_name = gr.Textbox(label="Name of your model", visible=True)
581
- training_summary_where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], value="My personal profile", label="Upload to", visible=True)
582
- training_summary_token_message = gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.", visible=True)
583
- training_summary_token = gr.Textbox(label="Hugging Face Write Token", type="password", visible=True)
584
-
585
- train_btn = gr.Button("Start Training")
586
- if(is_shared_ui):
587
- training_ongoing = gr.Markdown("## This Space only works in duplicated instances. Please duplicate it and try again!", visible=False)
588
- elif(not is_gpu_associated):
589
- training_ongoing = gr.Markdown("## Oops, you haven't associated a T4 GPU with this Space. Visit the Settings tab, associate one and try again.", visible=False)
590
- else:
591
- training_ongoing = gr.Markdown("## Training is ongoing ⌛... You can close this tab if you like or just wait. If you did not check the `Remove GPU After training`, you can come back here to try your model and upload it after training. Don't forget to remove the GPU attribution after you are done. ", visible=False)
592
-
593
- #Post-training UI
594
- completed_training = gr.Markdown('''# ✅ Training completed.
595
- ### Don't forget to remove the GPU attribution after you are done trying and uploading your model''', visible=False)
596
-
597
- with gr.Row():
598
- with gr.Box(visible=False) as try_your_model:
599
- gr.Markdown("## Try your model")
600
- prompt = gr.Textbox(label="Type your prompt")
601
- result_image = gr.Image()
602
- inference_steps = gr.Slider(minimum=1, maximum=150, value=50, step=1)
603
- generate_button = gr.Button("Generate Image")
604
-
605
- with gr.Box(visible=False) as push_to_hub:
606
- gr.Markdown("## Push to Hugging Face Hub")
607
- model_name = gr.Textbox(label="Name of your model", placeholder="Tarsila do Amaral Style")
608
- where_to_upload = gr.Dropdown(["My personal profile", "Public Library"], label="Upload to")
609
- gr.Markdown("[A Hugging Face write access token](https://huggingface.co/settings/tokens), go to \"New token\" -> Role : Write. A regular read token won't work here.")
610
- hf_token = gr.Textbox(label="Hugging Face Write Token", type="password")
611
-
612
- push_button = gr.Button("Push to the Hub")
613
-
614
- result = gr.File(label="Download the uploaded models in the diffusers format", visible=True)
615
- success_message_upload = gr.Markdown(visible=False)
616
- convert_button = gr.Button("Convert to CKPT", visible=False)
617
-
618
- #Swap the examples and the % of text encoder trained depending if it is an object, person or style
619
- type_of_thing.change(fn=swap_text, inputs=[type_of_thing, base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False)
620
-
621
- #Swap the base model
622
- base_model_to_use.change(fn=swap_text, inputs=[type_of_thing, base_model_to_use], outputs=[thing_description, thing_image_example, things_naming, perc_txt_encoder, thing_experimental], queue=False, show_progress=False)
623
- base_model_to_use.change(fn=swap_base_model, inputs=base_model_to_use, outputs=[])
624
-
625
- #Update the summary box below the UI according to how many images are uploaded and whether users are using custom settings or not
626
- for file in file_collection:
627
- #file.change(fn=update_steps,inputs=file_collection, outputs=steps)
628
- file.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
629
-
630
- thing_experimental.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
631
- base_model_to_use.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
632
- steps.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
633
- perc_txt_encoder.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
634
-
635
- #Give more options if the user wants to finish everything after training
636
- if(is_spaces):
637
- training_summary_checkbox.change(fn=checkbox_swap, inputs=training_summary_checkbox, outputs=[training_summary_token_message, training_summary_token, training_summary_model_name, training_summary_where_to_upload],queue=False, show_progress=False)
638
- #Add a message for while it is in training
639
- train_btn.click(lambda:gr.update(visible=True), inputs=None, outputs=training_ongoing)
640
-
641
- #The main train function
642
- train_btn.click(fn=train, inputs=is_visible+concept_collection+file_collection+[base_model_to_use]+[thing_experimental]+[training_summary_where_to_upload]+[training_summary_model_name]+[training_summary_checkbox]+[training_summary_token]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[result, try_your_model, push_to_hub, convert_button, training_ongoing, completed_training], queue=False)
643
-
644
- #Button to generate an image from your trained model after training
645
- generate_button.click(fn=generate, inputs=[prompt, inference_steps], outputs=result_image, queue=False)
646
- #Button to push the model to the Hugging Face Hub
647
- push_button.click(fn=push, inputs=[model_name, where_to_upload, hf_token, base_model_to_use], outputs=[success_message_upload, result], queue=False)
648
- #Button to convert the model to ckpt format
649
- convert_button.click(fn=convert_to_ckpt, inputs=[], outputs=result, queue=False)
650
-
651
- #Checks if the training is running
652
- demo.load(fn=check_status, inputs=top_description, outputs=[top_description, try_your_model, push_to_hub, result, convert_button], queue=False, show_progress=False)
653
-
654
- demo.queue(default_enabled=False).launch(debug=True)
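For reference, the app above works around Gradio's lack of truly dynamic component creation by pre-creating `maximum_concepts` upload slots and toggling their visibility with `gr.update(visible=...)`. Below is a minimal sketch of that pattern, assuming Gradio 3.x; `MAX_SLOTS` and the component labels are illustrative and not taken from the app.

```python
import gradio as gr

MAX_SLOTS = 3  # stand-in for the app's maximum_concepts

with gr.Blocks() as sketch:
    files, rows, add_buttons = [], [], []
    for i in range(MAX_SLOTS):
        visible = (i == 0)  # only the first slot starts visible
        files.append(gr.File(label=f"Images for concept {i + 1}", visible=visible))
        with gr.Column(visible=visible) as col:
            gr.Textbox(label=f"Concept {i + 1} prompt")
        rows.append(col)
        if i < MAX_SLOTS - 1:
            add_buttons.append(gr.Button(f"Add concept {i + 2}", visible=visible))

    # Each "add" click flips the next pre-created slot (and the next button) to visible.
    for i, btn in enumerate(add_buttons):
        targets = [files[i + 1], rows[i + 1]]
        if i + 1 < len(add_buttons):
            targets.append(add_buttons[i + 1])
        # Capture the number of updates via a default argument so it is bound now,
        # not after the loop finishes.
        btn.click(
            fn=lambda n=len(targets): [gr.update(visible=True)] * n,
            inputs=None,
            outputs=targets,
            queue=False,
        )

sketch.launch()
```

Clicking the first button reveals the second slot, and so on, which is how the app emulates its "Add +1 concept" behavior without creating components at runtime.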
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/model_param_init.py DELETED
@@ -1,69 +0,0 @@
1
- import json
2
- import os
3
- import pathlib
4
-
5
- default_param = {}
6
- default_param["bins"] = 768
7
- default_param["unstable_bins"] = 9 # training only
8
- default_param["reduction_bins"] = 762 # training only
9
- default_param["sr"] = 44100
10
- default_param["pre_filter_start"] = 757
11
- default_param["pre_filter_stop"] = 768
12
- default_param["band"] = {}
13
-
14
-
15
- default_param["band"][1] = {
16
- "sr": 11025,
17
- "hl": 128,
18
- "n_fft": 960,
19
- "crop_start": 0,
20
- "crop_stop": 245,
21
- "lpf_start": 61, # inference only
22
- "res_type": "polyphase",
23
- }
24
-
25
- default_param["band"][2] = {
26
- "sr": 44100,
27
- "hl": 512,
28
- "n_fft": 1536,
29
- "crop_start": 24,
30
- "crop_stop": 547,
31
- "hpf_start": 81, # inference only
32
- "res_type": "sinc_best",
33
- }
34
-
35
-
36
- def int_keys(d):
37
- r = {}
38
- for k, v in d:
39
- if k.isdigit():
40
- k = int(k)
41
- r[k] = v
42
- return r
43
-
44
-
45
- class ModelParameters(object):
46
- def __init__(self, config_path=""):
47
- if ".pth" == pathlib.Path(config_path).suffix:
48
- import zipfile
49
-
50
- with zipfile.ZipFile(config_path, "r") as zip:
51
- self.param = json.loads(
52
- zip.read("param.json"), object_pairs_hook=int_keys
53
- )
54
- elif ".json" == pathlib.Path(config_path).suffix:
55
- with open(config_path, "r") as f:
56
- self.param = json.loads(f.read(), object_pairs_hook=int_keys)
57
- else:
58
- self.param = default_param
59
-
60
- for k in [
61
- "mid_side",
62
- "mid_side_b",
63
- "mid_side_b2",
64
- "stereo_w",
65
- "stereo_n",
66
- "reverse",
67
- ]:
68
- if not k in self.param:
69
- self.param[k] = False
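For reference, a short sketch of how this class is consumed; it relies only on the behavior shown above (suffix-based dispatch, digit keys coerced to int, and the built-in `default_param` fallback). The import path and the JSON file name are illustrative and depend on your checkout layout.

```python
# Illustrative import path; adjust to wherever lib_v5 sits in your checkout.
from infer.lib.uvr5_pack.lib_v5.model_param_init import ModelParameters

defaults = ModelParameters()                # no path -> built-in default_param
print(defaults.param["sr"])                 # 44100
print(defaults.param["band"][1]["n_fft"])   # 960
print(defaults.param["mid_side"])           # False (missing flags are filled in)

# Loading a model-specific config is just a matter of passing its path; digit keys
# in the JSON are coerced to int by int_keys():
# params = ModelParameters("path/to/model_params.json")  # or a .pth containing param.json
```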
spaces/Benson/text-generation/Examples/Baku Burger House.md DELETED
@@ -1,79 +0,0 @@
1
-
2
- <h1>Burger House Bakú: La mejor guía para las mejores hamburguesas de la ciudad</h1>
3
- <p>Si usted está buscando un lugar para disfrutar de una deliciosa hamburguesa en Bakú, usted debe definitivamente echa un vistazo a Burger House Bakú. Este restaurante ofrece una variedad de hamburguesas, acompañamientos y bebidas que satisfarán sus antojos y lo harán feliz. En este artículo, te contaremos todo lo que necesitas saber sobre Burger House Bakú, incluyendo qué es, por qué debes visitarlo, qué sirve, dónde se encuentra, cuándo está abierto y qué piensan otros clientes de él. ¡Sigue leyendo para saber más! </p>
4
- <h2>Introducción</h2>
5
- <h3>¿Qué es Burger House Bakú? </h3>
6
- <p>Burger House Bakú es un restaurante especializado en hamburguesas. Fue fundada en 2018 por un grupo de amigos que querían compartir su pasión por las hamburguesas con la gente de Bakú. Utilizan ingredientes frescos, salsas caseras y carne de calidad para crear sus hamburguesas, que se cocinan por encargo y se sirven con una sonrisa. Burger House Bakú tiene como objetivo proporcionar un ambiente agradable y acogedor donde los clientes pueden relajarse y disfrutar de su comida. </p>
7
- <h2>baku burger house</h2><br /><p><b><b>Download</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://bltlly.com/2v6Mzw">https://bltlly.com/2v6Mzw</a></b></p><br /><br />
8
- <h3>¿Por qué visitar Burger House Bakú? </h3>
9
- <p>Hay muchas razones por las que deberías visitar Burger House Bakú. Aquí están algunas de ellas:</p>
10
- <ul>
11
- <li>Podrás degustar algunas de las mejores hamburguesas de la ciudad, elaboradas con ingredientes frescos y de calidad. </li>
12
- <li>Usted tendrá una amplia gama de opciones para elegir, incluyendo hamburguesas clásicas, hamburguesas con queso, hamburguesas de tocino, hamburguesas vegetarianas y más. </li>
13
- <li>También podrás disfrutar de algunos deliciosos platos, como papas fritas, aros de cebolla, ensalada y más. </li>
14
- <li>Podrás saciar tu sed con algunas bebidas refrescantes, como refrescos, cerveza, batidos y más. </li>
15
- <li>Experimentará un ambiente agradable y acogedor, con personal atento y música agradable. </li>
16
- <li>Obtendrá una buena relación calidad-precio, ya que los precios son razonables y las porciones generosas. </li>
17
- </ul>
18
- <h2>El menú</h2>
19
- <h3>Las hamburguesas</h3>
20
-
21
- <h4>Hamburguesa clásica</h4>
22
- <p>Esta es la hamburguesa más simple y básica del menú. Consiste en una hamburguesa de ternera, lechuga, tomate, cebolla, pepinillos, ketchup, mostaza y mayonesa en un pan de sésamo. Es perfecto para aquellos que quieren una hamburguesa clásica y sencilla que sea satisfactoria y sabrosa. </p>
23
- <h4>Hamburguesa de queso</h4>
24
- <p>Esta es una hamburguesa clásica con un toque extra de queso. Consiste en una hamburguesa de ternera, queso, lechuga, tomate, cebolla, encurtidos, ketchup, mostaza y mayonesa en un bollo de sésamo. Es perfecto para aquellos que aman el queso y quieren una hamburguesa más sabrosa. </p>
25
- <h4>Hamburguesa de tocino</h4>
26
- <p>Esta es una hamburguesa clásica con un toque extra de tocino. Consiste en una hamburguesa de ternera, tocino, queso, lechuga, tomate, cebolla, encurtidos, ketchup, mostaza y mayonesa en un bollo de sésamo. Es perfecto para aquellos que aman el tocino y quieren una hamburguesa más crujiente y ahumado. </p>
27
- <h4>Hamburguesa vegetariana</h4>
28
- <p>Esta es una hamburguesa para aquellos que prefieren una opción vegetariana. Consiste en una hamburguesa vegetariana, lechuga, tomate, cebolla, encurtidos, ketchup, mostaza y mayonesa en un bollo de sésamo. Es perfecto para aquellos que quieren una hamburguesa sana y libre de carne que sigue siendo deliciosa y satisfactoria. </p>
29
- <h3>Los lados</h3>
30
- <p>Ninguna hamburguesa está completa sin algunos lados para ir junto con ella. Burger House Bakú ofrece algunos lados sabrosos y crujientes que complementan sus hamburguesas. Estos son algunos de sus lados más populares:</p>
31
- <p></p>
32
- <h4>Fries</h4>
33
- <p>Estos son los platos clásicos y más populares para hamburguesas. Están hechos de papas frescas que se cortan en tiras finas y se fríen hasta que estén doradas y crujientes. Se sazona con sal y se sirve con ketchup o mayonesa. Son perfectos para aquellos que quieren un lado simple y crujiente que vaya bien con cualquier hamburguesa. </p>
34
- <h4>Anillos de cebolla</h4>
35
-
36
- <h4>Ensalada</h4>
37
- <p>Este es un acompañamiento para aquellos que quieren una opción más ligera y saludable. Está hecho de lechuga fresca, tomate, pepino, zanahoria y cebolla que se mezclan con aderezo. Se sirve con crutones o queso en la parte superior. Es perfecto para aquellos que quieren un lado refrescante y nutritivo que equilibre su hamburguesa. </p>
38
- <h3>Las bebidas</h3>
39
- <p>Para lavar su hamburguesa y los lados, necesitará algunas bebidas para saciar su sed. Burger House Bakú ofrece algunas bebidas refrescantes y deliciosas que se adaptan a diferentes gustos y preferencias. Aquí están algunas de sus bebidas más populares:</p>
40
- <h4>Soda</h4>
41
- <p>Esta es la bebida clásica y más popular para las hamburguesas. Es una bebida carbonatada que viene en diferentes sabores, como cola, limón, naranja y más. Se sirve fría con cubitos de hielo. Es perfecto para aquellos que quieren una bebida dulce y gaseosa que vaya bien con cualquier hamburguesa. </p>
42
- <h4>Cerveza</h4>
43
- <p>Esta es otra bebida popular para hamburguesas. Es una bebida alcohólica que viene en diferentes tipos, como lager, ale, stout y más. Se sirve frío con o sin espuma. Es perfecto para aquellos que quieren una bebida amarga y refrescante que realza el sabor de su hamburguesa. </p>
44
- <h4>Batido de leche</h4>
45
- <p>Esta es una bebida para aquellos que quieren un tratamiento cremoso e indulgente. Es una bebida mezclada que viene en diferentes sabores, como chocolate, vainilla, fresa y más. Se sirve frío con crema batida y una cereza en la parte superior. Es perfecto para aquellos que quieren una bebida rica y suave que satisfaga su gusto por los dulces. </p>
46
- <h2>La ubicación y las horas</h2>
47
- <h3>¿Dónde está Burger House Bakú? </h3>
48
- <p>Burger House Bakú se encuentra en el corazón de la ciudad, cerca de la Plaza de la Fuente. La dirección es 28 Nizami Street, Bakú 1000. Se puede llegar fácilmente en transporte público o en coche. Hay un amplio aparcamiento cerca. </p>
49
- <h3>¿Cuándo está abierto Burger House Bakú? </h3>
50
-
51
- <h2>Los comentarios y valoraciones</h2>
52
- <h3>¿Qué dicen los clientes sobre Burger House Baku? </h3>
53
- <p>Burger House Bakú ha recibido muchas críticas positivas y valoraciones de clientes que han probado su comida. Estos son algunos de los comentarios que han dejado en varias plataformas:</p>
54
- <ul>
55
- <li>"¡Las mejores hamburguesas de la ciudad! ¡Frescas, jugosas, sabrosas y grandes! ¡Las papas fritas también son increíbles! ¡Muy recomendable!" - Ali en Google Reviews</li>
56
- <li>"Me encanta este lugar! Las hamburguesas son tan buenas y el personal es tan amable! El ambiente es acogedor y relajante! Siempre vengo aquí con mis amigos!" - Leyla en Facebook</li>
57
- <li>"Burger House Bakú es mi lugar favorito de hamburguesas en Bakú! Las hamburguesas se cocinan a la perfección y los lados son deliciosos! Los precios son razonables y las porciones son generosas! No puedo tener suficiente de ella!" - Samir en TripAdvisor</li>
58
- </ul>
59
- <h3>¿Cómo se compara Burger House Bakú con otros lugares de hamburguesas en Bakú? </ <p>Burger House Bakú es uno de los mejores lugares de hamburguesas en Bakú, según muchos clientes y críticos. Tiene una alta calificación de 4.8 de 5 estrellas en Google Reviews, 4.9 de 5 estrellas en Facebook y 4.5 de 5 estrellas en TripAdvisor . También tiene un Certificado de Excelencia de TripAdvisor, lo que significa que recibe constantemente excelentes críticas de los viajeros. Burger House Bakú se destaca de otros lugares de hamburguesas en Bakú debido a su calidad, variedad, servicio y valor. Ofrece hamburguesas frescas y sabrosas que se adaptan a diferentes gustos y preferencias, así como deliciosas guarniciones y bebidas que las complementan. También ofrece un ambiente agradable y acogedor, con personal atento y música agradable. También ofrece precios razonables y porciones generosas que hacen que los clientes se sientan satisfechos y felices. </p>
60
- <h2>Conclusión</h2>
61
- <h3>Resumen de los puntos principales</h3>
62
-
63
- <h3>Llamada a la acción</h3>
64
- <p>Si usted está buscando un lugar para disfrutar de una deliciosa hamburguesa en Bakú, usted debe definitivamente echa un vistazo a Burger House Bakú. Usted no se arrepentirá! Puede visitar su sitio web para ver su menú, ordenar en línea o hacer una reserva. También puede seguirlos en las redes sociales para obtener las últimas actualizaciones y promociones. ¡No pierdas esta oportunidad de probar algunas de las mejores hamburguesas de la ciudad! ¡Visita Burger House Bakú hoy! </p>
65
- <h2>Preguntas frecuentes</h2>
66
- <ul>
67
- <li>Q: ¿Cómo puedo contactar con Burger House Baku? </li>
68
- <li>A: Puede ponerse en contacto con Burger House Bakú por teléfono al +994 12 555 55 55 o por correo electrónico a [email protected]. </li>
69
- <li>Q: ¿Burger House Bakú ofrece entrega o comida para llevar? </li>
70
- <li>A: Sí, Burger House Bakú ofrece opciones de entrega y comida para llevar. Usted puede ordenar en línea a través de su sitio web o por teléfono, y tener su comida entregada a su casa u oficina. También puede recoger su comida de su restaurante. </li>
71
- <li>Q: ¿Burger House Bakú tiene ofertas especiales o descuentos? </li>
72
- <li>A: Sí, Burger House Bakú tiene algunas ofertas especiales y descuentos para sus clientes. Por ejemplo, puede obtener una bebida gratis con cualquier pedido de hamburguesas los lunes, o obtener un descuento del 10% en su factura si muestra su identificación de estudiante los martes. También puede unirse a su programa de lealtad y obtener puntos por cada compra que puede canjear por alimentos o regalos gratis. </li>
73
- <li>Q: ¿Burger House Baku abastece para eventos o fiestas? </li>
74
- <li>A: Sí, Burger House Bakú atiende a eventos o fiestas de cualquier tamaño y ocasión. Puedes elegir entre su menú de catering o personalizar tu propio menú según tus necesidades y preferencias. También puede reservar su restaurante para eventos privados o fiestas. </li>
75
- <li>P: ¿Burger House Bakú tiene opciones vegetarianas o veganas? </li>
76
-
77
- </ul></p> 64aa2da5cf<br />
78
- <br />
79
- <br />
spaces/BestteaLib/README/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: README
3
- emoji: 🏢
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- Edit this `README.md` markdown file to author your organization card.
spaces/BetterAPI/BetterChat_new/src/lib/utils/share.ts DELETED
@@ -1,7 +0,0 @@
1
- export function share(url: string, title: string) {
2
- if (navigator.share) {
3
- navigator.share({ url, title });
4
- } else {
5
- prompt("Copy this public url to share:", url);
6
- }
7
- }
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/tags.py DELETED
@@ -1,487 +0,0 @@
1
- # This file is dual licensed under the terms of the Apache License, Version
2
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
- # for complete details.
4
-
5
- import logging
6
- import platform
7
- import sys
8
- import sysconfig
9
- from importlib.machinery import EXTENSION_SUFFIXES
10
- from typing import (
11
- Dict,
12
- FrozenSet,
13
- Iterable,
14
- Iterator,
15
- List,
16
- Optional,
17
- Sequence,
18
- Tuple,
19
- Union,
20
- cast,
21
- )
22
-
23
- from . import _manylinux, _musllinux
24
-
25
- logger = logging.getLogger(__name__)
26
-
27
- PythonVersion = Sequence[int]
28
- MacVersion = Tuple[int, int]
29
-
30
- INTERPRETER_SHORT_NAMES: Dict[str, str] = {
31
- "python": "py", # Generic.
32
- "cpython": "cp",
33
- "pypy": "pp",
34
- "ironpython": "ip",
35
- "jython": "jy",
36
- }
37
-
38
-
39
- _32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
40
-
41
-
42
- class Tag:
43
- """
44
- A representation of the tag triple for a wheel.
45
-
46
- Instances are considered immutable and thus are hashable. Equality checking
47
- is also supported.
48
- """
49
-
50
- __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
51
-
52
- def __init__(self, interpreter: str, abi: str, platform: str) -> None:
53
- self._interpreter = interpreter.lower()
54
- self._abi = abi.lower()
55
- self._platform = platform.lower()
56
- # The __hash__ of every single element in a Set[Tag] will be evaluated each time
57
- # that a set calls its `.disjoint()` method, which may be called hundreds of
58
- # times when scanning a page of links for packages with tags matching that
59
- # Set[Tag]. Pre-computing the value here produces significant speedups for
60
- # downstream consumers.
61
- self._hash = hash((self._interpreter, self._abi, self._platform))
62
-
63
- @property
64
- def interpreter(self) -> str:
65
- return self._interpreter
66
-
67
- @property
68
- def abi(self) -> str:
69
- return self._abi
70
-
71
- @property
72
- def platform(self) -> str:
73
- return self._platform
74
-
75
- def __eq__(self, other: object) -> bool:
76
- if not isinstance(other, Tag):
77
- return NotImplemented
78
-
79
- return (
80
- (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
81
- and (self._platform == other._platform)
82
- and (self._abi == other._abi)
83
- and (self._interpreter == other._interpreter)
84
- )
85
-
86
- def __hash__(self) -> int:
87
- return self._hash
88
-
89
- def __str__(self) -> str:
90
- return f"{self._interpreter}-{self._abi}-{self._platform}"
91
-
92
- def __repr__(self) -> str:
93
- return f"<{self} @ {id(self)}>"
94
-
95
-
96
- def parse_tag(tag: str) -> FrozenSet[Tag]:
97
- """
98
- Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
99
-
100
- Returning a set is required due to the possibility that the tag is a
101
- compressed tag set.
102
- """
103
- tags = set()
104
- interpreters, abis, platforms = tag.split("-")
105
- for interpreter in interpreters.split("."):
106
- for abi in abis.split("."):
107
- for platform_ in platforms.split("."):
108
- tags.add(Tag(interpreter, abi, platform_))
109
- return frozenset(tags)
110
-
111
-
112
- def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
113
- value = sysconfig.get_config_var(name)
114
- if value is None and warn:
115
- logger.debug(
116
- "Config variable '%s' is unset, Python ABI tag may be incorrect", name
117
- )
118
- return value
119
-
120
-
121
- def _normalize_string(string: str) -> str:
122
- return string.replace(".", "_").replace("-", "_")
123
-
124
-
125
- def _abi3_applies(python_version: PythonVersion) -> bool:
126
- """
127
- Determine if the Python version supports abi3.
128
-
129
- PEP 384 was first implemented in Python 3.2.
130
- """
131
- return len(python_version) > 1 and tuple(python_version) >= (3, 2)
132
-
133
-
134
- def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
135
- py_version = tuple(py_version) # To allow for version comparison.
136
- abis = []
137
- version = _version_nodot(py_version[:2])
138
- debug = pymalloc = ucs4 = ""
139
- with_debug = _get_config_var("Py_DEBUG", warn)
140
- has_refcount = hasattr(sys, "gettotalrefcount")
141
- # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
142
- # extension modules is the best option.
143
- # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
144
- has_ext = "_d.pyd" in EXTENSION_SUFFIXES
145
- if with_debug or (with_debug is None and (has_refcount or has_ext)):
146
- debug = "d"
147
- if py_version < (3, 8):
148
- with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
149
- if with_pymalloc or with_pymalloc is None:
150
- pymalloc = "m"
151
- if py_version < (3, 3):
152
- unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
153
- if unicode_size == 4 or (
154
- unicode_size is None and sys.maxunicode == 0x10FFFF
155
- ):
156
- ucs4 = "u"
157
- elif debug:
158
- # Debug builds can also load "normal" extension modules.
159
- # We can also assume no UCS-4 or pymalloc requirement.
160
- abis.append(f"cp{version}")
161
- abis.insert(
162
- 0,
163
- "cp{version}{debug}{pymalloc}{ucs4}".format(
164
- version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
165
- ),
166
- )
167
- return abis
168
-
169
-
170
- def cpython_tags(
171
- python_version: Optional[PythonVersion] = None,
172
- abis: Optional[Iterable[str]] = None,
173
- platforms: Optional[Iterable[str]] = None,
174
- *,
175
- warn: bool = False,
176
- ) -> Iterator[Tag]:
177
- """
178
- Yields the tags for a CPython interpreter.
179
-
180
- The tags consist of:
181
- - cp<python_version>-<abi>-<platform>
182
- - cp<python_version>-abi3-<platform>
183
- - cp<python_version>-none-<platform>
184
- - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
185
-
186
- If python_version only specifies a major version then user-provided ABIs and
187
- the 'none' ABItag will be used.
188
-
189
- If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
190
- their normal position and not at the beginning.
191
- """
192
- if not python_version:
193
- python_version = sys.version_info[:2]
194
-
195
- interpreter = f"cp{_version_nodot(python_version[:2])}"
196
-
197
- if abis is None:
198
- if len(python_version) > 1:
199
- abis = _cpython_abis(python_version, warn)
200
- else:
201
- abis = []
202
- abis = list(abis)
203
- # 'abi3' and 'none' are explicitly handled later.
204
- for explicit_abi in ("abi3", "none"):
205
- try:
206
- abis.remove(explicit_abi)
207
- except ValueError:
208
- pass
209
-
210
- platforms = list(platforms or platform_tags())
211
- for abi in abis:
212
- for platform_ in platforms:
213
- yield Tag(interpreter, abi, platform_)
214
- if _abi3_applies(python_version):
215
- yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
216
- yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
217
-
218
- if _abi3_applies(python_version):
219
- for minor_version in range(python_version[1] - 1, 1, -1):
220
- for platform_ in platforms:
221
- interpreter = "cp{version}".format(
222
- version=_version_nodot((python_version[0], minor_version))
223
- )
224
- yield Tag(interpreter, "abi3", platform_)
225
-
226
-
227
- def _generic_abi() -> Iterator[str]:
228
- abi = sysconfig.get_config_var("SOABI")
229
- if abi:
230
- yield _normalize_string(abi)
231
-
232
-
233
- def generic_tags(
234
- interpreter: Optional[str] = None,
235
- abis: Optional[Iterable[str]] = None,
236
- platforms: Optional[Iterable[str]] = None,
237
- *,
238
- warn: bool = False,
239
- ) -> Iterator[Tag]:
240
- """
241
- Yields the tags for a generic interpreter.
242
-
243
- The tags consist of:
244
- - <interpreter>-<abi>-<platform>
245
-
246
- The "none" ABI will be added if it was not explicitly provided.
247
- """
248
- if not interpreter:
249
- interp_name = interpreter_name()
250
- interp_version = interpreter_version(warn=warn)
251
- interpreter = "".join([interp_name, interp_version])
252
- if abis is None:
253
- abis = _generic_abi()
254
- platforms = list(platforms or platform_tags())
255
- abis = list(abis)
256
- if "none" not in abis:
257
- abis.append("none")
258
- for abi in abis:
259
- for platform_ in platforms:
260
- yield Tag(interpreter, abi, platform_)
261
-
262
-
263
- def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
264
- """
265
- Yields Python versions in descending order.
266
-
267
- After the latest version, the major-only version will be yielded, and then
268
- all previous versions of that major version.
269
- """
270
- if len(py_version) > 1:
271
- yield f"py{_version_nodot(py_version[:2])}"
272
- yield f"py{py_version[0]}"
273
- if len(py_version) > 1:
274
- for minor in range(py_version[1] - 1, -1, -1):
275
- yield f"py{_version_nodot((py_version[0], minor))}"
276
-
277
-
278
- def compatible_tags(
279
- python_version: Optional[PythonVersion] = None,
280
- interpreter: Optional[str] = None,
281
- platforms: Optional[Iterable[str]] = None,
282
- ) -> Iterator[Tag]:
283
- """
284
- Yields the sequence of tags that are compatible with a specific version of Python.
285
-
286
- The tags consist of:
287
- - py*-none-<platform>
288
- - <interpreter>-none-any # ... if `interpreter` is provided.
289
- - py*-none-any
290
- """
291
- if not python_version:
292
- python_version = sys.version_info[:2]
293
- platforms = list(platforms or platform_tags())
294
- for version in _py_interpreter_range(python_version):
295
- for platform_ in platforms:
296
- yield Tag(version, "none", platform_)
297
- if interpreter:
298
- yield Tag(interpreter, "none", "any")
299
- for version in _py_interpreter_range(python_version):
300
- yield Tag(version, "none", "any")
301
-
302
-
303
- def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
304
- if not is_32bit:
305
- return arch
306
-
307
- if arch.startswith("ppc"):
308
- return "ppc"
309
-
310
- return "i386"
311
-
312
-
313
- def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
314
- formats = [cpu_arch]
315
- if cpu_arch == "x86_64":
316
- if version < (10, 4):
317
- return []
318
- formats.extend(["intel", "fat64", "fat32"])
319
-
320
- elif cpu_arch == "i386":
321
- if version < (10, 4):
322
- return []
323
- formats.extend(["intel", "fat32", "fat"])
324
-
325
- elif cpu_arch == "ppc64":
326
- # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
327
- if version > (10, 5) or version < (10, 4):
328
- return []
329
- formats.append("fat64")
330
-
331
- elif cpu_arch == "ppc":
332
- if version > (10, 6):
333
- return []
334
- formats.extend(["fat32", "fat"])
335
-
336
- if cpu_arch in {"arm64", "x86_64"}:
337
- formats.append("universal2")
338
-
339
- if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
340
- formats.append("universal")
341
-
342
- return formats
343
-
344
-
345
- def mac_platforms(
346
- version: Optional[MacVersion] = None, arch: Optional[str] = None
347
- ) -> Iterator[str]:
348
- """
349
- Yields the platform tags for a macOS system.
350
-
351
- The `version` parameter is a two-item tuple specifying the macOS version to
352
- generate platform tags for. The `arch` parameter is the CPU architecture to
353
- generate platform tags for. Both parameters default to the appropriate value
354
- for the current system.
355
- """
356
- version_str, _, cpu_arch = platform.mac_ver()
357
- if version is None:
358
- version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
359
- else:
360
- version = version
361
- if arch is None:
362
- arch = _mac_arch(cpu_arch)
363
- else:
364
- arch = arch
365
-
366
- if (10, 0) <= version and version < (11, 0):
367
- # Prior to Mac OS 11, each yearly release of Mac OS bumped the
368
- # "minor" version number. The major version was always 10.
369
- for minor_version in range(version[1], -1, -1):
370
- compat_version = 10, minor_version
371
- binary_formats = _mac_binary_formats(compat_version, arch)
372
- for binary_format in binary_formats:
373
- yield "macosx_{major}_{minor}_{binary_format}".format(
374
- major=10, minor=minor_version, binary_format=binary_format
375
- )
376
-
377
- if version >= (11, 0):
378
- # Starting with Mac OS 11, each yearly release bumps the major version
379
- # number. The minor versions are now the midyear updates.
380
- for major_version in range(version[0], 10, -1):
381
- compat_version = major_version, 0
382
- binary_formats = _mac_binary_formats(compat_version, arch)
383
- for binary_format in binary_formats:
384
- yield "macosx_{major}_{minor}_{binary_format}".format(
385
- major=major_version, minor=0, binary_format=binary_format
386
- )
387
-
388
- if version >= (11, 0):
389
- # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
390
- # Arm64 support was introduced in 11.0, so no Arm binaries from previous
391
- # releases exist.
392
- #
393
- # However, the "universal2" binary format can have a
394
- # macOS version earlier than 11.0 when the x86_64 part of the binary supports
395
- # that version of macOS.
396
- if arch == "x86_64":
397
- for minor_version in range(16, 3, -1):
398
- compat_version = 10, minor_version
399
- binary_formats = _mac_binary_formats(compat_version, arch)
400
- for binary_format in binary_formats:
401
- yield "macosx_{major}_{minor}_{binary_format}".format(
402
- major=compat_version[0],
403
- minor=compat_version[1],
404
- binary_format=binary_format,
405
- )
406
- else:
407
- for minor_version in range(16, 3, -1):
408
- compat_version = 10, minor_version
409
- binary_format = "universal2"
410
- yield "macosx_{major}_{minor}_{binary_format}".format(
411
- major=compat_version[0],
412
- minor=compat_version[1],
413
- binary_format=binary_format,
414
- )
415
-
416
-
417
- def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
418
- linux = _normalize_string(sysconfig.get_platform())
419
- if is_32bit:
420
- if linux == "linux_x86_64":
421
- linux = "linux_i686"
422
- elif linux == "linux_aarch64":
423
- linux = "linux_armv7l"
424
- _, arch = linux.split("_", 1)
425
- yield from _manylinux.platform_tags(linux, arch)
426
- yield from _musllinux.platform_tags(arch)
427
- yield linux
428
-
429
-
430
- def _generic_platforms() -> Iterator[str]:
431
- yield _normalize_string(sysconfig.get_platform())
432
-
433
-
434
- def platform_tags() -> Iterator[str]:
435
- """
436
- Provides the platform tags for this installation.
437
- """
438
- if platform.system() == "Darwin":
439
- return mac_platforms()
440
- elif platform.system() == "Linux":
441
- return _linux_platforms()
442
- else:
443
- return _generic_platforms()
444
-
445
-
446
- def interpreter_name() -> str:
447
- """
448
- Returns the name of the running interpreter.
449
- """
450
- name = sys.implementation.name
451
- return INTERPRETER_SHORT_NAMES.get(name) or name
452
-
453
-
454
- def interpreter_version(*, warn: bool = False) -> str:
455
- """
456
- Returns the version of the running interpreter.
457
- """
458
- version = _get_config_var("py_version_nodot", warn=warn)
459
- if version:
460
- version = str(version)
461
- else:
462
- version = _version_nodot(sys.version_info[:2])
463
- return version
464
-
465
-
466
- def _version_nodot(version: PythonVersion) -> str:
467
- return "".join(map(str, version))
468
-
469
-
470
- def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
471
- """
472
- Returns the sequence of tag triples for the running interpreter.
473
-
474
- The order of the sequence corresponds to priority order for the
475
- interpreter, from most to least important.
476
- """
477
-
478
- interp_name = interpreter_name()
479
- if interp_name == "cp":
480
- yield from cpython_tags(warn=warn)
481
- else:
482
- yield from generic_tags()
483
-
484
- if interp_name == "pp":
485
- yield from compatible_tags(interpreter="pp3")
486
- else:
487
- yield from compatible_tags()
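For reference, this file is setuptools' vendored copy of `packaging.tags`; a short sketch of how the same API is typically used via the standalone `packaging` distribution:

```python
from packaging import tags  # standalone equivalent of the vendored module above

# Expand a (possibly compressed) wheel tag string into its Tag triples.
for tag in sorted(tags.parse_tag("py2.py3-none-any"), key=str):
    print(tag.interpreter, tag.abi, tag.platform)   # py2/py3, none, any

# Enumerate the tags supported by the running interpreter, most preferred first;
# installers pick the wheel whose tag appears earliest in this sequence.
supported = list(tags.sys_tags())
print(supported[0])     # e.g. cp311-cp311-manylinux_2_17_x86_64 (platform dependent)
print(len(supported))
```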
spaces/CVH-vn1210/make_hair/minigpt4/datasets/datasets/__init__.py DELETED
File without changes
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/demo/demo.py DELETED
@@ -1,159 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import argparse
3
- import glob
4
- import multiprocessing as mp
5
- import os
6
- import time
7
- import cv2
8
- import tqdm
9
-
10
- from detectron2.config import get_cfg
11
- from detectron2.data.detection_utils import read_image
12
- from detectron2.utils.logger import setup_logger
13
-
14
- from predictor import VisualizationDemo
15
-
16
- # constants
17
- WINDOW_NAME = "COCO detections"
18
-
19
-
20
- def setup_cfg(args):
21
- # load config from file and command-line arguments
22
- cfg = get_cfg()
23
- cfg.merge_from_file(args.config_file)
24
- cfg.merge_from_list(args.opts)
25
- # Set score_threshold for builtin models
26
- cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
27
- cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
28
- cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
29
- cfg.freeze()
30
- return cfg
31
-
32
-
33
- def get_parser():
34
- parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
35
- parser.add_argument(
36
- "--config-file",
37
- default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
38
- metavar="FILE",
39
- help="path to config file",
40
- )
41
- parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
42
- parser.add_argument("--video-input", help="Path to video file.")
43
- parser.add_argument(
44
- "--input",
45
- nargs="+",
46
- help="A list of space separated input images; "
47
- "or a single glob pattern such as 'directory/*.jpg'",
48
- )
49
- parser.add_argument(
50
- "--output",
51
- help="A file or directory to save output visualizations. "
52
- "If not given, will show output in an OpenCV window.",
53
- )
54
-
55
- parser.add_argument(
56
- "--confidence-threshold",
57
- type=float,
58
- default=0.5,
59
- help="Minimum score for instance predictions to be shown",
60
- )
61
- parser.add_argument(
62
- "--opts",
63
- help="Modify config options using the command-line 'KEY VALUE' pairs",
64
- default=[],
65
- nargs=argparse.REMAINDER,
66
- )
67
- return parser
68
-
69
-
70
- if __name__ == "__main__":
71
- mp.set_start_method("spawn", force=True)
72
- args = get_parser().parse_args()
73
- setup_logger(name="fvcore")
74
- logger = setup_logger()
75
- logger.info("Arguments: " + str(args))
76
-
77
- cfg = setup_cfg(args)
78
-
79
- demo = VisualizationDemo(cfg)
80
-
81
- if args.input:
82
- if len(args.input) == 1:
83
- args.input = glob.glob(os.path.expanduser(args.input[0]))
84
- assert args.input, "The input path(s) was not found"
85
- for path in tqdm.tqdm(args.input, disable=not args.output):
86
- # use PIL, to be consistent with evaluation
87
- img = read_image(path, format="BGR")
88
- start_time = time.time()
89
- predictions, visualized_output = demo.run_on_image(img)
90
- logger.info(
91
- "{}: {} in {:.2f}s".format(
92
- path,
93
- "detected {} instances".format(len(predictions["instances"]))
94
- if "instances" in predictions
95
- else "finished",
96
- time.time() - start_time,
97
- )
98
- )
99
-
100
- if args.output:
101
- if os.path.isdir(args.output):
102
- assert os.path.isdir(args.output), args.output
103
- out_filename = os.path.join(args.output, os.path.basename(path))
104
- else:
105
- assert len(args.input) == 1, "Please specify a directory with args.output"
106
- out_filename = args.output
107
- visualized_output.save(out_filename)
108
- else:
109
- cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
110
- cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
111
- if cv2.waitKey(0) == 27:
112
- break # esc to quit
113
- elif args.webcam:
114
- assert args.input is None, "Cannot have both --input and --webcam!"
115
- cam = cv2.VideoCapture(0)
116
- for vis in tqdm.tqdm(demo.run_on_video(cam)):
117
- cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
118
- cv2.imshow(WINDOW_NAME, vis)
119
- if cv2.waitKey(1) == 27:
120
- break # esc to quit
121
- cv2.destroyAllWindows()
122
- elif args.video_input:
123
- video = cv2.VideoCapture(args.video_input)
124
- width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
125
- height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
126
- frames_per_second = video.get(cv2.CAP_PROP_FPS)
127
- num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
128
- basename = os.path.basename(args.video_input)
129
-
130
- if args.output:
131
- if os.path.isdir(args.output):
132
- output_fname = os.path.join(args.output, basename)
133
- output_fname = os.path.splitext(output_fname)[0] + ".mkv"
134
- else:
135
- output_fname = args.output
136
- assert not os.path.isfile(output_fname), output_fname
137
- output_file = cv2.VideoWriter(
138
- filename=output_fname,
139
- # some installation of opencv may not support x264 (due to its license),
140
- # you can try other format (e.g. MPEG)
141
- fourcc=cv2.VideoWriter_fourcc(*"x264"),
142
- fps=float(frames_per_second),
143
- frameSize=(width, height),
144
- isColor=True,
145
- )
146
- assert os.path.isfile(args.video_input)
147
- for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
148
- if args.output:
149
- output_file.write(vis_frame)
150
- else:
151
- cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
152
- cv2.imshow(basename, vis_frame)
153
- if cv2.waitKey(1) == 27:
154
- break # esc to quit
155
- video.release()
156
- if args.output:
157
- output_file.release()
158
- else:
159
- cv2.destroyAllWindows()
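For reference, a minimal sketch of the `setup_cfg()` pattern used above: start from detectron2's defaults, merge a model YAML, and apply one confidence threshold to the builtin heads. It assumes a detectron2 installation and reuses the default config path from the argument parser above.

```python
from detectron2.config import get_cfg

def build_cfg(config_file: str, confidence: float = 0.5):
    """Mirror setup_cfg(): defaults -> YAML merge -> score thresholds -> freeze."""
    cfg = get_cfg()
    cfg.merge_from_file(config_file)
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = confidence
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence
    cfg.freeze()
    return cfg

cfg = build_cfg("configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml")
```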
spaces/CVPR/LIVE/thrust/CONTRIBUTING.md DELETED
@@ -1,490 +0,0 @@
1
- # Table of Contents
2
-
3
- 1. [Contributing to Thrust](#contributing-to-thrust)
4
- 1. [CMake Options](#cmake-options)
5
- 1. [Development Model](#development-model)
6
-
7
- # Contributing to Thrust
8
-
9
- Thrust uses Github to manage all open-source development, including bug
10
- tracking, pull requests, and design discussions. This document details how to get
11
- started as a Thrust contributor.
12
-
13
- An overview of this process is:
14
-
15
- 1. [Clone the Thrust repository](#clone-the-thrust-repository)
16
- 1. [Setup a fork of Thrust](#setup-a-fork-of-thrust)
17
- 1. [Setup your environment](#setup-your-environment)
18
- 1. [Create a development branch](#create-a-development-branch)
19
- 1. [Local development loop](#local-development-loop)
20
- 1. [Push development branch to your fork](#push-development-branch-to-your-fork)
21
- 1. [Create pull request](#create-pull-request)
22
- 1. [Address feedback and update pull request](#address-feedback-and-update-pull-request)
23
- 1. [When your PR is approved...](#when-your-pr-is-approved)
24
-
25
- ## Clone the Thrust Repository
26
-
27
- To get started, clone the main repository to your local computer. Thrust should
28
- be cloned recursively to setup the CUB submodule (required for `CUDA`
29
- acceleration).
30
-
31
- ```
32
- git clone --recursive https://github.com/thrust/thrust.git
33
- cd thrust
34
- ```
35
-
36
- ## Setup a Fork of Thrust
37
-
38
- You'll need a fork of Thrust on Github to create a pull request. To setup your
39
- fork:
40
-
41
- 1. Create a Github account (if needed)
42
- 2. Go to [the Thrust Github page](https://github.com/thrust/thrust)
43
- 3. Click "Fork" and follow any prompts that appear.
44
-
45
- Once your fork is created, setup a new remote repo in your local Thrust clone:
46
-
47
- ```
48
- git remote add github-fork [email protected]:<GITHUB_USERNAME>/thrust.git
49
- ```
50
-
51
- If you need to modify CUB, too, go to
52
- [the CUB Github page](https://github.com/thrust/cub) and repeat this process.
53
- Create CUB's `github-fork` remote in the `thrust/dependencies/cub` submodule.
54
-
55
- ## Setup Your Environment
56
-
57
- ### Git Environment
58
-
59
- If you haven't already, this is a good time to tell git who you are. This
60
- information is used to fill out authorship information on your git commits.
61
-
62
- ```
63
- git config --global user.name "John Doe"
64
- git config --global user.email [email protected]
65
- ```
66
-
67
- ### Configure CMake builds
68
-
69
- Thrust uses [CMake](https://www.cmake.org) for its developer build system. To
70
- configure, build, and test your checkout of Thrust:
71
-
72
- ```
73
- # Create build directory:
74
- mkdir build
75
- cd build
76
-
77
- # Configure -- use one of the following:
78
- cmake .. # Command line interface.
79
- ccmake .. # ncurses GUI (Linux only)
80
- cmake-gui # Graphical UI, set source/build directories in the app
81
-
82
- # Build:
83
- cmake --build . -j <num jobs> # invokes make (or ninja, etc)
84
-
85
- # Run tests and examples:
86
- ctest
87
- ```
88
-
89
- See [CMake Options](#cmake-options) for details on customizing the build.
90
-
91
- ## Create a Development Branch
92
-
93
- All work should be done in a development branch (also called a "topic branch")
94
- and not directly in the `master` branch. This makes it easier to manage multiple
95
- in-progress patches at once, and provides a descriptive label for your patch
96
- as it passes through the review system.
97
-
98
- To create a new branch based on the current `master`:
99
-
100
- ```
101
- # Checkout local master branch:
102
- cd /path/to/thrust/sources
103
- git checkout master
104
-
105
- # Sync local master branch with github:
106
- git pull
107
-
108
- # Create a new branch named `my_descriptive_branch_name` based on master:
109
- git checkout -b my_descriptive_branch_name
110
-
111
- # Verify that the branch has been created and is currently checked out:
112
- git branch
113
- ```
114
-
115
- Thrust branch names should follow a particular pattern:
116
-
117
- - For new features, name the branch `feature/<name>`
118
- - For bugfixes associated with a github issue, use `bug/github/<bug-description>-<bug-id>`
119
- - Internal nvidia and gitlab bugs should use `nvidia` or `gitlab` in place of
120
- `github`.
121
-
122
- If you plan to work on CUB as part of your patch, repeat this process in the
123
- `thrust/dependencies/cub` submodule.
124
-
125
- ## Local Development Loop
126
-
127
- ### Edit, Build, Test, Repeat
128
-
129
- Once the topic branch is created, you're all set to start working on Thrust
130
- code. Make some changes, then build and test them:
131
-
132
- ```
133
- # Implement changes:
134
- cd /path/to/thrust/sources
135
- emacs thrust/some_file.h # or whatever editor you prefer
136
-
137
- # Create / update a unit test for your changes:
138
- emacs testing/some_test.cu
139
-
140
- # Check that everything builds and tests pass:
141
- cd /path/to/thrust/build/directory
142
- cmake --build . -j <num jobs>
143
- ctest
144
- ```
145
-
146
- ### Creating a Commit
147
-
148
- Once you're satisfied with your patch, commit your changes:
149
-
150
- #### Thrust-only Changes
151
-
152
- ```
153
- # Manually add changed files and create a commit:
154
- cd /path/to/thrust
155
- git add thrust/some_file.h
156
- git add testing/some_test.cu
157
- git commit
158
-
159
- # Or, if possible, use git-gui to review your changes while building your patch:
160
- git gui
161
- ```
162
-
163
- #### Thrust and CUB Changes
164
-
165
- ```
166
- # Create CUB patch first:
167
- cd /path/to/thrust/dependencies/cub
168
- # Manually add changed files and create a commit:
169
- git add cub/some_file.cuh
170
- git commit
171
-
172
- # Create Thrust patch, including submodule update:
173
- cd /path/to/thrust/
174
- git add dependencies/cub # Updates submodule info
175
- git add thrust/some_file.h
176
- git add testing/some_test.cu
177
- git commit
178
-
179
- # Or, if possible, use git-gui to review your changes while building your patch:
180
- cd /path/to/thrust/dependencies/cub
181
- git gui
182
- cd /path/to/thrust
183
- git gui # Include dependencies/cub as part of your commit
184
-
185
- ```
186
-
187
- #### Writing a Commit Message
188
-
189
- Your commit message will communicate the purpose and rationale behind your
190
- patch to other developers, and will be used to populate the initial description
191
- of your Github pull request.
192
-
193
- When writing a commit message, the following standard format should be used,
194
- since tools in the git ecosystem are designed to parse this correctly:
195
-
196
- ```
197
- First line of commit message is a short summary (<80 char)
198
- <Second line left blank>
199
- Detailed description of change begins on third line. This portion can
200
- span multiple lines, try to manually wrap them at something reasonable.
201
-
202
- Blank lines can be used to separate multiple paragraphs in the description.
203
-
204
- If your patch is associated with another pull request or issue in the main
205
- Thrust repository, you should reference it with a `#` symbol, e.g.
206
- #1023 for issue 1023.
207
-
208
- For issues / pull requests in a different github repo, reference them using
209
- the full syntax, e.g. thrust/cub#4 for issue 4 in the thrust/cub repo.
210
-
211
- Markdown is recommended for formatting more detailed messages, as these will
212
- be nicely rendered on Github, etc.
213
- ```
214
-
215
- ## Push Development Branch to your Fork
216
-
217
- Once you've committed your changes to a local development branch, it's time to
218
- push them to your fork:
219
-
220
- ```
221
- cd /path/to/thrust/checkout
222
- git checkout my_descriptive_branch_name # if not already checked out
223
- git push --set-upstream github-fork my_descriptive_branch_name
224
- ```
225
-
226
- `--set-upstream github-fork` tells git that future pushes/pulls on this branch
227
- should target your `github-fork` remote by default.
228
-
229
- If you have CUB changes to commit as part of your patch, repeat this process in the
230
- `thrust/dependencies/cub` submodule.
231
-
232
- ## Create Pull Request
233
-
234
- To create a pull request for your freshly pushed branch, open your github fork
235
- in a browser by going to `https://www.github.com/<GITHUB_USERNAME>/thrust`. A
236
- prompt may automatically appear asking you to create a pull request if you've
237
- recently pushed a branch.
238
-
239
- If there's no prompt, go to "Code" > "Branches" and click the appropriate
240
- "New pull request" button for your branch.
241
-
242
- If you would like a specific developer to review your patch, feel free to
243
- request them as a reviewer at this time.
244
-
245
- The Thrust team will review your patch, test it on NVIDIA's internal CI, and
246
- provide feedback.
247
-
248
-
249
- If you have CUB changes to commit as part of your patch, repeat this process with
250
- your CUB branch and fork.
251
-
252
- ## Address Feedback and Update Pull Request
253
-
254
- If the reviewers request changes to your patch, use the following process to
255
- update the pull request:
256
-
257
- ```
258
- # Make changes:
259
- cd /path/to/thrust/sources
260
- git checkout my_descriptive_branch_name
261
- emacs thrust/some_file.h
262
- emacs testing/some_test.cu
263
-
264
- # Build + test
265
- cd /path/to/thrust/build/directory
266
- cmake --build . -j <num jobs>
267
- ctest
268
-
269
- # Amend commit:
270
- cd /path/to/thrust/sources
271
- git add thrust/some_file.h
272
- git add testing/some_test.cu
273
- git commit --amend
274
- # Or
275
- git gui # Check the "Amend Last Commit" box
276
-
277
- # Update the branch on your fork:
278
- git push -f
279
- ```
280
-
281
- At this point, the pull request should show your recent changes.
282
-
283
- If you have CUB changes to commit as part of your patch, repeat this process in the
284
- `thrust/dependencies/cub` submodule, and be sure to include any CUB submodule
285
- updates as part of your commit.
286
-
287
- ## When Your PR is Approved
288
-
289
- Once your pull request is approved by the Thrust team, no further action is
290
- needed from you. We will handle integrating it since we must coordinate changes
291
- to `master` with NVIDIA's internal perforce repository.
292
-
293
- # CMake Options
294
-
295
- A Thrust build is configured using CMake options. These may be passed to CMake
296
- using
297
-
298
- ```
299
- cmake -D<option_name>=<value> /path/to/thrust/sources
300
- ```
301
-
302
- or configured interactively with the `ccmake` or `cmake-gui` interfaces.
303
-
304
- Thrust supports two build modes. By default, a single configuration is built
305
- that targets a specific host system, device system, and C++ dialect.
306
- When `THRUST_ENABLE_MULTICONFIG` is `ON`, multiple configurations
307
- targeting a variety of systems and dialects are generated.
308
-
309
- The CMake options are divided into these categories:
310
-
311
- 1. [Generic CMake Options](#generic-cmake-options): Options applicable to all
312
- Thrust builds.
313
- 1. [Single Config CMake Options](#single-config-cmake-options): Options
314
- applicable only when `THRUST_ENABLE_MULTICONFIG` is disabled.
315
- 1. [Multi Config CMake Options](#multi-config-cmake-options): Options applicable
316
- only when `THRUST_ENABLE_MULTICONFIG` is enabled.
317
- 1. [CUDA Specific CMake Options](#cuda-specific-cmake-options): Options that
318
- control CUDA compilation. Only available when one or more configurations
319
- targets the CUDA system.
320
- 1. [TBB Specific CMake Options](#tbb-specific-cmake-options): Options that
321
- control TBB compilation. Only available when one or more configurations
322
- targets the TBB system.
323
-
324
- ## Generic CMake Options
325
-
326
- - `CMAKE_BUILD_TYPE={Release, Debug, RelWithDebInfo, MinSizeRel}`
327
- - Standard CMake build option. Default: `RelWithDebInfo`
328
- - `THRUST_ENABLE_HEADER_TESTING={ON, OFF}`
329
- - Whether to test compile public headers. Default is `ON`.
330
- - `THRUST_ENABLE_TESTING={ON, OFF}`
331
- - Whether to build unit tests. Default is `ON`.
332
- - `THRUST_ENABLE_EXAMPLES={ON, OFF}`
333
- - Whether to build examples. Default is `ON`.
334
- - `THRUST_ENABLE_MULTICONFIG={ON, OFF}`
335
- - Toggles single-config and multi-config modes. Default is `OFF` (single config).
336
- - `THRUST_ENABLE_EXAMPLE_FILECHECK={ON, OFF}`
337
- - Enable validation of example outputs using the LLVM FileCheck utility.
338
- Default is `OFF`.
339
-
340
- ## Single Config CMake Options
341
-
342
- - `THRUST_HOST_SYSTEM={CPP, TBB, OMP}`
343
- - Selects the host system. Default: `CPP`
344
- - `THRUST_DEVICE_SYSTEM={CUDA, TBB, OMP, CPP}`
345
- - Selects the device system. Default: `CUDA`
346
- - `THRUST_CPP_DIALECT={11, 14, 17}`
347
- - Selects the C++ standard dialect to use. Default is `14` (C++14).
348
-
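- For example, a single-config build targeting the `CPP` host system, the `OMP`
- device system, and C++17 (values chosen purely for illustration) could be
- configured with:
-
- ```
- cmake -DTHRUST_HOST_SYSTEM=CPP \
-       -DTHRUST_DEVICE_SYSTEM=OMP \
-       -DTHRUST_CPP_DIALECT=17 \
-       /path/to/thrust/sources
- ```
-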
349
- ## Multi Config CMake Options
350
-
351
- - `THRUST_MULTICONFIG_ENABLE_DIALECT_CPPXX={ON, OFF}`
352
- - Toggle whether a specific C++ dialect will be targeted.
353
- - Possible values of `XX` are `{11, 14, 17}`.
354
- - By default, only C++14 is enabled.
355
- - `THRUST_MULTICONFIG_ENABLE_SYSTEM_XXXX={ON, OFF}`
356
- - Toggle whether a specific system will be targeted.
357
- - Possible values of `XXXX` are `{CPP, CUDA, TBB, OMP}`
358
- - By default, only `CPP` and `CUDA` are enabled.
359
- - `THRUST_MULTICONFIG_WORKLOAD={SMALL, MEDIUM, LARGE, FULL}`
360
- - Restricts the host/device combinations that will be targeted.
361
- - By default, the `SMALL` workload is used.
362
- - The full cross product of `host x device` systems results in 12
363
- configurations, some of which are more important than others.
364
- This option can be used to prune some of the less important ones.
365
- - `SMALL`: (3 configs) Minimal coverage and validation of each device system against the `CPP` host.
366
- - `MEDIUM`: (6 configs) Cheap extended coverage.
367
- - `LARGE`: (8 configs) Expensive extended coverage. Includes all useful build configurations.
368
- - `FULL`: (12 configs) The complete cross product of all possible build configurations.
369
-
370
- | Config | Workloads | Value | Expense | Note |
371
- |----------|-----------|------------|-----------|------------------------------|
372
- | CPP/CUDA | `F L M S` | Essential | Expensive | Validates CUDA against CPP |
373
- | CPP/OMP | `F L M S` | Essential | Cheap | Validates OMP against CPP |
374
- | CPP/TBB | `F L M S` | Essential | Cheap | Validates TBB against CPP |
375
- | CPP/CPP | `F L M ` | Important | Cheap | Tests CPP as device |
376
- | OMP/OMP | `F L M ` | Important | Cheap | Tests OMP as host |
377
- | TBB/TBB | `F L M ` | Important | Cheap | Tests TBB as host |
378
- | TBB/CUDA | `F L ` | Important | Expensive | Validates TBB/CUDA interop |
379
- | OMP/CUDA | `F L ` | Important | Expensive | Validates OMP/CUDA interop |
380
- | TBB/OMP | `F ` | Not useful | Cheap | Mixes CPU-parallel systems |
381
- | OMP/TBB | `F ` | Not useful | Cheap | Mixes CPU-parallel systems |
382
- | TBB/CPP | `F ` | Not Useful | Cheap | Parallel host, serial device |
383
- | OMP/CPP | `F ` | Not Useful | Cheap | Parallel host, serial device |
384
-
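- For example, a multi-config build that additionally targets C++17 and the OMP
- system with the `MEDIUM` workload (values chosen purely for illustration)
- could be configured with:
-
- ```
- cmake -DTHRUST_ENABLE_MULTICONFIG=ON \
-       -DTHRUST_MULTICONFIG_ENABLE_DIALECT_CPP17=ON \
-       -DTHRUST_MULTICONFIG_ENABLE_SYSTEM_OMP=ON \
-       -DTHRUST_MULTICONFIG_WORKLOAD=MEDIUM \
-       /path/to/thrust/sources
- ```
-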
385
- ## CUDA Specific CMake Options
386
-
387
- - `THRUST_INCLUDE_CUB_CMAKE={ON, OFF}`
388
- - If enabled, the CUB project will be built as part of Thrust. Default is
389
- `OFF`.
390
- - This adds CUB tests, etc. Useful for working on both CUB and Thrust
391
- simultaneously.
392
- - CUB configurations will be generated for each C++ dialect targeted by
393
- the current Thrust build.
394
- - `THRUST_ENABLE_COMPUTE_XX={ON, OFF}`
395
- - Controls the targeted CUDA architecture(s)
396
- - Multiple options may be selected when using NVCC as the CUDA compiler.
397
- - Valid values of `XX` are:
398
- `{35, 37, 50, 52, 53, 60, 61, 62, 70, 72, 75, 80}`
399
- Default value depends on `THRUST_DISABLE_ARCH_BY_DEFAULT` (see below).
400
- - `THRUST_ENABLE_COMPUTE_FUTURE={ON, OFF}`
401
- - If enabled, CUDA objects will target the most recent virtual architecture
402
- in addition to the real architectures specified by the
403
- `THRUST_ENABLE_COMPUTE_XX` options.
404
- Default value depends on `THRUST_DISABLE_ARCH_BY_DEFAULT` (see below).
405
- - `THRUST_DISABLE_ARCH_BY_DEFAULT={ON, OFF}`
406
- - When `ON`, all `THRUST_ENABLE_COMPUTE_*` options are initially `OFF`.
407
- - Default: `OFF` (meaning all architectures are enabled by default)
408
- - `THRUST_ENABLE_TESTS_WITH_RDC={ON, OFF}`
409
- - Whether to enable Relocatable Device Code when building tests.
410
- Default is `OFF`.
411
- - `THRUST_ENABLE_EXAMPLES_WITH_RDC={ON, OFF}`
412
- - Whether to enable Relocatable Device Code when building examples.
413
- Default is `OFF`.
414
-
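- For example, to target only compute capabilities 7.0 and 8.0 (an illustrative
- selection), disable the default architectures and enable the desired ones
- explicitly:
-
- ```
- cmake -DTHRUST_DISABLE_ARCH_BY_DEFAULT=ON \
-       -DTHRUST_ENABLE_COMPUTE_70=ON \
-       -DTHRUST_ENABLE_COMPUTE_80=ON \
-       /path/to/thrust/sources
- ```
-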
415
- ## TBB Specific CMake Options
416
-
417
- - `THRUST_TBB_ROOT=<path to tbb root>`
418
- - When the TBB system is requested, set this to the root of the TBB installation
419
- (e.g. the location of `lib/`, `bin/` and `include/` for the TBB libraries).
420
-
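- For example, to build with TBB as the device system using a TBB installation
- rooted at `/opt/tbb` (an illustrative path):
-
- ```
- cmake -DTHRUST_DEVICE_SYSTEM=TBB \
-       -DTHRUST_TBB_ROOT=/opt/tbb \
-       /path/to/thrust/sources
- ```
-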
421
- # Development Model
422
-
423
- The following is a description of the basic development process that Thrust follows. This is a living
424
- document that will evolve as our process evolves.
425
-
426
- Thrust is distributed in three ways:
427
-
428
- * On GitHub.
429
- * In the NVIDIA HPC SDK.
430
- * In the CUDA Toolkit.
431
-
432
- ## Trunk Based Development
433
-
434
- Thrust uses [trunk based development](https://trunkbaseddevelopment.com). There is a single long-lived
435
- branch called `master`. Engineers may create branches for feature development. Such branches always
436
- merge into `master`. There are no release branches. Releases are produced by taking a snapshot of
437
- `master` ("snapping"). After a release has been snapped from `master`, it will never be changed.
438
-
439
- ## Repositories
440
-
441
- As Thrust is developed both on GitHub and internally at NVIDIA, there are three main places where code lives:
442
-
443
- * The Source of Truth, the [public Thrust repository](https://github.com/thrust/thrust), referred to as
444
- `github` later in this document.
445
- * An internal GitLab repository, referred to as `gitlab` later in this document.
446
- * An internal Perforce repository, referred to as `perforce` later in this document.
447
-
448
- ## Versioning
449
-
450
- Thrust has its own versioning system for releases, independent of the versioning scheme of the NVIDIA
451
- HPC SDK or the CUDA Toolkit.
452
-
453
- Today, Thrust version numbers have a specific [semantic meaning](https://semver.org/).
454
- Releases prior to 1.10.0 largely, but not strictly, followed these semantic meanings.
455
-
456
- The version number for a Thrust release uses the following format: `MMM.mmm.ss-ppp`, where:
457
-
458
- * `THRUST_VERSION_MAJOR`/`MMM`: Major version, up to 3 decimal digits. It is incremented
459
- when changes that are API-backwards-incompatible are made.
460
- * `THRUST_VERSION_MINOR`/`mmm`: Minor version, up to 3 decimal digits. It is incremented when
461
- breaking API, ABI, or semantic changes are made.
462
- * `THRUST_VERSION_SUBMINOR`/`ss`: Subminor version, up to 2 decimal digits. It is incremented
463
- when notable new features or bug fixes that are API-backwards-compatible are made.
464
- * `THRUST_PATCH_NUMBER`/`ppp`: Patch number, up to 3 decimal digits. It is incremented if any
465
- change in the repo whatsoever is made and no other version component has been incremented.
466
-
467
- The `<thrust/version.h>` header defines `THRUST_*` macros for all of the version components mentioned
468
- above. Additionally, a `THRUST_VERSION` macro is defined, which is an integer literal containing all
469
- of the version components except for `THRUST_PATCH_NUMBER`.
470
-
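- For example, under this scheme a release labeled `1.9.10-1` corresponds to
- `THRUST_VERSION_MAJOR=1`, `THRUST_VERSION_MINOR=9`, `THRUST_VERSION_SUBMINOR=10`,
- and `THRUST_PATCH_NUMBER=1`; given the digit widths above, `THRUST_VERSION`
- would be the integer `100910` (major * 100000 + minor * 100 + subminor). The
- exact encoding is defined in `<thrust/version.h>`.
-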
471
- ## Branches and Tags
472
-
473
- The following tag names are used in the Thrust project:
474
-
475
- * `github/nvhpc-X.Y`: the tag that directly corresponds to what has been shipped in the NVIDIA HPC SDK release X.Y.
476
- * `github/cuda-X.Y`: the tag that directly corresponds to what has been shipped in the CUDA Toolkit release X.Y.
477
- * `github/A.B.C`: the tag that directly corresponds to a Thrust version A.B.C.
478
-
479
- The following branch names are used in the Thrust project:
480
-
481
- * `github/master`: the Source of Truth development branch of Thrust.
482
- * `github/old-master`: the old Source of Truth branch, before unification of public and internal repositories.
483
- * `github/feature/<name>`: feature branch for a feature under development.
484
- * `github/bug/<bug-system>/<bug-description>-<bug-id>`: bug fix branch, where `bug-system` is `github` or `nvidia`.
485
- * `gitlab/master`: mirror of `github/master`.
486
- * `perforce/private`: mirrored `github/master`, plus files necessary for internal NVIDIA testing systems.
487
-
488
- On the rare occasion that we cannot do work in the open, for example when developing a change specific to an
489
- unreleased product, these branches may exist on `gitlab` instead of `github`. By default, everything should be
490
- in the open on `github` unless there is a strong motivation for it to not be open.
spaces/CVPR/LIVE/thrust/testing/omp/nvcc_independence.cpp DELETED
@@ -1,75 +0,0 @@
1
- #include <unittest/unittest.h>
2
- #include <thrust/device_ptr.h>
3
- #include <thrust/transform.h>
4
- #include <thrust/reduce.h>
5
- #include <thrust/scan.h>
6
- #include <thrust/sort.h>
7
- #include <thrust/system_error.h>
8
-
9
- void TestNvccIndependenceTransform(void)
10
- {
11
- typedef int T;
12
- const int n = 10;
13
-
14
- thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
15
- thrust::device_vector<T> d_input = h_input;
16
-
17
- thrust::host_vector<T> h_output(n);
18
- thrust::device_vector<T> d_output(n);
19
-
20
- thrust::transform(h_input.begin(), h_input.end(), h_output.begin(), thrust::negate<T>());
21
- thrust::transform(d_input.begin(), d_input.end(), d_output.begin(), thrust::negate<T>());
22
-
23
- ASSERT_EQUAL(h_output, d_output);
24
- }
25
- DECLARE_UNITTEST(TestNvccIndependenceTransform);
26
-
27
- void TestNvccIndependenceReduce(void)
28
- {
29
- typedef int T;
30
- const int n = 10;
31
-
32
- thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
33
- thrust::device_vector<T> d_data = h_data;
34
-
35
- T init = 13;
36
-
37
- T h_result = thrust::reduce(h_data.begin(), h_data.end(), init);
38
- T d_result = thrust::reduce(d_data.begin(), d_data.end(), init);
39
-
40
- ASSERT_ALMOST_EQUAL(h_result, d_result);
41
- }
42
- DECLARE_UNITTEST(TestNvccIndependenceReduce);
43
-
44
- void TestNvccIndependenceExclusiveScan(void)
45
- {
46
- typedef int T;
47
- const int n = 10;
48
-
49
- thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
50
- thrust::device_vector<T> d_input = h_input;
51
-
52
- thrust::host_vector<T> h_output(n);
53
- thrust::device_vector<T> d_output(n);
54
-
55
- thrust::inclusive_scan(h_input.begin(), h_input.end(), h_output.begin());
56
- thrust::inclusive_scan(d_input.begin(), d_input.end(), d_output.begin());
57
- ASSERT_EQUAL(d_output, h_output);
58
- }
59
- DECLARE_UNITTEST(TestNvccIndependenceExclusiveScan);
60
-
61
- void TestNvccIndependenceSort(void)
62
- {
63
- typedef int T;
64
- const int n = 10;
65
-
66
- thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
67
- thrust::device_vector<T> d_data = h_data;
68
-
69
- thrust::sort(h_data.begin(), h_data.end(), thrust::less<T>());
70
- thrust::sort(d_data.begin(), d_data.end(), thrust::less<T>());
71
-
72
- ASSERT_EQUAL(h_data, d_data);
73
- }
74
- DECLARE_UNITTEST(TestNvccIndependenceSort);
75
-
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/equal.h DELETED
@@ -1,74 +0,0 @@
1
- /******************************************************************************
2
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3
- *
4
- * Redistribution and use in source and binary forms, with or without
5
- * modification, are permitted provided that the following conditions are met:
6
- * * Redistributions of source code must retain the above copyright
7
- * notice, this list of conditions and the following disclaimer.
8
- * * Redistributions in binary form must reproduce the above copyright
9
- * notice, this list of conditions and the following disclaimer in the
10
- * documentation and/or other materials provided with the distribution.
11
- * * Neither the name of the NVIDIA CORPORATION nor the
12
- * names of its contributors may be used to endorse or promote products
13
- * derived from this software without specific prior written permission.
14
- *
15
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
- * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
- *
26
- ******************************************************************************/
27
- #pragma once
28
-
29
-
30
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
31
- #include <thrust/system/cuda/config.h>
32
-
33
- #include <thrust/system/cuda/detail/mismatch.h>
34
-
35
- namespace thrust
36
- {
37
- namespace cuda_cub {
38
-
39
- template <class Derived,
40
- class InputIt1,
41
- class InputIt2,
42
- class BinaryPred>
43
- bool __host__ __device__
44
- equal(execution_policy<Derived>& policy,
45
- InputIt1 first1,
46
- InputIt1 last1,
47
- InputIt2 first2,
48
- BinaryPred binary_pred)
49
- {
50
- return cuda_cub::mismatch(policy, first1, last1, first2, binary_pred).first == last1;
51
- }
52
-
53
- template <class Derived,
54
- class InputIt1,
55
- class InputIt2>
56
- bool __host__ __device__
57
- equal(execution_policy<Derived>& policy,
58
- InputIt1 first1,
59
- InputIt1 last1,
60
- InputIt2 first2)
61
- {
62
- typedef typename thrust::iterator_value<InputIt1>::type InputType1;
63
- return cuda_cub::equal(policy,
64
- first1,
65
- last1,
66
- first2,
67
- equal_to<InputType1>());
68
- }
69
-
70
-
71
-
72
- } // namespace cuda_cub
73
- } // end namespace thrust
74
- #endif
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/backbone/position_encoding.py DELETED
@@ -1,186 +0,0 @@
1
- # ------------------------------------------------------------------------
2
- # Grounding DINO
3
- # url: https://github.com/IDEA-Research/GroundingDINO
4
- # Copyright (c) 2023 IDEA. All Rights Reserved.
5
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6
- # ------------------------------------------------------------------------
7
- # DINO
8
- # Copyright (c) 2022 IDEA. All Rights Reserved.
9
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
10
- # ------------------------------------------------------------------------
11
- # Conditional DETR
12
- # Copyright (c) 2021 Microsoft. All Rights Reserved.
13
- # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
14
- # ------------------------------------------------------------------------
15
- # Copied from DETR (https://github.com/facebookresearch/detr)
16
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
17
- # ------------------------------------------------------------------------
18
-
19
- """
20
- Various positional encodings for the transformer.
21
- """
22
- import math
23
-
24
- import torch
25
- from torch import nn
26
-
27
- from groundingdino.util.misc import NestedTensor
28
-
29
-
30
- class PositionEmbeddingSine(nn.Module):
31
- """
32
- This is a more standard version of the position embedding, very similar to the one
33
- used by the Attention is all you need paper, generalized to work on images.
34
- """
35
-
36
- def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
37
- super().__init__()
38
- self.num_pos_feats = num_pos_feats
39
- self.temperature = temperature
40
- self.normalize = normalize
41
- if scale is not None and normalize is False:
42
- raise ValueError("normalize should be True if scale is passed")
43
- if scale is None:
44
- scale = 2 * math.pi
45
- self.scale = scale
46
-
47
- def forward(self, tensor_list: NestedTensor):
48
- x = tensor_list.tensors
49
- mask = tensor_list.mask
50
- assert mask is not None
51
- not_mask = ~mask
52
- y_embed = not_mask.cumsum(1, dtype=torch.float32)
53
- x_embed = not_mask.cumsum(2, dtype=torch.float32)
54
- if self.normalize:
55
- eps = 1e-6
56
- # if os.environ.get("SHILONG_AMP", None) == '1':
57
- # eps = 1e-4
58
- # else:
59
- # eps = 1e-6
60
- y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
61
- x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
62
-
63
- dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
64
- dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
65
-
66
- pos_x = x_embed[:, :, :, None] / dim_t
67
- pos_y = y_embed[:, :, :, None] / dim_t
68
- pos_x = torch.stack(
69
- (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
70
- ).flatten(3)
71
- pos_y = torch.stack(
72
- (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
73
- ).flatten(3)
74
- pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
75
- return pos
76
-
77
-
78
- class PositionEmbeddingSineHW(nn.Module):
79
- """
80
- This is a more standard version of the position embedding, very similar to the one
81
- used by the Attention is all you need paper, generalized to work on images.
82
- """
83
-
84
- def __init__(
85
- self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None
86
- ):
87
- super().__init__()
88
- self.num_pos_feats = num_pos_feats
89
- self.temperatureH = temperatureH
90
- self.temperatureW = temperatureW
91
- self.normalize = normalize
92
- if scale is not None and normalize is False:
93
- raise ValueError("normalize should be True if scale is passed")
94
- if scale is None:
95
- scale = 2 * math.pi
96
- self.scale = scale
97
-
98
- def forward(self, tensor_list: NestedTensor):
99
- x = tensor_list.tensors
100
- mask = tensor_list.mask
101
- assert mask is not None
102
- not_mask = ~mask
103
- y_embed = not_mask.cumsum(1, dtype=torch.float32)
104
- x_embed = not_mask.cumsum(2, dtype=torch.float32)
105
-
106
- # import ipdb; ipdb.set_trace()
107
-
108
- if self.normalize:
109
- eps = 1e-6
110
- y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
111
- x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
112
-
113
- dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
114
- dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode='floor')) / self.num_pos_feats)
115
- pos_x = x_embed[:, :, :, None] / dim_tx
116
-
117
- dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
118
- dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode='floor')) / self.num_pos_feats)
119
- pos_y = y_embed[:, :, :, None] / dim_ty
120
-
121
- pos_x = torch.stack(
122
- (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
123
- ).flatten(3)
124
- pos_y = torch.stack(
125
- (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
126
- ).flatten(3)
127
- pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
128
-
129
- # import ipdb; ipdb.set_trace()
130
-
131
- return pos
132
-
133
-
134
- class PositionEmbeddingLearned(nn.Module):
135
- """
136
- Absolute pos embedding, learned.
137
- """
138
-
139
- def __init__(self, num_pos_feats=256):
140
- super().__init__()
141
- self.row_embed = nn.Embedding(50, num_pos_feats)
142
- self.col_embed = nn.Embedding(50, num_pos_feats)
143
- self.reset_parameters()
144
-
145
- def reset_parameters(self):
146
- nn.init.uniform_(self.row_embed.weight)
147
- nn.init.uniform_(self.col_embed.weight)
148
-
149
- def forward(self, tensor_list: NestedTensor):
150
- x = tensor_list.tensors
151
- h, w = x.shape[-2:]
152
- i = torch.arange(w, device=x.device)
153
- j = torch.arange(h, device=x.device)
154
- x_emb = self.col_embed(i)
155
- y_emb = self.row_embed(j)
156
- pos = (
157
- torch.cat(
158
- [
159
- x_emb.unsqueeze(0).repeat(h, 1, 1),
160
- y_emb.unsqueeze(1).repeat(1, w, 1),
161
- ],
162
- dim=-1,
163
- )
164
- .permute(2, 0, 1)
165
- .unsqueeze(0)
166
- .repeat(x.shape[0], 1, 1, 1)
167
- )
168
- return pos
169
-
170
-
171
- def build_position_encoding(args):
172
- N_steps = args.hidden_dim // 2
173
- if args.position_embedding in ("v2", "sine"):
174
- # TODO find a better way of exposing other arguments
175
- position_embedding = PositionEmbeddingSineHW(
176
- N_steps,
177
- temperatureH=args.pe_temperatureH,
178
- temperatureW=args.pe_temperatureW,
179
- normalize=True,
180
- )
181
- elif args.position_embedding in ("v3", "learned"):
182
- position_embedding = PositionEmbeddingLearned(N_steps)
183
- else:
184
- raise ValueError(f"not supported {args.position_embedding}")
185
-
186
- return position_embedding
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/Client.js DELETED
@@ -1,446 +0,0 @@
1
- import WebSocket, { WebSocketServer } from 'ws'
2
- import { getApiData, makeGSUidSendMsg, lifecycle, heartbeat, setMsgMap } from '../model/index.js'
3
- import { Version, Config } from './index.js'
4
- import express from "express"
5
- import http from "http"
6
- import fetch from 'node-fetch'
7
-
8
- export default class Client {
9
- constructor({ name, address, type, reconnectInterval, maxReconnectAttempts, accessToken, uin = Bot.uin, closed = false }) {
10
- this.name = name;
11
- this.address = address;
12
- this.type = type;
13
- this.reconnectInterval = reconnectInterval;
14
- this.maxReconnectAttempts = maxReconnectAttempts;
15
- this.accessToken = accessToken;
16
- this.uin = Number(uin)
17
- this.ws = null
18
- this.status = 0
19
- this.closed = closed
20
- }
21
-
22
- reconnectCount = 1
23
-
24
- timer = null
25
-
26
- stopReconnect = false
27
-
28
- createWs() {
29
- try {
30
- const headers = {
31
- 'X-Self-ID': this.uin,
32
- 'X-Client-Role': 'Universal',
33
- 'User-Agent': `ws-plugin/${Version.version}`
34
- }
35
- if (this.accessToken) headers["Authorization"] = 'Token ' + this.accessToken
36
- this.ws = new WebSocket(this.address, { headers })
37
- } catch (error) {
38
- logger.error(`[ws-plugin] 出错了,可能是ws地址填错了~\nws名字: ${this.name}\n地址: ${this.address}\n类型: 1`)
39
- return
40
- }
41
- this.ws.on('open', async () => {
42
- logger.mark(`[ws-plugin] ${this.name} 已连接`);
43
- if (this.status == 3 && this.reconnectCount > 1 && Config.reconnectToMaster) {
44
- await this.sendMasterMsg(`${this.name} 重连成功~`)
45
- } else if (this.status == 0 && Config.firstconnectToMaster) {
46
- await this.sendMasterMsg(`${this.name} 连接成功~`)
47
- }
48
- this.ws.send(lifecycle(this.uin))
49
- this.status = 1
50
- this.reconnectCount = 1
51
- if (Config.heartbeatInterval > 0) {
52
- this.timer = setInterval(async () => {
53
- this.ws.send(heartbeat(this.uin))
54
- }, Config.heartbeatInterval * 1000)
55
- }
56
- })
57
- this.ws.on('message', async (event) => {
58
- let data
59
- if (Buffer.isBuffer(event)) {
60
- data = JSON.parse(event.toString())
61
- } else {
62
- data = JSON.parse(event.data);
63
- }
64
- let result = await this.getData(data.action, data.params, data.echo)
65
- this.ws.send(JSON.stringify(result));
66
- })
67
- this.ws.on('close', async code => {
68
- logger.warn(`[ws-plugin] ${this.name} 连接已关闭`);
69
- clearInterval(this.timer)
70
- if (Config.disconnectToMaster && this.reconnectCount == 1 && this.status == 1) {
71
- await this.sendMasterMsg(`${this.name} 已断开连接...`)
72
- } else if (Config.firstconnectToMaster && this.reconnectCount == 1 && this.status == 0) {
73
- await this.sendMasterMsg(`${this.name} 连接失败...`)
74
- }
75
- this.status = 3
76
- if (!this.stopReconnect && ((this.reconnectCount < this.maxReconnectAttempts) || this.maxReconnectAttempts <= 0)) {
77
- if (code === 1005) {
78
- logger.warn(`[ws-plugin] ${this.name} 连接异常,停止重连`);
79
- this.status = 0
80
- } else {
81
- logger.warn(`[ws-plugin] ${this.name} 开始尝试重新连接第${this.reconnectCount}次`);
82
- this.reconnectCount++
83
- setTimeout(() => {
84
- this.createWs()
85
- }, this.reconnectInterval * 1000);
86
- }
87
- } else {
88
- this.stopReconnect = false
89
- this.status = 0
90
- logger.warn(`[ws-plugin] ${this.name} 达到最大重连次数或关闭连接,停止重连`);
91
- }
92
- })
93
- this.ws.on('error', (event) => {
94
- logger.error(`[ws-plugin] ${this.name} 连接失败\n${event}`);
95
- })
96
- }
97
-
98
- createServer() {
99
- const parts = this.address.split(':');
100
- this.host = parts[0];
101
- this.port = parts[1];
102
- this.arr = []
103
- this.express = express()
104
- this.server = http.createServer(this.express)
105
- this.server.on("upgrade", (req, socket, head) => {
106
- if (this.accessToken) {
107
- const token = req.headers['authorization']?.replace('Token ', '')
108
- if (!token) {
109
- socket.write('HTTP/1.1 401 Unauthorized\r\n\r\n');
110
- socket.destroy();
111
- return
112
- } else if (this.accessToken != token) {
113
- socket.write('HTTP/1.1 403 Forbidden\r\n\r\n');
114
- socket.destroy();
115
- return;
116
- }
117
- }
118
- this.wss.handleUpgrade(req, socket, head, conn => {
119
- if (req.url === '/') {
120
- conn.id = req.headers["sec-websocket-key"]
121
- let time = null
122
- conn.send(lifecycle(this.uin))
123
- if (Config.heartbeatInterval > 0) {
124
- time = setInterval(async () => {
125
- conn.send(heartbeat(this.uin))
126
- }, Config.heartbeatInterval * 1000)
127
- }
128
- logger.mark(`[ws-plugin] ${this.name} 接受 WebSocket 连接: ${req.connection.remoteAddress}`);
129
- conn.on("error", (event) => {
130
- logger.error(`[ws-plugin] ${this.name} 接受 WebSocket 连接时出现错误: ${event}`)
131
- })
132
- conn.on("close", () => {
133
- if (this.stopReconnect === false) {
134
- logger.warn(`[ws-plugin] ${this.name} 关闭 WebSocket 连接`);
135
- }
136
- this.arr = this.arr.filter(i => i.id != req.headers["sec-websocket-key"])
137
- clearInterval(time)
138
- })
139
- conn.on("message", async event => {
140
- const data = JSON.parse(event)
141
- const result = await this.getData(data.action, data.params, data.echo)
142
- conn.send(JSON.stringify(result));
143
- })
144
- this.arr.push(conn)
145
- } else if (req.url === '/api' || req.url === '/api/') {
146
- logger.mark(`[ws-plugin] ${this.name} 接受 WebSocket api 连接: ${req.connection.remoteAddress}`);
147
- conn.on("error", (event) => {
148
- logger.error(`[ws-plugin] ${this.name} 接受 WebSocket api 连接时出现错误: ${event}`)
149
- })
150
- conn.on("close", () => {
151
- if (this.stopReconnect === false) {
152
- logger.warn(`[ws-plugin] ${this.name} 关闭 WebSocket api 连接`);
153
- }
154
- })
155
- conn.on("message", async event => {
156
- const data = JSON.parse(event)
157
- const result = await this.getData(data.action, data.params, data.echo)
158
- conn.send(JSON.stringify(result));
159
- })
160
- } else if (req.url === '/event' || req.url === '/event/') {
161
- conn.id = req.headers["sec-websocket-key"]
162
- let time = null
163
- conn.send(lifecycle(this.uin))
164
- if (Config.heartbeatInterval > 0) {
165
- time = setInterval(async () => {
166
- conn.send(heartbeat(this.uin))
167
- }, Config.heartbeatInterval * 1000)
168
- }
169
- logger.mark(`[ws-plugin] ${this.name} 接受 WebSocket event 连接: ${req.connection.remoteAddress}`);
170
- conn.on("error", (event) => {
171
- logger.error(`[ws-plugin] ${this.name} 接受 WebSocket event 连接时出现错误: ${event}`)
172
- })
173
- conn.on("close", () => {
174
- if (this.stopReconnect === false) {
175
- logger.warn(`[ws-plugin] ${this.name} 关闭 WebSocket event 连接`);
176
- }
177
- this.arr = this.arr.filter(i => i.id != req.headers["sec-websocket-key"])
178
- clearInterval(time)
179
- })
180
- this.arr.push(conn)
181
- }
182
- })
183
-
184
- })
185
- this.ws = {
186
- send: (msg) => {
187
- for (const i of this.arr) {
188
- i.send(msg)
189
- }
190
- },
191
- close: () => {
192
- this.server.close()
193
- logger.warn(`[ws-plugin] CQ WebSocket 服务器已关闭: ${this.host}:${this.port}`)
194
- for (const i of this.arr) {
195
- i.close()
196
- }
197
- }
198
- }
199
- this.server.on('error', error => {
200
- logger.error(`[ws-plugin] ${this.name} CQ WebSocket 服务器启动失败: ${this.host}:${this.port}`)
201
- logger.error(error)
202
- })
203
- this.wss = new WebSocketServer({ noServer: true })
204
- this.server.listen(this.port, this.host, () => {
205
- this.status = 1
206
- logger.mark(`[ws-plugin] CQ WebSocket 服务器已启动: ${this.host}:${this.port}`)
207
- })
208
- }
209
-
210
- createGSUidWs() {
211
- try {
212
- this.ws = new WebSocket(this.address)
213
- } catch (error) {
214
- logger.error(`[ws-plugin] 出错了,可能是ws地址填错了~\nws名字: ${this.name}\n地址: ${this.address}\n类型: 3`)
215
- return
216
- }
217
- this.ws.on('open', async () => {
218
- logger.mark(`[ws-plugin] ${this.name} 已连接`);
219
- if (this.status == 3 && this.reconnectCount > 1 && Config.reconnectToMaster) {
220
- await this.sendMasterMsg(`${this.name} 重连成功~`)
221
- } else if (this.status == 0 && Config.firstconnectToMaster) {
222
- await this.sendMasterMsg(`${this.name} 连接成功~`)
223
- }
224
- this.status = 1
225
- this.reconnectCount = 1
226
- })
227
-
228
- this.ws.on('message', async event => {
229
- const data = JSON.parse(event.toString());
230
- const { sendMsg, quote } = await makeGSUidSendMsg(data)
231
- if (sendMsg.length > 0) {
232
- let sendRet, group_id, user_id
233
- // const bot = Version.isTrss ? Bot[data.bot_self_id] : Bot
234
- const bot = Bot[data.bot_self_id] || Bot
235
- switch (data.target_type) {
236
- case 'group':
237
- case 'channel':
238
- group_id = data.target_id
239
- sendRet = await bot.pickGroup(group_id).sendMsg(sendMsg, quote)
240
- break;
241
- case 'direct':
242
- user_id = data.target_id
243
- sendRet = await bot.pickFriend(user_id).sendMsg(sendMsg, quote)
244
- break;
245
- default:
246
- break;
247
- }
248
- if (sendRet.rand) {
249
- setMsgMap({
250
- message_id: sendRet.message_id,
251
- time: sendRet.time,
252
- seq: sendRet.seq,
253
- rand: sendRet.rand,
254
- user_id: user_id,
255
- group_id: group_id,
256
- onebot_id: Math.floor(Math.random() * Math.pow(2, 32)) | 0,
257
- })
258
- }
259
- logger.mark(`[ws-plugin] 连接名字:${this.name} 处理完成`)
260
- }
261
- })
262
-
263
- this.ws.on('close', async code => {
264
- logger.warn(`[ws-plugin] ${this.name} 连接已关闭`);
265
- if (Config.disconnectToMaster && this.reconnectCount == 1 && this.status == 1) {
266
- await this.sendMasterMsg(`${this.name} 已断开连接...`)
267
- } else if (Config.firstconnectToMaster && this.reconnectCount == 1 && this.status == 0) {
268
- await this.sendMasterMsg(`${this.name} 连接失败...`)
269
- }
270
- this.status = 3
271
- if (!this.stopReconnect && ((this.reconnectCount < this.maxReconnectAttempts) || this.maxReconnectAttempts <= 0)) {
272
- if (code === 1005) {
273
- logger.warn(`[ws-plugin] ${this.name} 连接异常,停止重连`);
274
- this.status = 0
275
- } else {
276
- logger.warn(`[ws-plugin] ${this.name} 开始尝试重新连接第 ${this.reconnectCount} 次`);
277
- this.reconnectCount++
278
- setTimeout(() => {
279
- this.createGSUidWs()
280
- }, this.reconnectInterval * 1000);
281
- }
282
- } else {
283
- this.stopReconnect = false
284
- this.status = 0
285
- logger.warn(`[ws-plugin] ${this.name} 达到最大重连次数或关闭连接,停止重连`);
286
- }
287
- })
288
-
289
- this.ws.on('error', (event) => {
290
- logger.error(`[ws-plugin] ${this.name} 连接失败\n${event}`);
291
- })
292
- }
293
-
294
- createHttp() {
295
- const parts = this.address.split(':');
296
- this.host = parts[0];
297
- this.port = parts[1];
298
- this.express = express();
299
- this.server = http.createServer(this.express);
300
- this.express.use(express.json({ limit: '50mb' }));
301
- this.express.use(express.urlencoded({ extended: true, limit: '50mb' }));
302
- this.express.use((req, res, next) => this.authorization(req, res, next))
303
-
304
- this.express.get('/:action', async (req, res) => {
305
- const { action } = req.params;
306
- const { query: params } = req;
307
- const data = await this.getData(action, params)
308
- res.status(200).json(data || {})
309
- });
310
-
311
- this.express.post('/:action', async (req, res) => {
312
- const { action } = req.params;
313
- const { body: params } = req;
314
- const data = await this.getData(action, params)
315
- res.status(200).json(data || {})
316
- });
317
-
318
- this.express.post('/', async (req, res) => {
319
- const { action, params } = req.body;
320
- const data = await this.getData(action, params)
321
- res.status(200).json(data || {})
322
- });
323
-
324
- this.server.on('error', error => {
325
- logger.error(`[ws-plugin] ${this.name} 正向HTTP 服务器启动失败: ${this.host}:${this.port}`)
326
- logger.error(error)
327
- })
328
- this.server.listen(this.port, this.host, () => {
329
- this.status = 1
330
- logger.mark(`[ws-plugin] HTTP 服务器已启动: ${this.host}:${this.port}`)
331
- })
332
- this.ws = {
333
- close: () => {
334
- this.server.close()
335
- logger.warn(`[ws-plugin] 正向HTTP 服务器已关闭: ${this.host}:${this.port}`)
336
- }
337
- }
338
- }
339
-
340
- createHttpPost() {
341
- if (!this.address.startsWith('http')) {
342
- this.address = 'http://' + this.address
343
- }
344
- this.status = 1
345
- // 心跳咕一下
346
- this.ws = {
347
- send: body => {
348
- fetch(this.address, {
349
- method: 'POST',
350
- headers: {
351
- 'content-type': 'application/json',
352
- 'x-self-id': this.uin,
353
- 'user-agent': `ws-plugin/${Version.version}`
354
- },
355
- body
356
- })
357
- }
358
- }
359
- }
360
-
361
- close() {
362
- this.stopReconnect = true
363
- if (this.status == 1) {
364
- this.ws?.close?.()
365
- this.status = 0
366
- }
367
- }
368
-
369
- authorization(req, res, next) {
370
- let code = null
371
- const token = req.headers['authorization']?.replace?.(/^(Token|Bearer) /, '') || req.query.access_token
372
- if (this.accessToken) {
373
- if (!token) {
374
- code = 401
375
- } else if (this.accessToken != token) {
376
- code = 403
377
- }
378
- }
379
- if (code) {
380
- res.status(code).end()
381
- return
382
- }
383
- next()
384
- }
385
-
386
- async getData(action, params, echo) {
387
- let result
388
- try {
389
- const data = await getApiData(action, params, this.name, this.uin);
390
- result = {
391
- status: 'ok',
392
- retcode: 0,
393
- data,
394
- echo
395
- }
396
- } catch (error) {
397
- if (!error.noLog) logger.error('ws-plugin出现错误', error)
398
- result = {
399
- status: 'failed',
400
- retcode: -1,
401
- msg: error.message,
402
- wording: 'ws-plugin获取信息失败',
403
- echo
404
- }
405
- } finally {
406
- return result
407
- }
408
- }
409
-
410
- async sendMasterMsg(msg) {
411
- // const bot = Version.isTrss ? Bot[this.uin] : Bot
412
- const bot = Bot[this.uin] || Bot
413
- let masterQQ = []
414
- const master = Version.isTrss ? Config.master[this.uin] : Config.masterQQ
415
- if (Config.howToMaster > 0) {
416
- masterQQ.push(master?.[Config.howToMaster - 1])
417
- } else if (Config.howToMaster == 0) {
418
- masterQQ.push(...master)
419
- }
420
- for (const i of masterQQ) {
421
- if (!i) continue
422
- let result
423
- try {
424
- result = await bot?.pickFriend?.(i)?.sendMsg?.(msg) || true
425
- } catch (error) {
426
- result = true
427
- }
428
- if (result) {
429
- logger.mark(`[ws-plugin] 连接名字:${this.name} 通知主人:${i} 处理完成`)
430
- } else {
431
- const timer = setInterval(async () => {
432
- try {
433
- result = await bot?.pickFriend?.(i)?.sendMsg?.(msg) || true
434
- } catch (error) {
435
- result = true
436
- }
437
- if (result) {
438
- clearInterval(timer)
439
- logger.mark(`[ws-plugin] 连接名字:${this.name} 通知主人:${i} 处理完成`)
440
- }
441
- }, 5000)
442
- }
443
- }
444
- }
445
-
446
- }
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/encoders/modules.py DELETED
@@ -1,226 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from torch.utils.checkpoint import checkpoint
4
-
5
- from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel
6
-
7
- import open_clip
8
- from ldm.util import default, count_params
9
-
10
-
11
- default_device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
12
-
13
-
14
- class AbstractEncoder(nn.Module):
15
- def __init__(self):
16
- super().__init__()
17
-
18
- def encode(self, *args, **kwargs):
19
- raise NotImplementedError
20
-
21
-
22
- class IdentityEncoder(AbstractEncoder):
23
-
24
- def encode(self, x):
25
- return x
26
-
27
-
28
- class ClassEmbedder(nn.Module):
29
- def __init__(self, embed_dim, n_classes=1000, key='class', ucg_rate=0.1):
30
- super().__init__()
31
- self.key = key
32
- self.embedding = nn.Embedding(n_classes, embed_dim)
33
- self.n_classes = n_classes
34
- self.ucg_rate = ucg_rate
35
-
36
- def forward(self, batch, key=None, disable_dropout=False):
37
- if key is None:
38
- key = self.key
39
- # this is for use in crossattn
40
- c = batch[key][:, None]
41
- if self.ucg_rate > 0. and not disable_dropout:
42
- mask = 1. - torch.bernoulli(torch.ones_like(c) * self.ucg_rate)
43
- c = mask * c + (1-mask) * torch.ones_like(c)*(self.n_classes-1)
44
- c = c.long()
45
- c = self.embedding(c)
46
- return c
47
-
48
- def get_unconditional_conditioning(self, bs, device=None):
49
- if device is None:
50
- device = default_device
51
- uc_class = self.n_classes - 1 # 1000 classes --> 0 ... 999, one extra class for ucg (class 1000)
52
- uc = torch.ones((bs,), device=device) * uc_class
53
- uc = {self.key: uc}
54
- return uc
55
-
56
-
57
- def disabled_train(self, mode=True):
58
- """Overwrite model.train with this function to make sure train/eval mode
59
- does not change anymore."""
60
- return self
61
-
62
-
63
- class FrozenT5Embedder(AbstractEncoder):
64
- """Uses the T5 transformer encoder for text"""
65
- def __init__(self, version="google/t5-v1_1-large", device=None, max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
66
- super().__init__()
67
- if device is None:
68
- device = default_device
69
- self.tokenizer = T5Tokenizer.from_pretrained(version)
70
- self.transformer = T5EncoderModel.from_pretrained(version)
71
- self.device = device
72
- self.max_length = max_length # TODO: typical value?
73
- if freeze:
74
- self.freeze()
75
-
76
- def freeze(self):
77
- self.transformer = self.transformer.eval()
78
- #self.train = disabled_train
79
- for param in self.parameters():
80
- param.requires_grad = False
81
-
82
- def forward(self, text):
83
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
84
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
85
- tokens = batch_encoding["input_ids"].to(self.device)
86
- outputs = self.transformer(input_ids=tokens)
87
-
88
- z = outputs.last_hidden_state
89
- return z
90
-
91
- def encode(self, text):
92
- return self(text)
93
-
94
-
95
- class FrozenCLIPEmbedder(AbstractEncoder):
96
- """Uses the CLIP transformer encoder for text (from huggingface)"""
97
- LAYERS = [
98
- "last",
99
- "pooled",
100
- "hidden"
101
- ]
102
- def __init__(self, version="openai/clip-vit-large-patch14", device=None, max_length=77,
103
- freeze=True, layer="last", layer_idx=None): # clip-vit-base-patch32
104
- super().__init__()
105
- if device is None:
106
- device = default_device
107
- assert layer in self.LAYERS
108
- self.tokenizer = CLIPTokenizer.from_pretrained(version)
109
- self.transformer = CLIPTextModel.from_pretrained(version)
110
- self.device = device
111
- self.max_length = max_length
112
- if freeze:
113
- self.freeze()
114
- self.layer = layer
115
- self.layer_idx = layer_idx
116
- if layer == "hidden":
117
- assert layer_idx is not None
118
- assert 0 <= abs(layer_idx) <= 12
119
-
120
- def freeze(self):
121
- self.transformer = self.transformer.eval()
122
- #self.train = disabled_train
123
- for param in self.parameters():
124
- param.requires_grad = False
125
-
126
- def forward(self, text):
127
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
128
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
129
- tokens = batch_encoding["input_ids"].to(self.device)
130
- outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
131
- if self.layer == "last":
132
- z = outputs.last_hidden_state
133
- elif self.layer == "pooled":
134
- z = outputs.pooler_output[:, None, :]
135
- else:
136
- z = outputs.hidden_states[self.layer_idx]
137
- return z
138
-
139
- def encode(self, text):
140
- return self(text)
141
-
142
-
143
- class FrozenOpenCLIPEmbedder(AbstractEncoder):
144
- """
145
- Uses the OpenCLIP transformer encoder for text
146
- """
147
- LAYERS = [
148
- #"pooled",
149
- "last",
150
- "penultimate"
151
- ]
152
- def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device=None, max_length=77,
153
- freeze=True, layer="last"):
154
- super().__init__()
155
- if device is None:
156
- device = default_device
157
- assert layer in self.LAYERS
158
- model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
159
- del model.visual
160
- self.model = model
161
-
162
- self.device = device
163
- self.max_length = max_length
164
- if freeze:
165
- self.freeze()
166
- self.layer = layer
167
- if self.layer == "last":
168
- self.layer_idx = 0
169
- elif self.layer == "penultimate":
170
- self.layer_idx = 1
171
- else:
172
- raise NotImplementedError()
173
-
174
- def freeze(self):
175
- self.model = self.model.eval()
176
- for param in self.parameters():
177
- param.requires_grad = False
178
-
179
- def forward(self, text):
180
- tokens = open_clip.tokenize(text)
181
- z = self.encode_with_transformer(tokens.to(self.device))
182
- return z
183
-
184
- def encode_with_transformer(self, text):
185
- x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model]
186
- x = x + self.model.positional_embedding
187
- x = x.permute(1, 0, 2) # NLD -> LND
188
- x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
189
- x = x.permute(1, 0, 2) # LND -> NLD
190
- x = self.model.ln_final(x)
191
- return x
192
-
193
- def text_transformer_forward(self, x: torch.Tensor, attn_mask = None):
194
- for i, r in enumerate(self.model.transformer.resblocks):
195
- if i == len(self.model.transformer.resblocks) - self.layer_idx:
196
- break
197
- if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting():
198
- x = checkpoint(r, x, attn_mask)
199
- else:
200
- x = r(x, attn_mask=attn_mask)
201
- return x
202
-
203
- def encode(self, text):
204
- return self(text)
205
-
206
-
207
- class FrozenCLIPT5Encoder(AbstractEncoder):
208
- def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device=None,
209
- clip_max_length=77, t5_max_length=77):
210
- super().__init__()
211
- if device is None:
212
- device = default_device
213
- self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length)
214
- self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length)
215
- print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, "
216
- f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params.")
217
-
218
- def encode(self, text):
219
- return self(text)
220
-
221
- def forward(self, text):
222
- clip_z = self.clip_encoder.encode(text)
223
- t5_z = self.t5_encoder.encode(text)
224
- return [clip_z, t5_z]
225
-
226
-
spaces/Curranj/GPT-QRI/app.py DELETED
@@ -1,78 +0,0 @@
1
- import sklearn
2
- import sqlite3
3
- import numpy as np
4
- from sklearn.metrics.pairwise import cosine_similarity
5
- import openai
6
- import os
7
- import gradio as gr
8
-
9
-
10
- openai.api_key = os.environ["Secret"]
11
-
12
- def find_closest_neighbors(vector1, dictionary_of_vectors):
13
- """
14
- Takes a vector and a dictionary of vectors and returns the three closest neighbors
15
- """
16
- vector = openai.Embedding.create(
17
- input=vector1,
18
- engine="text-embedding-ada-002"
19
- )['data'][0]['embedding']
20
-
21
- vector = np.array(vector)
22
-
23
- cosine_similarities = {}
24
- for key, value in dictionary_of_vectors.items():
25
- cosine_similarities[key] = cosine_similarity(vector.reshape(1, -1), value.reshape(1, -1))[0][0]
26
-
27
- sorted_cosine_similarities = sorted(cosine_similarities.items(), key=lambda x: x[1], reverse=True)
28
- match_list = sorted_cosine_similarities[0:4]
29
-
30
- return match_list
31
-
32
- def predict(message, history):
33
- # Connect to the database
34
- conn = sqlite3.connect('QRIdatabase7.db')
35
- cursor = conn.cursor()
36
- cursor.execute('''SELECT text, embedding FROM chunks''')
37
- rows = cursor.fetchall()
38
-
39
- dictionary_of_vectors = {}
40
- for row in rows:
41
- text = row[0]
42
- embedding_str = row[1]
43
- embedding = np.fromstring(embedding_str, sep=' ')
44
- dictionary_of_vectors[text] = embedding
45
- conn.close()
46
-
47
- # Find the closest neighbors
48
- match_list = find_closest_neighbors(message, dictionary_of_vectors)
49
- context = ''
50
- for match in match_list:
51
- context += str(match[0])
52
- context = context[:-1500]
53
-
54
- prep = f"This is an OpenAI model tuned to answer questions specific to the Qualia Research institute, a research institute that focuses on consciousness. Here is some question-specific context, and then the Question to answer, related to consciousness, the human experience, and phenomenology: {context}. Here is a question specific to QRI and consciousness in general Q: {message} A: "
55
-
56
- history_openai_format = []
57
- for human, assistant in history:
58
- history_openai_format.append({"role": "user", "content": human })
59
- history_openai_format.append({"role": "assistant", "content":assistant})
60
- history_openai_format.append({"role": "user", "content": prep})
61
-
62
- response = openai.ChatCompletion.create(
63
- model='gpt-3.5-turbo',
64
- messages= history_openai_format,
65
- temperature=1.0,
66
- stream=True
67
- )
68
-
69
- partial_message = ""
70
- for chunk in response:
71
- if len(chunk['choices'][0]['delta']) != 0:
72
- partial_message = partial_message + chunk['choices'][0]['delta']['content']
73
- yield partial_message
74
-
75
- demo = gr.ChatInterface(predict).queue()
76
-
77
- if __name__ == "__main__":
78
- demo.launch()
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/builders/__init__.py DELETED
@@ -1,77 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- from video_llama.datasets.builders.base_dataset_builder import load_dataset_config
9
- from video_llama.datasets.builders.image_text_pair_builder import (
10
- CCSBUBuilder,
11
- LaionBuilder,
12
- CCSBUAlignBuilder
13
- )
14
- from video_llama.datasets.builders.video_caption_builder import WebvidBuilder
15
- from video_llama.common.registry import registry
16
- from video_llama.datasets.builders.instruct_builder import WebvidInstruct_Builder,LlavaInstruct_Builder
17
- __all__ = [
18
- "CCSBUBuilder",
19
- "LaionBuilder",
20
- "CCSBUAlignBuilder",
21
- "WebvidBuilder",
22
- "LlavaInstruct_Builder",
23
- "WebvidInstruct_Builder"
24
-
25
- ]
26
-
27
-
28
- def load_dataset(name, cfg_path=None, vis_path=None, data_type=None):
29
- """
30
- Example
31
-
32
- >>> dataset = load_dataset("coco_caption", cfg=None)
33
- >>> splits = dataset.keys()
34
- >>> print([len(dataset[split]) for split in splits])
35
-
36
- """
37
- if cfg_path is None:
38
- cfg = None
39
- else:
40
- cfg = load_dataset_config(cfg_path)
41
-
42
- try:
43
- builder = registry.get_builder_class(name)(cfg)
44
- except TypeError:
45
- print(
46
- f"Dataset {name} not found. Available datasets:\n"
47
- + ", ".join([str(k) for k in dataset_zoo.get_names()])
48
- )
49
- exit(1)
50
-
51
- if vis_path is not None:
52
- if data_type is None:
53
- # use default data type in the config
54
- data_type = builder.config.data_type
55
-
56
- assert (
57
- data_type in builder.config.build_info
58
- ), f"Invalid data_type {data_type} for {name}."
59
-
60
- builder.config.build_info.get(data_type).storage = vis_path
61
-
62
- dataset = builder.build_datasets()
63
- return dataset
64
-
65
-
66
- class DatasetZoo:
67
- def __init__(self) -> None:
68
- self.dataset_zoo = {
69
- k: list(v.DATASET_CONFIG_DICT.keys())
70
- for k, v in sorted(registry.mapping["builder_name_mapping"].items())
71
- }
72
-
73
- def get_names(self):
74
- return list(self.dataset_zoo.keys())
75
-
76
-
77
- dataset_zoo = DatasetZoo()
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageDraw.py DELETED
@@ -1,1038 +0,0 @@
1
- #
2
- # The Python Imaging Library
3
- # $Id$
4
- #
5
- # drawing interface operations
6
- #
7
- # History:
8
- # 1996-04-13 fl Created (experimental)
9
- # 1996-08-07 fl Filled polygons, ellipses.
10
- # 1996-08-13 fl Added text support
11
- # 1998-06-28 fl Handle I and F images
12
- # 1998-12-29 fl Added arc; use arc primitive to draw ellipses
13
- # 1999-01-10 fl Added shape stuff (experimental)
14
- # 1999-02-06 fl Added bitmap support
15
- # 1999-02-11 fl Changed all primitives to take options
16
- # 1999-02-20 fl Fixed backwards compatibility
17
- # 2000-10-12 fl Copy on write, when necessary
18
- # 2001-02-18 fl Use default ink for bitmap/text also in fill mode
19
- # 2002-10-24 fl Added support for CSS-style color strings
20
- # 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
21
- # 2002-12-11 fl Refactored low-level drawing API (work in progress)
22
- # 2004-08-26 fl Made Draw() a factory function, added getdraw() support
23
- # 2004-09-04 fl Added width support to line primitive
24
- # 2004-09-10 fl Added font mode handling
25
- # 2006-06-19 fl Added font bearing support (getmask2)
26
- #
27
- # Copyright (c) 1997-2006 by Secret Labs AB
28
- # Copyright (c) 1996-2006 by Fredrik Lundh
29
- #
30
- # See the README file for information on usage and redistribution.
31
- #
32
-
33
- import math
34
- import numbers
35
-
36
- from . import Image, ImageColor
37
-
38
- """
39
- A simple 2D drawing interface for PIL images.
40
- <p>
41
- Application code should use the <b>Draw</b> factory, instead of
42
- directly.
43
- """
44
-
45
-
46
- class ImageDraw:
47
- font = None
48
-
49
- def __init__(self, im, mode=None):
50
- """
51
- Create a drawing instance.
52
-
53
- :param im: The image to draw in.
54
- :param mode: Optional mode to use for color values. For RGB
55
- images, this argument can be RGB or RGBA (to blend the
56
- drawing into the image). For all other modes, this argument
57
- must be the same as the image mode. If omitted, the mode
58
- defaults to the mode of the image.
59
- """
60
- im.load()
61
- if im.readonly:
62
- im._copy() # make it writeable
63
- blend = 0
64
- if mode is None:
65
- mode = im.mode
66
- if mode != im.mode:
67
- if mode == "RGBA" and im.mode == "RGB":
68
- blend = 1
69
- else:
70
- msg = "mode mismatch"
71
- raise ValueError(msg)
72
- if mode == "P":
73
- self.palette = im.palette
74
- else:
75
- self.palette = None
76
- self._image = im
77
- self.im = im.im
78
- self.draw = Image.core.draw(self.im, blend)
79
- self.mode = mode
80
- if mode in ("I", "F"):
81
- self.ink = self.draw.draw_ink(1)
82
- else:
83
- self.ink = self.draw.draw_ink(-1)
84
- if mode in ("1", "P", "I", "F"):
85
- # FIXME: fix Fill2 to properly support matte for I+F images
86
- self.fontmode = "1"
87
- else:
88
- self.fontmode = "L" # aliasing is okay for other modes
89
- self.fill = False
90
-
91
- def getfont(self):
92
- """
93
- Get the current default font.
94
-
95
- To set the default font for this ImageDraw instance::
96
-
97
- from PIL import ImageDraw, ImageFont
98
- draw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")
99
-
100
- To set the default font for all future ImageDraw instances::
101
-
102
- from PIL import ImageDraw, ImageFont
103
- ImageDraw.ImageDraw.font = ImageFont.truetype("Tests/fonts/FreeMono.ttf")
104
-
105
- If the current default font is ``None``,
106
- it is initialized with ``ImageFont.load_default()``.
107
-
108
- :returns: An image font."""
109
- if not self.font:
110
- # FIXME: should add a font repository
111
- from . import ImageFont
112
-
113
- self.font = ImageFont.load_default()
114
- return self.font
115
-
116
- def _getink(self, ink, fill=None):
117
- if ink is None and fill is None:
118
- if self.fill:
119
- fill = self.ink
120
- else:
121
- ink = self.ink
122
- else:
123
- if ink is not None:
124
- if isinstance(ink, str):
125
- ink = ImageColor.getcolor(ink, self.mode)
126
- if self.palette and not isinstance(ink, numbers.Number):
127
- ink = self.palette.getcolor(ink, self._image)
128
- ink = self.draw.draw_ink(ink)
129
- if fill is not None:
130
- if isinstance(fill, str):
131
- fill = ImageColor.getcolor(fill, self.mode)
132
- if self.palette and not isinstance(fill, numbers.Number):
133
- fill = self.palette.getcolor(fill, self._image)
134
- fill = self.draw.draw_ink(fill)
135
- return ink, fill
136
-
137
- def arc(self, xy, start, end, fill=None, width=1):
138
- """Draw an arc."""
139
- ink, fill = self._getink(fill)
140
- if ink is not None:
141
- self.draw.draw_arc(xy, start, end, ink, width)
142
-
143
- def bitmap(self, xy, bitmap, fill=None):
144
- """Draw a bitmap."""
145
- bitmap.load()
146
- ink, fill = self._getink(fill)
147
- if ink is None:
148
- ink = fill
149
- if ink is not None:
150
- self.draw.draw_bitmap(xy, bitmap.im, ink)
151
-
152
- def chord(self, xy, start, end, fill=None, outline=None, width=1):
153
- """Draw a chord."""
154
- ink, fill = self._getink(outline, fill)
155
- if fill is not None:
156
- self.draw.draw_chord(xy, start, end, fill, 1)
157
- if ink is not None and ink != fill and width != 0:
158
- self.draw.draw_chord(xy, start, end, ink, 0, width)
159
-
160
- def ellipse(self, xy, fill=None, outline=None, width=1):
161
- """Draw an ellipse."""
162
- ink, fill = self._getink(outline, fill)
163
- if fill is not None:
164
- self.draw.draw_ellipse(xy, fill, 1)
165
- if ink is not None and ink != fill and width != 0:
166
- self.draw.draw_ellipse(xy, ink, 0, width)
167
-
168
- def line(self, xy, fill=None, width=0, joint=None):
169
- """Draw a line, or a connected sequence of line segments."""
170
- ink = self._getink(fill)[0]
171
- if ink is not None:
172
- self.draw.draw_lines(xy, ink, width)
173
- if joint == "curve" and width > 4:
174
- if not isinstance(xy[0], (list, tuple)):
175
- xy = [tuple(xy[i : i + 2]) for i in range(0, len(xy), 2)]
176
- for i in range(1, len(xy) - 1):
177
- point = xy[i]
178
- angles = [
179
- math.degrees(math.atan2(end[0] - start[0], start[1] - end[1]))
180
- % 360
181
- for start, end in ((xy[i - 1], point), (point, xy[i + 1]))
182
- ]
183
- if angles[0] == angles[1]:
184
- # This is a straight line, so no joint is required
185
- continue
186
-
187
- def coord_at_angle(coord, angle):
188
- x, y = coord
189
- angle -= 90
190
- distance = width / 2 - 1
191
- return tuple(
192
- p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d))
193
- for p, p_d in (
194
- (x, distance * math.cos(math.radians(angle))),
195
- (y, distance * math.sin(math.radians(angle))),
196
- )
197
- )
198
-
199
- flipped = (
200
- angles[1] > angles[0] and angles[1] - 180 > angles[0]
201
- ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0])
202
- coords = [
203
- (point[0] - width / 2 + 1, point[1] - width / 2 + 1),
204
- (point[0] + width / 2 - 1, point[1] + width / 2 - 1),
205
- ]
206
- if flipped:
207
- start, end = (angles[1] + 90, angles[0] + 90)
208
- else:
209
- start, end = (angles[0] - 90, angles[1] - 90)
210
- self.pieslice(coords, start - 90, end - 90, fill)
211
-
212
- if width > 8:
213
- # Cover potential gaps between the line and the joint
214
- if flipped:
215
- gap_coords = [
216
- coord_at_angle(point, angles[0] + 90),
217
- point,
218
- coord_at_angle(point, angles[1] + 90),
219
- ]
220
- else:
221
- gap_coords = [
222
- coord_at_angle(point, angles[0] - 90),
223
- point,
224
- coord_at_angle(point, angles[1] - 90),
225
- ]
226
- self.line(gap_coords, fill, width=3)
227
-
228
- def shape(self, shape, fill=None, outline=None):
229
- """(Experimental) Draw a shape."""
230
- shape.close()
231
- ink, fill = self._getink(outline, fill)
232
- if fill is not None:
233
- self.draw.draw_outline(shape, fill, 1)
234
- if ink is not None and ink != fill:
235
- self.draw.draw_outline(shape, ink, 0)
236
-
237
- def pieslice(self, xy, start, end, fill=None, outline=None, width=1):
238
- """Draw a pieslice."""
239
- ink, fill = self._getink(outline, fill)
240
- if fill is not None:
241
- self.draw.draw_pieslice(xy, start, end, fill, 1)
242
- if ink is not None and ink != fill and width != 0:
243
- self.draw.draw_pieslice(xy, start, end, ink, 0, width)
244
-
245
- def point(self, xy, fill=None):
246
- """Draw one or more individual pixels."""
247
- ink, fill = self._getink(fill)
248
- if ink is not None:
249
- self.draw.draw_points(xy, ink)
250
-
251
- def polygon(self, xy, fill=None, outline=None, width=1):
252
- """Draw a polygon."""
253
- ink, fill = self._getink(outline, fill)
254
- if fill is not None:
255
- self.draw.draw_polygon(xy, fill, 1)
256
- if ink is not None and ink != fill and width != 0:
257
- if width == 1:
258
- self.draw.draw_polygon(xy, ink, 0, width)
259
- else:
260
- # To avoid expanding the polygon outwards,
261
- # use the fill as a mask
262
- mask = Image.new("1", self.im.size)
263
- mask_ink = self._getink(1)[0]
264
-
265
- fill_im = mask.copy()
266
- draw = Draw(fill_im)
267
- draw.draw.draw_polygon(xy, mask_ink, 1)
268
-
269
- ink_im = mask.copy()
270
- draw = Draw(ink_im)
271
- width = width * 2 - 1
272
- draw.draw.draw_polygon(xy, mask_ink, 0, width)
273
-
274
- mask.paste(ink_im, mask=fill_im)
275
-
276
- im = Image.new(self.mode, self.im.size)
277
- draw = Draw(im)
278
- draw.draw.draw_polygon(xy, ink, 0, width)
279
- self.im.paste(im.im, (0, 0) + im.size, mask.im)
280
-
281
- def regular_polygon(
282
- self, bounding_circle, n_sides, rotation=0, fill=None, outline=None, width=1
283
- ):
284
- """Draw a regular polygon."""
285
- xy = _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation)
286
- self.polygon(xy, fill, outline, width)
287
-
288
- def rectangle(self, xy, fill=None, outline=None, width=1):
289
- """Draw a rectangle."""
290
- ink, fill = self._getink(outline, fill)
291
- if fill is not None:
292
- self.draw.draw_rectangle(xy, fill, 1)
293
- if ink is not None and ink != fill and width != 0:
294
- self.draw.draw_rectangle(xy, ink, 0, width)
295
-
296
- def rounded_rectangle(
297
- self, xy, radius=0, fill=None, outline=None, width=1, *, corners=None
298
- ):
299
- """Draw a rounded rectangle."""
300
- if isinstance(xy[0], (list, tuple)):
301
- (x0, y0), (x1, y1) = xy
302
- else:
303
- x0, y0, x1, y1 = xy
304
- if x1 < x0:
305
- msg = "x1 must be greater than or equal to x0"
306
- raise ValueError(msg)
307
- if y1 < y0:
308
- msg = "y1 must be greater than or equal to y0"
309
- raise ValueError(msg)
310
- if corners is None:
311
- corners = (True, True, True, True)
312
-
313
- d = radius * 2
314
-
315
- full_x, full_y = False, False
316
- if all(corners):
317
- full_x = d >= x1 - x0 - 1
318
- if full_x:
319
- # The two left and two right corners are joined
320
- d = x1 - x0
321
- full_y = d >= y1 - y0 - 1
322
- if full_y:
323
- # The two top and two bottom corners are joined
324
- d = y1 - y0
325
- if full_x and full_y:
326
- # If all corners are joined, that is a circle
327
- return self.ellipse(xy, fill, outline, width)
328
-
329
- if d == 0 or not any(corners):
330
- # If the corners have no curve,
331
- # or there are no corners,
332
- # that is a rectangle
333
- return self.rectangle(xy, fill, outline, width)
334
-
335
- r = d // 2
336
- ink, fill = self._getink(outline, fill)
337
-
338
- def draw_corners(pieslice):
339
- if full_x:
340
- # Draw top and bottom halves
341
- parts = (
342
- ((x0, y0, x0 + d, y0 + d), 180, 360),
343
- ((x0, y1 - d, x0 + d, y1), 0, 180),
344
- )
345
- elif full_y:
346
- # Draw left and right halves
347
- parts = (
348
- ((x0, y0, x0 + d, y0 + d), 90, 270),
349
- ((x1 - d, y0, x1, y0 + d), 270, 90),
350
- )
351
- else:
352
- # Draw four separate corners
353
- parts = []
354
- for i, part in enumerate(
355
- (
356
- ((x0, y0, x0 + d, y0 + d), 180, 270),
357
- ((x1 - d, y0, x1, y0 + d), 270, 360),
358
- ((x1 - d, y1 - d, x1, y1), 0, 90),
359
- ((x0, y1 - d, x0 + d, y1), 90, 180),
360
- )
361
- ):
362
- if corners[i]:
363
- parts.append(part)
364
- for part in parts:
365
- if pieslice:
366
- self.draw.draw_pieslice(*(part + (fill, 1)))
367
- else:
368
- self.draw.draw_arc(*(part + (ink, width)))
369
-
370
- if fill is not None:
371
- draw_corners(True)
372
-
373
- if full_x:
374
- self.draw.draw_rectangle((x0, y0 + r + 1, x1, y1 - r - 1), fill, 1)
375
- else:
376
- self.draw.draw_rectangle((x0 + r + 1, y0, x1 - r - 1, y1), fill, 1)
377
- if not full_x and not full_y:
378
- left = [x0, y0, x0 + r, y1]
379
- if corners[0]:
380
- left[1] += r + 1
381
- if corners[3]:
382
- left[3] -= r + 1
383
- self.draw.draw_rectangle(left, fill, 1)
384
-
385
- right = [x1 - r, y0, x1, y1]
386
- if corners[1]:
387
- right[1] += r + 1
388
- if corners[2]:
389
- right[3] -= r + 1
390
- self.draw.draw_rectangle(right, fill, 1)
391
- if ink is not None and ink != fill and width != 0:
392
- draw_corners(False)
393
-
394
- if not full_x:
395
- top = [x0, y0, x1, y0 + width - 1]
396
- if corners[0]:
397
- top[0] += r + 1
398
- if corners[1]:
399
- top[2] -= r + 1
400
- self.draw.draw_rectangle(top, ink, 1)
401
-
402
- bottom = [x0, y1 - width + 1, x1, y1]
403
- if corners[3]:
404
- bottom[0] += r + 1
405
- if corners[2]:
406
- bottom[2] -= r + 1
407
- self.draw.draw_rectangle(bottom, ink, 1)
408
- if not full_y:
409
- left = [x0, y0, x0 + width - 1, y1]
410
- if corners[0]:
411
- left[1] += r + 1
412
- if corners[3]:
413
- left[3] -= r + 1
414
- self.draw.draw_rectangle(left, ink, 1)
415
-
416
- right = [x1 - width + 1, y0, x1, y1]
417
- if corners[1]:
418
- right[1] += r + 1
419
- if corners[2]:
420
- right[3] -= r + 1
421
- self.draw.draw_rectangle(right, ink, 1)
422
-
423
- def _multiline_check(self, text):
424
- split_character = "\n" if isinstance(text, str) else b"\n"
425
-
426
- return split_character in text
427
-
428
- def _multiline_split(self, text):
429
- split_character = "\n" if isinstance(text, str) else b"\n"
430
-
431
- return text.split(split_character)
432
-
433
- def _multiline_spacing(self, font, spacing, stroke_width):
434
- return (
435
- self.textbbox((0, 0), "A", font, stroke_width=stroke_width)[3]
436
- + stroke_width
437
- + spacing
438
- )
439
-
440
- def text(
441
- self,
442
- xy,
443
- text,
444
- fill=None,
445
- font=None,
446
- anchor=None,
447
- spacing=4,
448
- align="left",
449
- direction=None,
450
- features=None,
451
- language=None,
452
- stroke_width=0,
453
- stroke_fill=None,
454
- embedded_color=False,
455
- *args,
456
- **kwargs,
457
- ):
458
- """Draw text."""
459
- if self._multiline_check(text):
460
- return self.multiline_text(
461
- xy,
462
- text,
463
- fill,
464
- font,
465
- anchor,
466
- spacing,
467
- align,
468
- direction,
469
- features,
470
- language,
471
- stroke_width,
472
- stroke_fill,
473
- embedded_color,
474
- )
475
-
476
- if embedded_color and self.mode not in ("RGB", "RGBA"):
477
- msg = "Embedded color supported only in RGB and RGBA modes"
478
- raise ValueError(msg)
479
-
480
- if font is None:
481
- font = self.getfont()
482
-
483
- def getink(fill):
484
- ink, fill = self._getink(fill)
485
- if ink is None:
486
- return fill
487
- return ink
488
-
489
- def draw_text(ink, stroke_width=0, stroke_offset=None):
490
- mode = self.fontmode
491
- if stroke_width == 0 and embedded_color:
492
- mode = "RGBA"
493
- coord = []
494
- start = []
495
- for i in range(2):
496
- coord.append(int(xy[i]))
497
- start.append(math.modf(xy[i])[0])
498
- try:
499
- mask, offset = font.getmask2(
500
- text,
501
- mode,
502
- direction=direction,
503
- features=features,
504
- language=language,
505
- stroke_width=stroke_width,
506
- anchor=anchor,
507
- ink=ink,
508
- start=start,
509
- *args,
510
- **kwargs,
511
- )
512
- coord = coord[0] + offset[0], coord[1] + offset[1]
513
- except AttributeError:
514
- try:
515
- mask = font.getmask(
516
- text,
517
- mode,
518
- direction,
519
- features,
520
- language,
521
- stroke_width,
522
- anchor,
523
- ink,
524
- start=start,
525
- *args,
526
- **kwargs,
527
- )
528
- except TypeError:
529
- mask = font.getmask(text)
530
- if stroke_offset:
531
- coord = coord[0] + stroke_offset[0], coord[1] + stroke_offset[1]
532
- if mode == "RGBA":
533
- # font.getmask2(mode="RGBA") returns color in RGB bands and mask in A
534
- # extract mask and set text alpha
535
- color, mask = mask, mask.getband(3)
536
- color.fillband(3, (ink >> 24) & 0xFF)
537
- x, y = coord
538
- self.im.paste(color, (x, y, x + mask.size[0], y + mask.size[1]), mask)
539
- else:
540
- self.draw.draw_bitmap(coord, mask, ink)
541
-
542
- ink = getink(fill)
543
- if ink is not None:
544
- stroke_ink = None
545
- if stroke_width:
546
- stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink
547
-
548
- if stroke_ink is not None:
549
- # Draw stroked text
550
- draw_text(stroke_ink, stroke_width)
551
-
552
- # Draw normal text
553
- draw_text(ink, 0)
554
- else:
555
- # Only draw normal text
556
- draw_text(ink)
557
-
558
- def multiline_text(
559
- self,
560
- xy,
561
- text,
562
- fill=None,
563
- font=None,
564
- anchor=None,
565
- spacing=4,
566
- align="left",
567
- direction=None,
568
- features=None,
569
- language=None,
570
- stroke_width=0,
571
- stroke_fill=None,
572
- embedded_color=False,
573
- ):
574
- if direction == "ttb":
575
- msg = "ttb direction is unsupported for multiline text"
576
- raise ValueError(msg)
577
-
578
- if anchor is None:
579
- anchor = "la"
580
- elif len(anchor) != 2:
581
- msg = "anchor must be a 2 character string"
582
- raise ValueError(msg)
583
- elif anchor[1] in "tb":
584
- msg = "anchor not supported for multiline text"
585
- raise ValueError(msg)
586
-
587
- widths = []
588
- max_width = 0
589
- lines = self._multiline_split(text)
590
- line_spacing = self._multiline_spacing(font, spacing, stroke_width)
591
- for line in lines:
592
- line_width = self.textlength(
593
- line, font, direction=direction, features=features, language=language
594
- )
595
- widths.append(line_width)
596
- max_width = max(max_width, line_width)
597
-
598
- top = xy[1]
599
- if anchor[1] == "m":
600
- top -= (len(lines) - 1) * line_spacing / 2.0
601
- elif anchor[1] == "d":
602
- top -= (len(lines) - 1) * line_spacing
603
-
604
- for idx, line in enumerate(lines):
605
- left = xy[0]
606
- width_difference = max_width - widths[idx]
607
-
608
- # first align left by anchor
609
- if anchor[0] == "m":
610
- left -= width_difference / 2.0
611
- elif anchor[0] == "r":
612
- left -= width_difference
613
-
614
- # then align by align parameter
615
- if align == "left":
616
- pass
617
- elif align == "center":
618
- left += width_difference / 2.0
619
- elif align == "right":
620
- left += width_difference
621
- else:
622
- msg = 'align must be "left", "center" or "right"'
623
- raise ValueError(msg)
624
-
625
- self.text(
626
- (left, top),
627
- line,
628
- fill,
629
- font,
630
- anchor,
631
- direction=direction,
632
- features=features,
633
- language=language,
634
- stroke_width=stroke_width,
635
- stroke_fill=stroke_fill,
636
- embedded_color=embedded_color,
637
- )
638
- top += line_spacing
639
-
640
- def textlength(
641
- self,
642
- text,
643
- font=None,
644
- direction=None,
645
- features=None,
646
- language=None,
647
- embedded_color=False,
648
- ):
649
- """Get the length of a given string, in pixels with 1/64 precision."""
650
- if self._multiline_check(text):
651
- msg = "can't measure length of multiline text"
652
- raise ValueError(msg)
653
- if embedded_color and self.mode not in ("RGB", "RGBA"):
654
- msg = "Embedded color supported only in RGB and RGBA modes"
655
- raise ValueError(msg)
656
-
657
- if font is None:
658
- font = self.getfont()
659
- mode = "RGBA" if embedded_color else self.fontmode
660
- return font.getlength(text, mode, direction, features, language)
661
-
662
- def textbbox(
663
- self,
664
- xy,
665
- text,
666
- font=None,
667
- anchor=None,
668
- spacing=4,
669
- align="left",
670
- direction=None,
671
- features=None,
672
- language=None,
673
- stroke_width=0,
674
- embedded_color=False,
675
- ):
676
- """Get the bounding box of a given string, in pixels."""
677
- if embedded_color and self.mode not in ("RGB", "RGBA"):
678
- msg = "Embedded color supported only in RGB and RGBA modes"
679
- raise ValueError(msg)
680
-
681
- if self._multiline_check(text):
682
- return self.multiline_textbbox(
683
- xy,
684
- text,
685
- font,
686
- anchor,
687
- spacing,
688
- align,
689
- direction,
690
- features,
691
- language,
692
- stroke_width,
693
- embedded_color,
694
- )
695
-
696
- if font is None:
697
- font = self.getfont()
698
- mode = "RGBA" if embedded_color else self.fontmode
699
- bbox = font.getbbox(
700
- text, mode, direction, features, language, stroke_width, anchor
701
- )
702
- return bbox[0] + xy[0], bbox[1] + xy[1], bbox[2] + xy[0], bbox[3] + xy[1]
703
-
704
- def multiline_textbbox(
705
- self,
706
- xy,
707
- text,
708
- font=None,
709
- anchor=None,
710
- spacing=4,
711
- align="left",
712
- direction=None,
713
- features=None,
714
- language=None,
715
- stroke_width=0,
716
- embedded_color=False,
717
- ):
718
- if direction == "ttb":
719
- msg = "ttb direction is unsupported for multiline text"
720
- raise ValueError(msg)
721
-
722
- if anchor is None:
723
- anchor = "la"
724
- elif len(anchor) != 2:
725
- msg = "anchor must be a 2 character string"
726
- raise ValueError(msg)
727
- elif anchor[1] in "tb":
728
- msg = "anchor not supported for multiline text"
729
- raise ValueError(msg)
730
-
731
- widths = []
732
- max_width = 0
733
- lines = self._multiline_split(text)
734
- line_spacing = self._multiline_spacing(font, spacing, stroke_width)
735
- for line in lines:
736
- line_width = self.textlength(
737
- line,
738
- font,
739
- direction=direction,
740
- features=features,
741
- language=language,
742
- embedded_color=embedded_color,
743
- )
744
- widths.append(line_width)
745
- max_width = max(max_width, line_width)
746
-
747
- top = xy[1]
748
- if anchor[1] == "m":
749
- top -= (len(lines) - 1) * line_spacing / 2.0
750
- elif anchor[1] == "d":
751
- top -= (len(lines) - 1) * line_spacing
752
-
753
- bbox = None
754
-
755
- for idx, line in enumerate(lines):
756
- left = xy[0]
757
- width_difference = max_width - widths[idx]
758
-
759
- # first align left by anchor
760
- if anchor[0] == "m":
761
- left -= width_difference / 2.0
762
- elif anchor[0] == "r":
763
- left -= width_difference
764
-
765
- # then align by align parameter
766
- if align == "left":
767
- pass
768
- elif align == "center":
769
- left += width_difference / 2.0
770
- elif align == "right":
771
- left += width_difference
772
- else:
773
- msg = 'align must be "left", "center" or "right"'
774
- raise ValueError(msg)
775
-
776
- bbox_line = self.textbbox(
777
- (left, top),
778
- line,
779
- font,
780
- anchor,
781
- direction=direction,
782
- features=features,
783
- language=language,
784
- stroke_width=stroke_width,
785
- embedded_color=embedded_color,
786
- )
787
- if bbox is None:
788
- bbox = bbox_line
789
- else:
790
- bbox = (
791
- min(bbox[0], bbox_line[0]),
792
- min(bbox[1], bbox_line[1]),
793
- max(bbox[2], bbox_line[2]),
794
- max(bbox[3], bbox_line[3]),
795
- )
796
-
797
- top += line_spacing
798
-
799
- if bbox is None:
800
- return xy[0], xy[1], xy[0], xy[1]
801
- return bbox
802
-
803
-
804
- def Draw(im, mode=None):
805
- """
806
- A simple 2D drawing interface for PIL images.
807
-
808
- :param im: The image to draw in.
809
- :param mode: Optional mode to use for color values. For RGB
810
- images, this argument can be RGB or RGBA (to blend the
811
- drawing into the image). For all other modes, this argument
812
- must be the same as the image mode. If omitted, the mode
813
- defaults to the mode of the image.
814
- """
815
- try:
816
- return im.getdraw(mode)
817
- except AttributeError:
818
- return ImageDraw(im, mode)
819
-
820
-
821
- # experimental access to the outline API
822
- try:
823
- Outline = Image.core.outline
824
- except AttributeError:
825
- Outline = None
826
-
827
-
828
- def getdraw(im=None, hints=None):
829
- """
830
- (Experimental) A more advanced 2D drawing interface for PIL images,
831
- based on the WCK interface.
832
-
833
- :param im: The image to draw in.
834
- :param hints: An optional list of hints.
835
- :returns: A (drawing context, drawing resource factory) tuple.
836
- """
837
- # FIXME: this needs more work!
838
- # FIXME: come up with a better 'hints' scheme.
839
- handler = None
840
- if not hints or "nicest" in hints:
841
- try:
842
- from . import _imagingagg as handler
843
- except ImportError:
844
- pass
845
- if handler is None:
846
- from . import ImageDraw2 as handler
847
- if im:
848
- im = handler.Draw(im)
849
- return im, handler
850
-
851
-
852
- def floodfill(image, xy, value, border=None, thresh=0):
853
- """
854
- (experimental) Fills a bounded region with a given color.
855
-
856
- :param image: Target image.
857
- :param xy: Seed position (a 2-item coordinate tuple). See
858
- :ref:`coordinate-system`.
859
- :param value: Fill color.
860
- :param border: Optional border value. If given, the region consists of
861
- pixels with a color different from the border color. If not given,
862
- the region consists of pixels having the same color as the seed
863
- pixel.
864
- :param thresh: Optional threshold value which specifies a maximum
865
- tolerable difference of a pixel value from the 'background' in
866
- order for it to be replaced. Useful for filling regions of
867
- non-homogeneous, but similar, colors.
868
- """
869
- # based on an implementation by Eric S. Raymond
870
- # amended by yo1995 @20180806
871
- pixel = image.load()
872
- x, y = xy
873
- try:
874
- background = pixel[x, y]
875
- if _color_diff(value, background) <= thresh:
876
- return # seed point already has fill color
877
- pixel[x, y] = value
878
- except (ValueError, IndexError):
879
- return # seed point outside image
880
- edge = {(x, y)}
881
- # use a set to keep record of current and previous edge pixels
882
- # to reduce memory consumption
883
- full_edge = set()
884
- while edge:
885
- new_edge = set()
886
- for x, y in edge: # 4 adjacent method
887
- for s, t in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
888
- # If already processed, or if a coordinate is negative, skip
889
- if (s, t) in full_edge or s < 0 or t < 0:
890
- continue
891
- try:
892
- p = pixel[s, t]
893
- except (ValueError, IndexError):
894
- pass
895
- else:
896
- full_edge.add((s, t))
897
- if border is None:
898
- fill = _color_diff(p, background) <= thresh
899
- else:
900
- fill = p != value and p != border
901
- if fill:
902
- pixel[s, t] = value
903
- new_edge.add((s, t))
904
- full_edge = edge # discard pixels processed
905
- edge = new_edge
906
-
907
-
908
- def _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation):
909
- """
910
- Generate a list of vertices for a 2D regular polygon.
911
-
912
- :param bounding_circle: The bounding circle is a tuple defined
913
- by a point and radius. The polygon is inscribed in this circle.
914
- (e.g. ``bounding_circle=(x, y, r)`` or ``((x, y), r)``)
915
- :param n_sides: Number of sides
916
- (e.g. ``n_sides=3`` for a triangle, ``6`` for a hexagon)
917
- :param rotation: Apply an arbitrary rotation to the polygon
918
- (e.g. ``rotation=90``, applies a 90 degree rotation)
919
- :return: List of regular polygon vertices
920
- (e.g. ``[(25, 50), (50, 50), (50, 25), (25, 25)]``)
921
-
922
- How are the vertices computed?
923
- 1. Compute the following variables
924
- - theta: Angle between the apothem & the nearest polygon vertex
925
- - side_length: Length of each polygon edge
926
- - centroid: Center of bounding circle (1st, 2nd elements of bounding_circle)
927
- - polygon_radius: Polygon radius (last element of bounding_circle)
928
- - angles: Location of each polygon vertex in polar grid
929
- (e.g. A square with 0 degree rotation => [225.0, 315.0, 45.0, 135.0])
930
-
931
- 2. For each angle in angles, get the polygon vertex at that angle
932
- The vertex is computed using the equation below.
933
- X= xcos(φ) + ysin(φ)
934
- Y= −xsin(φ) + ycos(φ)
935
-
936
- Note:
937
- φ = angle in degrees
938
- x = 0
939
- y = polygon_radius
940
-
941
- The formula above assumes rotation around the origin.
942
- In our case, we are rotating around the centroid.
943
- To account for this, we use the formula below
944
- X = xcos(φ) + ysin(φ) + centroid_x
945
- Y = −xsin(φ) + ycos(φ) + centroid_y
946
- """
947
- # 1. Error Handling
948
- # 1.1 Check `n_sides` has an appropriate value
949
- if not isinstance(n_sides, int):
950
- msg = "n_sides should be an int"
951
- raise TypeError(msg)
952
- if n_sides < 3:
953
- msg = "n_sides should be an int > 2"
954
- raise ValueError(msg)
955
-
956
- # 1.2 Check `bounding_circle` has an appropriate value
957
- if not isinstance(bounding_circle, (list, tuple)):
958
- msg = "bounding_circle should be a tuple"
959
- raise TypeError(msg)
960
-
961
- if len(bounding_circle) == 3:
962
- *centroid, polygon_radius = bounding_circle
963
- elif len(bounding_circle) == 2:
964
- centroid, polygon_radius = bounding_circle
965
- else:
966
- msg = (
967
- "bounding_circle should contain 2D coordinates "
968
- "and a radius (e.g. (x, y, r) or ((x, y), r) )"
969
- )
970
- raise ValueError(msg)
971
-
972
- if not all(isinstance(i, (int, float)) for i in (*centroid, polygon_radius)):
973
- msg = "bounding_circle should only contain numeric data"
974
- raise ValueError(msg)
975
-
976
- if not len(centroid) == 2:
977
- msg = "bounding_circle centre should contain 2D coordinates (e.g. (x, y))"
978
- raise ValueError(msg)
979
-
980
- if polygon_radius <= 0:
981
- msg = "bounding_circle radius should be > 0"
982
- raise ValueError(msg)
983
-
984
- # 1.3 Check `rotation` has an appropriate value
985
- if not isinstance(rotation, (int, float)):
986
- msg = "rotation should be an int or float"
987
- raise ValueError(msg)
988
-
989
- # 2. Define Helper Functions
990
- def _apply_rotation(point, degrees, centroid):
991
- return (
992
- round(
993
- point[0] * math.cos(math.radians(360 - degrees))
994
- - point[1] * math.sin(math.radians(360 - degrees))
995
- + centroid[0],
996
- 2,
997
- ),
998
- round(
999
- point[1] * math.cos(math.radians(360 - degrees))
1000
- + point[0] * math.sin(math.radians(360 - degrees))
1001
- + centroid[1],
1002
- 2,
1003
- ),
1004
- )
1005
-
1006
- def _compute_polygon_vertex(centroid, polygon_radius, angle):
1007
- start_point = [polygon_radius, 0]
1008
- return _apply_rotation(start_point, angle, centroid)
1009
-
1010
- def _get_angles(n_sides, rotation):
1011
- angles = []
1012
- degrees = 360 / n_sides
1013
- # Start with the bottom left polygon vertex
1014
- current_angle = (270 - 0.5 * degrees) + rotation
1015
- for _ in range(0, n_sides):
1016
- angles.append(current_angle)
1017
- current_angle += degrees
1018
- if current_angle > 360:
1019
- current_angle -= 360
1020
- return angles
1021
-
1022
- # 3. Variable Declarations
1023
- angles = _get_angles(n_sides, rotation)
1024
-
1025
- # 4. Compute Vertices
1026
- return [
1027
- _compute_polygon_vertex(centroid, polygon_radius, angle) for angle in angles
1028
- ]
1029
-
1030
-
1031
- def _color_diff(color1, color2):
1032
- """
1033
- Uses 1-norm distance to calculate difference between two values.
1034
- """
1035
- if isinstance(color2, tuple):
1036
- return sum(abs(color1[i] - color2[i]) for i in range(0, len(color2)))
1037
- else:
1038
- return abs(color1 - color2)
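
The module above is the stock Pillow ImageDraw implementation; here is a brief sketch of its public drawing API, using only calls that appear in the source above (Draw, rectangle, rounded_rectangle, regular_polygon, text, floodfill).

from PIL import Image, ImageDraw

im = Image.new("RGB", (200, 120), "white")
draw = ImageDraw.Draw(im)  # factory function returning an ImageDraw instance

# Outlined rectangle, and a rounded rectangle with only the top corners curved
draw.rectangle((10, 10, 90, 60), outline="black", width=2)
draw.rounded_rectangle((110, 10, 190, 60), radius=12, fill="lightblue",
                       corners=(True, True, False, False))

# Regular hexagon inscribed in the bounding circle (x, y, r)
draw.regular_polygon((50, 90, 20), n_sides=6, rotation=30, outline="red")

# Text rendered with the default font
draw.text((110, 80), "hello", fill="black")

# Flood-fill the background starting from a corner pixel
ImageDraw.floodfill(im, (1, 1), (240, 240, 200), thresh=10)

im.save("imagedraw_demo.png")
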
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/openapi/docs.py DELETED
@@ -1,203 +0,0 @@
1
- import json
2
- from typing import Any, Dict, Optional
3
-
4
- from fastapi.encoders import jsonable_encoder
5
- from starlette.responses import HTMLResponse
6
-
7
- swagger_ui_default_parameters = {
8
- "dom_id": "#swagger-ui",
9
- "layout": "BaseLayout",
10
- "deepLinking": True,
11
- "showExtensions": True,
12
- "showCommonExtensions": True,
13
- }
14
-
15
-
16
- def get_swagger_ui_html(
17
- *,
18
- openapi_url: str,
19
- title: str,
20
- swagger_js_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui-bundle.js",
21
- swagger_css_url: str = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5/swagger-ui.css",
22
- swagger_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png",
23
- oauth2_redirect_url: Optional[str] = None,
24
- init_oauth: Optional[Dict[str, Any]] = None,
25
- swagger_ui_parameters: Optional[Dict[str, Any]] = None,
26
- ) -> HTMLResponse:
27
- current_swagger_ui_parameters = swagger_ui_default_parameters.copy()
28
- if swagger_ui_parameters:
29
- current_swagger_ui_parameters.update(swagger_ui_parameters)
30
-
31
- html = f"""
32
- <!DOCTYPE html>
33
- <html>
34
- <head>
35
- <link type="text/css" rel="stylesheet" href="{swagger_css_url}">
36
- <link rel="shortcut icon" href="{swagger_favicon_url}">
37
- <title>{title}</title>
38
- </head>
39
- <body>
40
- <div id="swagger-ui">
41
- </div>
42
- <script src="{swagger_js_url}"></script>
43
- <!-- `SwaggerUIBundle` is now available on the page -->
44
- <script>
45
- const ui = SwaggerUIBundle({{
46
- url: '{openapi_url}',
47
- """
48
-
49
- for key, value in current_swagger_ui_parameters.items():
50
- html += f"{json.dumps(key)}: {json.dumps(jsonable_encoder(value))},\n"
51
-
52
- if oauth2_redirect_url:
53
- html += f"oauth2RedirectUrl: window.location.origin + '{oauth2_redirect_url}',"
54
-
55
- html += """
56
- presets: [
57
- SwaggerUIBundle.presets.apis,
58
- SwaggerUIBundle.SwaggerUIStandalonePreset
59
- ],
60
- })"""
61
-
62
- if init_oauth:
63
- html += f"""
64
- ui.initOAuth({json.dumps(jsonable_encoder(init_oauth))})
65
- """
66
-
67
- html += """
68
- </script>
69
- </body>
70
- </html>
71
- """
72
- return HTMLResponse(html)
73
-
74
-
75
- def get_redoc_html(
76
- *,
77
- openapi_url: str,
78
- title: str,
79
- redoc_js_url: str = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js",
80
- redoc_favicon_url: str = "https://fastapi.tiangolo.com/img/favicon.png",
81
- with_google_fonts: bool = True,
82
- ) -> HTMLResponse:
83
- html = f"""
84
- <!DOCTYPE html>
85
- <html>
86
- <head>
87
- <title>{title}</title>
88
- <!-- needed for adaptive design -->
89
- <meta charset="utf-8"/>
90
- <meta name="viewport" content="width=device-width, initial-scale=1">
91
- """
92
- if with_google_fonts:
93
- html += """
94
- <link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
95
- """
96
- html += f"""
97
- <link rel="shortcut icon" href="{redoc_favicon_url}">
98
- <!--
99
- ReDoc doesn't change outer page styles
100
- -->
101
- <style>
102
- body {{
103
- margin: 0;
104
- padding: 0;
105
- }}
106
- </style>
107
- </head>
108
- <body>
109
- <noscript>
110
- ReDoc requires Javascript to function. Please enable it to browse the documentation.
111
- </noscript>
112
- <redoc spec-url="{openapi_url}"></redoc>
113
- <script src="{redoc_js_url}"> </script>
114
- </body>
115
- </html>
116
- """
117
- return HTMLResponse(html)
118
-
119
-
120
- def get_swagger_ui_oauth2_redirect_html() -> HTMLResponse:
121
- # copied from https://github.com/swagger-api/swagger-ui/blob/v4.14.0/dist/oauth2-redirect.html
122
- html = """
123
- <!doctype html>
124
- <html lang="en-US">
125
- <head>
126
- <title>Swagger UI: OAuth2 Redirect</title>
127
- </head>
128
- <body>
129
- <script>
130
- 'use strict';
131
- function run () {
132
- var oauth2 = window.opener.swaggerUIRedirectOauth2;
133
- var sentState = oauth2.state;
134
- var redirectUrl = oauth2.redirectUrl;
135
- var isValid, qp, arr;
136
-
137
- if (/code|token|error/.test(window.location.hash)) {
138
- qp = window.location.hash.substring(1).replace('?', '&');
139
- } else {
140
- qp = location.search.substring(1);
141
- }
142
-
143
- arr = qp.split("&");
144
- arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
145
- qp = qp ? JSON.parse('{' + arr.join() + '}',
146
- function (key, value) {
147
- return key === "" ? value : decodeURIComponent(value);
148
- }
149
- ) : {};
150
-
151
- isValid = qp.state === sentState;
152
-
153
- if ((
154
- oauth2.auth.schema.get("flow") === "accessCode" ||
155
- oauth2.auth.schema.get("flow") === "authorizationCode" ||
156
- oauth2.auth.schema.get("flow") === "authorization_code"
157
- ) && !oauth2.auth.code) {
158
- if (!isValid) {
159
- oauth2.errCb({
160
- authId: oauth2.auth.name,
161
- source: "auth",
162
- level: "warning",
163
- message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
164
- });
165
- }
166
-
167
- if (qp.code) {
168
- delete oauth2.state;
169
- oauth2.auth.code = qp.code;
170
- oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
171
- } else {
172
- let oauthErrorMsg;
173
- if (qp.error) {
174
- oauthErrorMsg = "["+qp.error+"]: " +
175
- (qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
176
- (qp.error_uri ? "More info: "+qp.error_uri : "");
177
- }
178
-
179
- oauth2.errCb({
180
- authId: oauth2.auth.name,
181
- source: "auth",
182
- level: "error",
183
- message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
184
- });
185
- }
186
- } else {
187
- oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
188
- }
189
- window.close();
190
- }
191
-
192
- if (document.readyState !== 'loading') {
193
- run();
194
- } else {
195
- document.addEventListener('DOMContentLoaded', function () {
196
- run();
197
- });
198
- }
199
- </script>
200
- </body>
201
- </html>
202
- """
203
- return HTMLResponse(content=html)
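
A sketch of how these helpers are typically wired into an application to serve the docs pages from self-hosted assets, mirroring the standard FastAPI pattern; the /static asset URLs are illustrative.

from fastapi import FastAPI
from fastapi.openapi.docs import (
    get_redoc_html,
    get_swagger_ui_html,
    get_swagger_ui_oauth2_redirect_html,
)

app = FastAPI(docs_url=None, redoc_url=None)  # disable the built-in docs routes


@app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
    return get_swagger_ui_html(
        openapi_url=app.openapi_url,
        title=app.title + " - Swagger UI",
        oauth2_redirect_url="/docs/oauth2-redirect",
        swagger_js_url="/static/swagger-ui-bundle.js",  # illustrative local asset
        swagger_css_url="/static/swagger-ui.css",       # illustrative local asset
    )


@app.get("/docs/oauth2-redirect", include_in_schema=False)
async def swagger_ui_redirect():
    return get_swagger_ui_oauth2_redirect_html()


@app.get("/redoc", include_in_schema=False)
async def redoc_html():
    return get_redoc_html(
        openapi_url=app.openapi_url,
        title=app.title + " - ReDoc",
        redoc_js_url="/static/redoc.standalone.js",     # illustrative local asset
    )
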
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/designspaceLib/statNames.py DELETED
@@ -1,252 +0,0 @@
1
- """Compute name information for a given location in user-space coordinates
2
- using STAT data. This can be used to fill-in automatically the names of an
3
- instance:
4
-
5
- .. code:: python
6
-
7
- instance = doc.instances[0]
8
- names = getStatNames(doc, instance.getFullUserLocation(doc))
9
- print(names.styleNames)
10
- """
11
- from __future__ import annotations
12
-
13
- from dataclasses import dataclass
14
- from typing import Dict, Optional, Tuple, Union
15
- import logging
16
-
17
- from fontTools.designspaceLib import (
18
- AxisDescriptor,
19
- AxisLabelDescriptor,
20
- DesignSpaceDocument,
21
- DesignSpaceDocumentError,
22
- DiscreteAxisDescriptor,
23
- SimpleLocationDict,
24
- SourceDescriptor,
25
- )
26
-
27
- LOGGER = logging.getLogger(__name__)
28
-
29
- # TODO(Python 3.8): use Literal
30
- # RibbiStyleName = Union[Literal["regular"], Literal["bold"], Literal["italic"], Literal["bold italic"]]
31
- RibbiStyle = str
32
- BOLD_ITALIC_TO_RIBBI_STYLE = {
33
- (False, False): "regular",
34
- (False, True): "italic",
35
- (True, False): "bold",
36
- (True, True): "bold italic",
37
- }
38
-
39
-
40
- @dataclass
41
- class StatNames:
42
- """Name data generated from the STAT table information."""
43
-
44
- familyNames: Dict[str, str]
45
- styleNames: Dict[str, str]
46
- postScriptFontName: Optional[str]
47
- styleMapFamilyNames: Dict[str, str]
48
- styleMapStyleName: Optional[RibbiStyle]
49
-
50
-
51
- def getStatNames(
52
- doc: DesignSpaceDocument, userLocation: SimpleLocationDict
53
- ) -> StatNames:
54
- """Compute the family, style, PostScript names of the given ``userLocation``
55
- using the document's STAT information.
56
-
57
- Also computes localizations.
58
-
59
- If not enough STAT data is available for a given name, either its dict of
60
- localized names will be empty (family and style names), or the name will be
61
- None (PostScript name).
62
-
63
- .. versionadded:: 5.0
64
- """
65
- familyNames: Dict[str, str] = {}
66
- defaultSource: Optional[SourceDescriptor] = doc.findDefault()
67
- if defaultSource is None:
68
- LOGGER.warning("Cannot determine default source to look up family name.")
69
- elif defaultSource.familyName is None:
70
- LOGGER.warning(
71
- "Cannot look up family name, assign the 'familyname' attribute to the default source."
72
- )
73
- else:
74
- familyNames = {
75
- "en": defaultSource.familyName,
76
- **defaultSource.localisedFamilyName,
77
- }
78
-
79
- styleNames: Dict[str, str] = {}
80
- # If a free-standing label matches the location, use it for name generation.
81
- label = doc.labelForUserLocation(userLocation)
82
- if label is not None:
83
- styleNames = {"en": label.name, **label.labelNames}
84
- # Otherwise, scour the axis labels for matches.
85
- else:
86
- # Gather all languages in which at least one translation is provided
87
- # Then build names for all these languages, but fallback to English
88
- # whenever a translation is missing.
89
- labels = _getAxisLabelsForUserLocation(doc.axes, userLocation)
90
- if labels:
91
- languages = set(
92
- language for label in labels for language in label.labelNames
93
- )
94
- languages.add("en")
95
- for language in languages:
96
- styleName = " ".join(
97
- label.labelNames.get(language, label.defaultName)
98
- for label in labels
99
- if not label.elidable
100
- )
101
- if not styleName and doc.elidedFallbackName is not None:
102
- styleName = doc.elidedFallbackName
103
- styleNames[language] = styleName
104
-
105
- if "en" not in familyNames or "en" not in styleNames:
106
- # Not enough information to compute PS names of styleMap names
107
- return StatNames(
108
- familyNames=familyNames,
109
- styleNames=styleNames,
110
- postScriptFontName=None,
111
- styleMapFamilyNames={},
112
- styleMapStyleName=None,
113
- )
114
-
115
- postScriptFontName = f"{familyNames['en']}-{styleNames['en']}".replace(" ", "")
116
-
117
- styleMapStyleName, regularUserLocation = _getRibbiStyle(doc, userLocation)
118
-
119
- styleNamesForStyleMap = styleNames
120
- if regularUserLocation != userLocation:
121
- regularStatNames = getStatNames(doc, regularUserLocation)
122
- styleNamesForStyleMap = regularStatNames.styleNames
123
-
124
- styleMapFamilyNames = {}
125
- for language in set(familyNames).union(styleNames.keys()):
126
- familyName = familyNames.get(language, familyNames["en"])
127
- styleName = styleNamesForStyleMap.get(language, styleNamesForStyleMap["en"])
128
- styleMapFamilyNames[language] = (familyName + " " + styleName).strip()
129
-
130
- return StatNames(
131
- familyNames=familyNames,
132
- styleNames=styleNames,
133
- postScriptFontName=postScriptFontName,
134
- styleMapFamilyNames=styleMapFamilyNames,
135
- styleMapStyleName=styleMapStyleName,
136
- )
137
-
138
-
139
- def _getSortedAxisLabels(
140
- axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
141
- ) -> Dict[str, list[AxisLabelDescriptor]]:
142
- """Returns axis labels sorted by their ordering, with unordered ones appended as
143
- they are listed."""
144
-
145
- # First, get the axis labels with explicit ordering...
146
- sortedAxes = sorted(
147
- (axis for axis in axes if axis.axisOrdering is not None),
148
- key=lambda a: a.axisOrdering,
149
- )
150
- sortedLabels: Dict[str, list[AxisLabelDescriptor]] = {
151
- axis.name: axis.axisLabels for axis in sortedAxes
152
- }
153
-
154
- # ... then append the others in the order they appear.
155
- # NOTE: This relies on Python 3.7+ dict's preserved insertion order.
156
- for axis in axes:
157
- if axis.axisOrdering is None:
158
- sortedLabels[axis.name] = axis.axisLabels
159
-
160
- return sortedLabels
161
-
162
-
163
- def _getAxisLabelsForUserLocation(
164
- axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
165
- userLocation: SimpleLocationDict,
166
- ) -> list[AxisLabelDescriptor]:
167
- labels: list[AxisLabelDescriptor] = []
168
-
169
- allAxisLabels = _getSortedAxisLabels(axes)
170
- if allAxisLabels.keys() != userLocation.keys():
171
- LOGGER.warning(
172
- f"Mismatch between user location '{userLocation.keys()}' and available "
173
- f"labels for '{allAxisLabels.keys()}'."
174
- )
175
-
176
- for axisName, axisLabels in allAxisLabels.items():
177
- userValue = userLocation[axisName]
178
- label: Optional[AxisLabelDescriptor] = next(
179
- (
180
- l
181
- for l in axisLabels
182
- if l.userValue == userValue
183
- or (
184
- l.userMinimum is not None
185
- and l.userMaximum is not None
186
- and l.userMinimum <= userValue <= l.userMaximum
187
- )
188
- ),
189
- None,
190
- )
191
- if label is None:
192
- LOGGER.debug(
193
- f"Document needs a label for axis '{axisName}', user value '{userValue}'."
194
- )
195
- else:
196
- labels.append(label)
197
-
198
- return labels
199
-
200
-
201
- def _getRibbiStyle(
202
- self: DesignSpaceDocument, userLocation: SimpleLocationDict
203
- ) -> Tuple[RibbiStyle, SimpleLocationDict]:
204
- """Compute the RIBBI style name of the given user location,
205
- return the location of the matching Regular in the RIBBI group.
206
-
207
- .. versionadded:: 5.0
208
- """
209
- regularUserLocation = {}
210
- axes_by_tag = {axis.tag: axis for axis in self.axes}
211
-
212
- bold: bool = False
213
- italic: bool = False
214
-
215
- axis = axes_by_tag.get("wght")
216
- if axis is not None:
217
- for regular_label in axis.axisLabels:
218
- if (
219
- regular_label.linkedUserValue == userLocation[axis.name]
220
- # In the "recursive" case where both the Regular has
221
- # linkedUserValue pointing the Bold, and the Bold has
222
- # linkedUserValue pointing to the Regular, only consider the
223
- # first case: Regular (e.g. 400) has linkedUserValue pointing to
224
- # Bold (e.g. 700, higher than Regular)
225
- and regular_label.userValue < regular_label.linkedUserValue
226
- ):
227
- regularUserLocation[axis.name] = regular_label.userValue
228
- bold = True
229
- break
230
-
231
- axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt")
232
- if axis is not None:
233
- for upright_label in axis.axisLabels:
234
- if (
235
- upright_label.linkedUserValue == userLocation[axis.name]
236
- # In the "recursive" case where both the Upright has
237
- # linkedUserValue pointing the Italic, and the Italic has
238
- # linkedUserValue pointing to the Upright, only consider the
239
- # first case: Upright (e.g. ital=0, slant=0) has
240
- # linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or
241
- # slant=12 for backwards italics, in any case higher than
242
- # Upright in absolute value, hence the abs() below.
243
- and abs(upright_label.userValue) < abs(upright_label.linkedUserValue)
244
- ):
245
- regularUserLocation[axis.name] = upright_label.userValue
246
- italic = True
247
- break
248
-
249
- return BOLD_ITALIC_TO_RIBBI_STYLE[bold, italic], {
250
- **userLocation,
251
- **regularUserLocation,
252
- }
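
A usage sketch of getStatNames following the docstring at the top of the module above; the .designspace filename is illustrative and assumes the document defines STAT axis labels.

from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.designspaceLib.statNames import getStatNames

doc = DesignSpaceDocument.fromfile("MyFamily.designspace")  # illustrative path

# Resolve the first instance's user-space location and derive STAT-based names
instance = doc.instances[0]
names = getStatNames(doc, instance.getFullUserLocation(doc))

print(names.familyNames.get("en"))
print(names.styleNames.get("en"))
print(names.postScriptFontName)
print(names.styleMapFamilyNames.get("en"))
print(names.styleMapStyleName)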