Commit
·
da557f7
1
Parent(s):
25d1f2b
Update parquet files (step 116 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/0019c/NewBing/Dockerfile +0 -34
- spaces/0xHacked/zkProver/README.md +0 -11
- spaces/1368565466ki/Satdia/monotonic_align/core.py +0 -36
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contraband Police Offline Activation Keygen No Internet Required.md +0 -134
- spaces/1phancelerku/anime-remove-background/Alparslan Byk Seluklu Son Blm HD Kalite Seluklu Sultanlnn Ykselii.md +0 -103
- spaces/1toTree/lora_test/ppdiffusers/commands/ppdiffusers_cli.py +0 -41
- spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/__init__.py +0 -107
- spaces/ADOPLE/ResumeSummarizer/style.css +0 -26
- spaces/AIConsultant/MusicGen/audiocraft/optim/fsdp.py +0 -195
- spaces/AIWaves/Debate/gradio_backend.py +0 -139
- spaces/ASJMO/freegpt/client/css/conversation.css +0 -158
- spaces/Abhilashvj/planogram-compliance/utils/segment/dataloaders.py +0 -459
- spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/r/[id]/$types.d.ts +0 -23
- spaces/AgentVerse/agentVerse/agentverse/agents/tasksolving_agent/critic.py +0 -127
- spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/__init__.py +0 -8
- spaces/AgentVerse/agentVerse/setup.py +0 -50
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/Factory.js +0 -13
- spaces/Akshay-More-007/starcoder/app.py +0 -11
- spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/cleaners.py +0 -134
- spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/countless3d.py +0 -356
- spaces/AllAideas/SegmentacionVideo/utils/custom_layers.py +0 -67
- spaces/Allakhazam/Home/app.py +0 -48
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/pndm/__init__.py +0 -1
- spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py +0 -2
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/image.py +0 -152
- spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/filters.py +0 -120
- spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +0 -97
- spaces/ArnePan/German-LLM-leaderboard/app.py +0 -153
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py +0 -296
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/msgpack/ext.py +0 -193
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/train_net.py +0 -228
- spaces/Bart92/RVC_HF/app.py +0 -0
- spaces/Benson/text-generation/Examples/Aethersx2 Apk Version 6.0.md +0 -96
- spaces/Benson/text-generation/Examples/Descargar Fifa 4 En Lnea.md +0 -132
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/densepose.py +0 -581
- spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/par.h +0 -62
- spaces/CVPR/regionclip-demo/detectron2/modeling/postprocessing.py +0 -101
- spaces/Chomkwoy/Nilkessye/cpool_new/src/left_pool.cpp +0 -91
- spaces/CofAI/chat/g4f/Provider/Providers/Dfehub.py +0 -49
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/__init__.py +0 -0
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_routedef.py +0 -216
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_compat.py +0 -623
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9da94804.css +0 -1
- spaces/DYSHITELGOOGLA/app/README.md +0 -12
- spaces/DaleChen/AutoGPT/autogpt/speech/brian.py +0 -40
- spaces/DaweiZ/toy-gpt/app.py +0 -44
- spaces/Dorado607/ChuanhuChatGPT/modules/models/azure.py +0 -17
- spaces/ECCV2022/bytetrack/yolox/core/launch.py +0 -219
- spaces/Epitech/Scarecrow/original_app/README.md +0 -11
spaces/0019c/NewBing/Dockerfile
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
# Build Stage
|
2 |
-
# 使用 golang:alpine 作为构建阶段的基础镜像
|
3 |
-
FROM golang:alpine AS builder
|
4 |
-
|
5 |
-
# 添加 git,以便之后能从GitHub克隆项目
|
6 |
-
RUN apk --no-cache add git
|
7 |
-
|
8 |
-
# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下
|
9 |
-
RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
|
10 |
-
|
11 |
-
# 设置工作目录为之前克隆的项目目录
|
12 |
-
WORKDIR /workspace/app
|
13 |
-
|
14 |
-
# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小
|
15 |
-
RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
|
16 |
-
|
17 |
-
# Runtime Stage
|
18 |
-
# 使用轻量级的 alpine 镜像作为运行时的基础镜像
|
19 |
-
FROM alpine
|
20 |
-
|
21 |
-
# 设置工作目录
|
22 |
-
WORKDIR /workspace/app
|
23 |
-
|
24 |
-
# 从构建阶段复制编译后的二进制文件到运行时镜像中
|
25 |
-
COPY --from=builder /workspace/app/go-proxy-bingai .
|
26 |
-
|
27 |
-
# 设置环境变量,此处为随机字符
|
28 |
-
ENV Go_Proxy_BingAI_USER_TOKEN_1="1h_21qf8tNmRtDy5a4fZ05RFgkZeZ9akmnW9NtSo5s6aJilplld4X4Lj7BkJ3EQSNbu7tu-z_-OAHqeELJqlpF-bvOCMo5lWGjyCTcJcqIHnYiu_vlgrdDyo99wQHgsvNR5pKASGikeDgAVSN7CN6YM74n7glWgJ7hGpd33s9zcgdCea94XcsO5AmoPIoxA02O6zGkpTnIdc61W7D1WQUflqxgaSHCGWlrhw7aoPs-io"
|
29 |
-
|
30 |
-
# 暴露8080端口
|
31 |
-
EXPOSE 8080
|
32 |
-
|
33 |
-
# 容器启动时运行的命令
|
34 |
-
CMD ["/workspace/app/go-proxy-bingai"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/0xHacked/zkProver/README.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: ZkProver
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
license: bsd
|
9 |
-
---
|
10 |
-
|
11 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1368565466ki/Satdia/monotonic_align/core.py
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
import numba
|
2 |
-
|
3 |
-
|
4 |
-
@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
|
5 |
-
nopython=True, nogil=True)
|
6 |
-
def maximum_path_jit(paths, values, t_ys, t_xs):
|
7 |
-
b = paths.shape[0]
|
8 |
-
max_neg_val = -1e9
|
9 |
-
for i in range(int(b)):
|
10 |
-
path = paths[i]
|
11 |
-
value = values[i]
|
12 |
-
t_y = t_ys[i]
|
13 |
-
t_x = t_xs[i]
|
14 |
-
|
15 |
-
v_prev = v_cur = 0.0
|
16 |
-
index = t_x - 1
|
17 |
-
|
18 |
-
for y in range(t_y):
|
19 |
-
for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
|
20 |
-
if x == y:
|
21 |
-
v_cur = max_neg_val
|
22 |
-
else:
|
23 |
-
v_cur = value[y - 1, x]
|
24 |
-
if x == 0:
|
25 |
-
if y == 0:
|
26 |
-
v_prev = 0.
|
27 |
-
else:
|
28 |
-
v_prev = max_neg_val
|
29 |
-
else:
|
30 |
-
v_prev = value[y - 1, x - 1]
|
31 |
-
value[y, x] += max(v_prev, v_cur)
|
32 |
-
|
33 |
-
for y in range(t_y - 1, -1, -1):
|
34 |
-
path[y, index] = 1
|
35 |
-
if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
|
36 |
-
index = index - 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contraband Police Offline Activation Keygen No Internet Required.md
DELETED
@@ -1,134 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<br> - Confiscate contraband and arrest smugglers. <br> - Upgrade your station and equipment. <br> - Respond to emergencies and chase fleeing vehicles. | | H2: Why You Need an Offline Activation Keygen for Contraband Police | - The game is not free and requires a Steam account to play. <br> - An offline activation keygen can bypass the Steam verification and let you play the game without internet connection. <br> - An offline activation keygen can also save you money and avoid potential malware or viruses from downloading cracked versions of the game. | | H2: How to Get an Offline Activation Keygen for Contraband Police | - Find a reliable source that offers offline activation keygens for Contraband Police. <br> - Download the keygen file and run it on your computer. <br> - Follow the instructions on the screen and generate a unique activation code for the game. <br> - Enter the code in the game and enjoy playing Contraband Police offline. | | H2: Conclusion | - Summarize the main points of the article and encourage readers to try out Contraband Police with an offline activation keygen. | | H2: FAQs | - Answer some common questions about Contraband Police and offline activation keygens. | Article with HTML formatting: <h1>Contraband Police: A Thrilling Checkpoint Simulator Game</h1>
|
3 |
-
<p>If you are looking for a game that combines simulation, action, and strategy, then you might want to check out Contraband Police. This game takes you back to 1981 when smuggling is rampant in a communist country called Acaristan. You will play as a border guard inspector who has to inspect documents and packages of drivers who want to enter the country. You will also have to confiscate contraband, arrest smugglers, upgrade your station and equipment, respond to emergencies, and chase fleeing vehicles.</p>
|
4 |
-
<h2>Contraband Police offline activation keygen</h2><br /><p><b><b>Download File</b> ⚹ <a href="https://byltly.com/2uKA3W">https://byltly.com/2uKA3W</a></b></p><br /><br />
|
5 |
-
<p>Contraband Police is a game that will test your skills, judgment, and morality. You will have to deal with different types of smugglers who will try to deceive you with fake documents, hidden compartments, bribes, threats, or violence. You will also have to face the consequences of your actions, whether you choose to be honest, corrupt, or somewhere in between. You will also have to make decisions that will affect the future of Acaristan and its people.</p>
|
6 |
-
<p>Contraband Police is a game that will keep you on your toes and immerse you in a realistic and captivating world of 80s communism. You will experience the thrill of being a border guard inspector who has to balance between duty and survival.</p>
|
7 |
-
<h2>How to Play Contraband Police</h2>
|
8 |
-
<p>The gameplay of Contraband Police is divided into two phases: inspection and intervention.</p>
|
9 |
-
<h3>Inspection</h3>
|
10 |
-
<p>In this phase, you will have to inspect documents and packages of drivers who want to enter Acaristan. You will have access to various tools and equipment that will help you verify the validity of their papers and contents of their vehicles.</p>
|
11 |
-
<p>You will have to check for details such as name, photo, nationality, license plate, vehicle type, weight limit, cargo list, etc. You will also have to scan their packages for any contraband such as drugs, weapons, cash, or other illegal items.</p>
|
12 |
-
<p>If you find any discrepancies or violations, you will have to confiscate the contraband and issue a fine or an arrest warrant depending on the severity of the offense. You will also have to report your findings to your superiors and receive feedback on your performance.</p>
|
13 |
-
<p>How to get Contraband Police offline activation code for free<br />
|
14 |
-
Contraband Police crack download with offline keygen<br />
|
15 |
-
Contraband Police offline activation generator no survey<br />
|
16 |
-
Contraband Police offline activation keygen torrent<br />
|
17 |
-
Contraband Police offline activation serial number<br />
|
18 |
-
Contraband Police offline activation license key<br />
|
19 |
-
Contraband Police offline activation patch<br />
|
20 |
-
Contraband Police offline activation hack<br />
|
21 |
-
Contraband Police offline activation bypass<br />
|
22 |
-
Contraband Police offline activation unlocker<br />
|
23 |
-
Contraband Police offline activation mod<br />
|
24 |
-
Contraband Police offline activation cheat<br />
|
25 |
-
Contraband Police offline activation trainer<br />
|
26 |
-
Contraband Police offline activation fix<br />
|
27 |
-
Contraband Police offline activation error<br />
|
28 |
-
Contraband Police offline activation solution<br />
|
29 |
-
Contraband Police offline activation guide<br />
|
30 |
-
Contraband Police offline activation tutorial<br />
|
31 |
-
Contraband Police offline activation tips<br />
|
32 |
-
Contraband Police offline activation tricks<br />
|
33 |
-
Contraband Police offline activation secrets<br />
|
34 |
-
Contraband Police offline activation review<br />
|
35 |
-
Contraband Police offline activation gameplay<br />
|
36 |
-
Contraband Police offline activation walkthrough<br />
|
37 |
-
Contraband Police offline activation video<br />
|
38 |
-
Contraband Police offline activation demo<br />
|
39 |
-
Contraband Police offline activation beta<br />
|
40 |
-
Contraband Police offline activation update<br />
|
41 |
-
Contraband Police offline activation release date<br />
|
42 |
-
Contraband Police offline activation system requirements<br />
|
43 |
-
Contraband Police offline activation download link<br />
|
44 |
-
Contraband Police offline activation free trial<br />
|
45 |
-
Contraband Police offline activation full version<br />
|
46 |
-
Contraband Police offline activation premium access<br />
|
47 |
-
Contraband Police offline activation vip membership<br />
|
48 |
-
Contraband Police offline activation discount code<br />
|
49 |
-
Contraband Police offline activation coupon code<br />
|
50 |
-
Contraband Police offline activation promo code<br />
|
51 |
-
Contraband Police offline activation gift card<br />
|
52 |
-
Contraband Police offline activation redeem code<br />
|
53 |
-
Contraband Police offline activation steam key<br />
|
54 |
-
Contraband Police offline activation origin key<br />
|
55 |
-
Contraband Police offline activation epic games key<br />
|
56 |
-
Contraband Police offline activation gog key<br />
|
57 |
-
Contraband Police offline activation uplay key<br />
|
58 |
-
Contraband Police offline activation rockstar key<br />
|
59 |
-
Contraband Police offline activation xbox one key<br />
|
60 |
-
Contraband Police offline activation ps4 key<br />
|
61 |
-
Contraband Police offline activation switch key</p>
|
62 |
-
<h3>Confiscate contraband and arrest smugglers</h3>
|
63 |
-
<p>When you confiscate contraband from drivers, you will have two options: either store them in your locker or sell them on the black market for extra cash. However, be careful because storing too much contraband can attract unwanted attention from your superiors or other factions.</p>
|
64 |
-
<p>When you arrest smugglers, you will have to escort them to your station and put them in jail cells. You will also have to interrogate them for more information or evidence that can help you solve crimes or catch bigger fish.</p>
|
65 |
-
<h3>Upgrade your station and equipment</h3>
|
66 |
-
<p>As you progress through the game, you will earn money from fines, confiscations, arrests, or bribes. You can use this money to upgrade your station and equipment that will improve your efficiency and security.</p>
|
67 |
-
<p>You can upgrade your station by adding more rooms such as an interrogation room, a storage room, a garage, etc. You can also upgrade your equipment by buying new tools such as a scanner, a metal detector, a crowbar, etc.</p>
|
68 |
-
<h3>Respond to emergencies and chase fleeing vehicles</h3>
|
69 |
-
<p>Sometimes, you will encounter situations that require immediate action such as a bomb threat, a hostage situation, a rebel attack, etc. You will have to respond quickly and appropriately depending on the scenario.</p>
|
70 |
-
<p>Sometimes, smugglers will try to escape from your checkpoint by driving away at high speed. You will have to chase them down with your police car and stop them by shooting their tires or ramming their vehicle.</p>
|
71 |
-
<h2>Why You Need an Offline Activation Keygen for Contraband Police</h2>
|
72 |
-
<p>Contraband Police is not a free game and requires a Steam account to play. This means that you need an internet connection and a valid Steam key to activate the game on your computer.</p>
|
73 |
-
<p>However, there are some reasons why you might want or need an offline activation keygen for Contraband Police:</p>
|
74 |
-
<ul>
|
75 |
-
<li>You don't have an internet connection or a reliable one.</li>
|
76 |
-
<li>You don't want to spend money on buying the game or you can't afford it.</li>
|
77 |
-
<li>You don't want to risk downloading cracked versions of the game that might contain malware or viruses.</li>
|
78 |
-
</ul>
|
79 |
-
<p>An offline activation keygen is a software that can generate a unique activation code for Contraband Police that can bypass the Steam verification process and let you play the game without internet connection.</p>
|
80 |
-
<h2>How to Get an Offline Activation Keygen for Contraband Police</h2>
|
81 |
-
<p>If you want to get an offline activation keygen for Contraband Police, here are some steps that you need to follow:</p>
|
82 |
-
<ol>
|
83 |
-
<li>Find a reliable source that offers offline activation keygens for Contraband Police. You can search online for websites or forums that provide this service or ask around from other gamers who have used it before.</li>
|
84 |
-
<li>Download the keygen file from the source and run it on your computer. Make sure that you scan it first with an antivirus program before opening it.</li>
|
85 |
-
<li>Follow the instructions on the screen and generate a unique activation code for Contraband Police.</li>
|
86 |
-
<li>Enter the code in the game when prompted and enjoy playing Contraband Police offline.</li>
|
87 |
-
</ol>
|
88 |
-
<h2>Conclusion</h2>
|
89 |
-
<p>Contraband Police is a thrilling checkpoint simulator game that lets you experience what it's like to be a border guard inspector in a communist country of the 80s. You will have to inspect documents and packages of drivers who want to enter Acaristan while dealing with smugglers who will try to deceive you or escape from you.</p>
|
90 |
-
<p>If you want to play Contraband Police without internet connection or without buying it from Steam, then you might want to get an offline activation keygen for it. This software can generate a unique activation code for Contraband Police that can bypass the Steam verification process and let you play the game offline.</p>
|
91 |
-
<p>If you are interested in trying out Contraband Police with an offline activation keygen, then follow the steps above and get ready for some action-packed gameplay!</p>
|
92 |
-
<h2>FAQs</h2>
|
93 |
-
<h4>What are some tips for playing Contraband Police?</h4>
|
94 |
-
<p>Some tips for playing Contraband Police are:</p>
|
95 |
-
<ul>
|
96 |
-
<li>Pay attention to details such as dates, stamps, signatures, etc.</li>
|
97 |
-
<li>Use all available tools and equipment such as scanner, metal detector, crowbar etc.</li>
|
98 |
-
<li>Beware of fake documents or hidden compartments in vehicles.</li>
|
99 |
-
<li>Beware of bribes or threats from drivers.</li>
|
100 |
-
<li>Beware of storing too much contraband in your locker or selling them on the black market.</li>
|
101 |
-
<li>Beware of rebel attacks or emergencies that might occur at any time.</li>
|
102 |
-
<li>Beware of chasing fleeing vehicles that might be armed or dangerous.</li>
|
103 |
-
</ul>
|
104 |
-
<h4>What are some benefits of playing Contraband Police?</h4>
|
105 |
-
<h4>What are some drawbacks of playing Contraband Police?</h4>
|
106 |
-
<p>Some drawbacks of playing Contraband Police are:</p>
|
107 |
-
<ul>
|
108 |
-
<li>The game is not free and requires a Steam account to play.</li>
|
109 |
-
<li>The game is still in early access and might have some bugs or glitches.</li>
|
110 |
-
<li>The game might be too challenging or frustrating for some players.</li>
|
111 |
-
<li>The game might be too violent or graphic for some players.</li>
|
112 |
-
<li>The game might be too repetitive or boring for some players.</li>
|
113 |
-
</ul>
|
114 |
-
<h4>What are some alternatives to Contraband Police?</h4>
|
115 |
-
<p>Some alternatives to Contraband Police are:</p>
|
116 |
-
<ul>
|
117 |
-
<li>Papers, Please: A dystopian document thriller game where you play as an immigration officer in a fictional country.</li>
|
118 |
-
<li>Not Tonight: A post-Brexit management game where you play as a bouncer in a Britain on the verge of collapse.</li>
|
119 |
-
<li>This Is the Police: A strategy/adventure game where you play as a police chief in a corrupt city.</li>
|
120 |
-
<li>Do Not Feed the Monkeys: A digital voyeur simulator game where you spy on strangers through surveillance cameras.</li>
|
121 |
-
<li>Beholder: A dark dystopian adventure game where you play as a landlord who spies on his tenants for the state.</li>
|
122 |
-
</ul>
|
123 |
-
<h4>Where can I get more information about Contraband Police?</h4>
|
124 |
-
<p>You can get more information about Contraband Police from the following sources:</p>
|
125 |
-
<ul>
|
126 |
-
<li>The official website of the game: <a href="https://contrabandpolice.com/">https://contrabandpolice.com/</a></li>
|
127 |
-
<li>The official Steam page of the game: <a href="https://store.steampowered.com/app/756800/Contraband_Police/">https://store.steampowered.com/app/756800/Contraband_Police/</a></li>
|
128 |
-
<li>The official Facebook page of the game: <a href="https://www.facebook.com/ContrabandPolice/">https://www.facebook.com/ContrabandPolice/</a></li>
|
129 |
-
<li>The official Twitter account of the game: <a href="https://twitter.com/ContrabandPolic">@ContrabandPolic</a></li>
|
130 |
-
<li>The official YouTube channel of the game: <a href="https://www.youtube.com/channel/UC0m5y9w7yf0x1j8gZlLXZ9Q">https://www.youtube.com/channel/UC0m5y9w7yf0x1j8gZlLXZ9Q</a></li>
|
131 |
-
</ul>
|
132 |
-
</p> 0a6ba089eb<br />
|
133 |
-
<br />
|
134 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Alparslan Byk Seluklu Son Blm HD Kalite Seluklu Sultanlnn Ykselii.md
DELETED
@@ -1,103 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Alp Arslan Son Bolum: A Review of the Latest Episode of the Turkish Historical Drama</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>If you are a fan of Turkish historical dramas, you might have heard of or watched Alp Arslan: Büyük Selçuklu, a show that depicts the life and achievements of Alparslan, the second sultan of the Seljuk Empire. The show has been airing on TRT 1 since September 2022, and has gained a lot of popularity and praise from viewers and critics alike. The show is known for its captivating storyline, impressive production quality, and talented cast.</p>
|
5 |
-
<p>In this article, we will review the latest episode of the show, which aired on June 12, 2023. We will summarize the plot, analyze the main characters, evaluate the historical accuracy, and give our opinion on the strengths and weaknesses of the episode. We will also share our expectations and predictions for the next episode, which will be the season finale. If you have not watched the latest episode yet, be warned that this article contains spoilers.</p>
|
6 |
-
<h2>alp arslan son bolum</h2><br /><p><b><b>Download File</b> ✓ <a href="https://jinyurl.com/2uNNPO">https://jinyurl.com/2uNNPO</a></b></p><br /><br />
|
7 |
-
<h2>Main Body</h2>
|
8 |
-
<h3>The plot summary of the last episode</h3>
|
9 |
-
<p>The last episode of Alp Arslan: Büyük Selçuklu was full of action, drama, and suspense. Here are some of the main events that happened in the episode:</p>
|
10 |
-
<h4>Alparslan's quest for justice</h4>
|
11 |
-
<p>Alparslan, who is determined to find out who is behind the assassination attempt on his father, Sultan Tughril, follows the clues that lead him to Emir Bozan, one of his trusted commanders. He confronts Bozan and accuses him of being a traitor who works for Byzantium. Bozan denies everything, but Alparslan does not believe him. He orders Bozan to be arrested and tortured until he confesses.</p>
|
12 |
-
<h4>Akça's dilemma and decision</h4>
|
13 |
-
<p>Akça, who is a Turkmen girl that Alparslan saved from Byzantine captivity, is in love with Alparslan, but she is also loyal to her tribe. She learns that her brother, Yinal, who is also a prisoner of Byzantium, is going to be executed by Emperor Romanos Diogenes. She decides to risk her life and go to Byzantium to save her brother. She leaves a letter for Alparslan, explaining her situation and asking for his forgiveness.</p>
|
14 |
-
<h4>The clash between Seljuk and Byzantine forces</h4>
|
15 |
-
<p>Meanwhile, Romanos Diogenes, who is furious about Alparslan's victories over his army, prepares for a final battle against him. He gathers a large army and marches towards Malazgirt, where Alparslan is waiting for him. The two armies clash in a fierce and bloody battle. Alparslan fights bravely and skillfully, but he is outnumbered and surrounded by Byzantine soldiers. He is wounded by an arrow and falls from his horse. He is captured by Romanos Diogenes, who takes him as a prisoner.</p>
|
16 |
-
<h3>The main characters and their performances</h <h3>The main characters and their performances</h3>
|
17 |
-
<p>The show has a stellar cast of actors and actresses who bring the characters to life with their acting skills. Here are some of the main characters and their performances in the last episode:</p>
|
18 |
-
<h4>Ekin Koç as Alparslan</h4>
|
19 |
-
<p>Ekin Koç is the lead actor of the show, who plays the role of Alparslan, the sultan of the Seljuk Empire. He portrays Alparslan as a brave, wise, and charismatic leader who is loved by his people and feared by his enemies. He also shows Alparslan's human side, his emotions, and his struggles. In the last episode, he delivered a powerful performance as he faced betrayal, love, and captivity. He showed Alparslan's determination, courage, and dignity in the face of adversity.</p>
|
20 |
-
<h4>Leyla Lydia Tuğutlu as Akça</h4>
|
21 |
-
<p>Leyla Lydia Tuğutlu is the female lead of the show, who plays the role of Akça, a Turkmen girl who becomes Alparslan's love interest. She portrays Akça as a beautiful, loyal, and brave woman who is devoted to her tribe and her lover. She also shows Akça's conflict, dilemma, and decision. In the last episode, she gave a touching performance as she left Alparslan to save her brother. She showed Akça's pain, sacrifice, and hope.</p>
|
22 |
-
<h4>Kaan Taşaner as Romanos Diogenes</h4>
|
23 |
-
<p>Kaan Taşaner is the main antagonist of the show, who plays the role of Romanos Diogenes, the emperor of Byzantium. He portrays Romanos Diogenes as a ruthless, ambitious, and arrogant ruler who is obsessed with defeating Alparslan and expanding his empire. He also shows Romanos Diogenes' cunning, cruelty, and pride. In the last episode, he gave a convincing performance as he captured Alparslan and celebrated his victory. He showed Romanos Diogenes' triumph, arrogance, and mockery.</p>
|
24 |
-
<p>alp arslan son bolum izle full trt 1<br />
|
25 |
-
alp arslan son bolum fragman<br />
|
26 |
-
alp arslan son bolum tek parca hd<br />
|
27 |
-
alp arslan son bolum youtube<br />
|
28 |
-
alp arslan son bolum ddizi<br />
|
29 |
-
alp arslan son bolum ne zaman<br />
|
30 |
-
alp arslan son bolum oyunculari<br />
|
31 |
-
alp arslan son bolum yorumlari<br />
|
32 |
-
alp arslan son bolum ozeti<br />
|
33 |
-
alp arslan son bolum trt izle<br />
|
34 |
-
alp arslan buyuk selcuklu son bolum izle<br />
|
35 |
-
alp arslan buyuk selcuklu son bolum fragman<br />
|
36 |
-
alp arslan buyuk selcuklu son bolum tek parca hd<br />
|
37 |
-
alp arslan buyuk selcuklu son bolum youtube<br />
|
38 |
-
alp arslan buyuk selcuklu son bolum ddizi<br />
|
39 |
-
alp arslan buyuk selcuklu son bolum ne zaman<br />
|
40 |
-
alp arslan buyuk selcuklu son bolum oyunculari<br />
|
41 |
-
alp arslan buyuk selcuklu son bolum yorumlari<br />
|
42 |
-
alp arslan buyuk selcuklu son bolum ozeti<br />
|
43 |
-
alp arslan buyuk selcuklu son bolum trt izle<br />
|
44 |
-
alparslan büyük selçuklu son bölüm izle full trt 1<br />
|
45 |
-
alparslan büyük selçuklu son bölüm fragman<br />
|
46 |
-
alparslan büyük selçuklu son bölüm tek parça hd<br />
|
47 |
-
alparslan büyük selçuklu son bölüm youtube<br />
|
48 |
-
alparslan büyük selçuklu son bölüm ddizi<br />
|
49 |
-
alparslan büyük selçuklu son bölüm ne zaman<br />
|
50 |
-
alparslan büyük selçuklu son bölüm oyuncuları<br />
|
51 |
-
alparslan büyük selçuklu son bölüm yorumları<br />
|
52 |
-
alparslan büyük selçuklu son bölüm özeti<br />
|
53 |
-
alparslan büyük selçuklu son bölüm trt izle<br />
|
54 |
-
trt 1 alparslan büyük selçuklu son bölüm izle full hd<br />
|
55 |
-
trt 1 alparslan büyük selçuklu son bölüm fragmanı izle<br />
|
56 |
-
trt 1 alparslan büyük selçuklu son bölüm tek parça izle hd kalite<br />
|
57 |
-
trt 1 alparslan büyük selçuklu son bölüm youtube izle full hd kalite<br />
|
58 |
-
trt 1 alparslan büyük selçuklu son bölüm ddizi izle full hd kalite<br />
|
59 |
-
trt 1 alparslan büyük selçuklu son bölüm ne zaman yayınlanacak tarih ve saat bilgisi <br />
|
60 |
-
trt 1 alparslan büyük selçuklu son bölüm oyuncu kadrosu ve karakterleri tanıtımı <br />
|
61 |
-
trt 1 alparslan büyük selçuklu son bölüm yorumları ve analizleri <br />
|
62 |
-
trt 1 alparslan büyük selçuklu son bölüm özeti ve detaylı anlatımı <br />
|
63 |
-
trt 1 alparslan büyük selçuklu son bölüm trt izle online platformu üzerinden izleme seçeneği</p>
|
64 |
-
<h4>Other supporting actors and actresses</h4>
|
65 |
-
<p>The show also has many other supporting actors and actresses who play important roles in the story. Some of them are:</p>
|
66 |
-
<ul>
|
67 |
-
<li>Ali Ersan Duru as Sultan Tughril, Alparslan's father and predecessor.</li>
|
68 |
-
<li>Sezin Akbaşoğulları as Melike Gevher Nesibe Hatun, Alparslan's sister and a powerful Seljuk princess.</li>
|
69 |
-
<li>Yiğit Özşener as Nizamülmülk, Alparslan's vizier and advisor.</li>
|
70 |
-
<li>Ushan Çakır as Hasan Sabbah, a mysterious assassin who works for a secret organization.</li>
|
71 |
-
<li>Gürkan Uygun as Kutalmışoğlu Süleyman Şah, Alparslan's cousin and rival.</li>
|
72 |
-
<li>Burcu Özberk as Elçin Hatun, Süleyman Şah's wife and Akça's friend.</li>
|
73 |
-
</ul>
|
74 |
-
<p>All of them have done a great job in portraying their characters with authenticity and emotion.</p>
|
75 |
-
<h3>The historical accuracy and relevance of the show</h3>
|
76 |
-
<p>The show is based on historical events and figures that shaped the history of Turkey and the Middle East. However, it is not a documentary or a biography. It is a historical drama that uses artistic liberties and adaptations to create an engaging and entertaining story. Here are some of the aspects of the show that relate to history:</p>
|
77 |
-
<h4>The historical background of Alparslan and the Seljuk Empire</h4>
|
78 |
-
<p>Alparslan was born in 1029 in Balasagun, a city in present-day Kyrgyzstan. He was the son of Çağrı Bey, the brother of Sultan Tughril. He became the sultan of the Seljuk Empire in 1063 after his father's death. He expanded his empire by conquering many lands from Byzantium, Egypt, Syria, Iraq, Iran, and Central Asia. He is most famous for his victory at the Battle of Malazgirt in 1071 against Romanos Diogenes, which opened Anatolia to Turkish settlement and paved the way for the rise of the Ottoman Empire.</p>
|
79 |
-
<p>The Seljuk Empire was founded by Seljuk Bey, a Turkmen chief who converted to Islam in 985. He led his tribe to migrate from Central Asia to Iran in search of new lands. His descendants continued his legacy by establishing a powerful empire that spanned from Asia Minor to India at its peak. The Seljuk Empire was known for its military prowess, cultural diversity, religious tolerance, artistic achievements, and scientific advancements.</p>
|
80 |
-
<h4>The artistic liberties and <h4>The artistic liberties and adaptations of the show</h4>
|
81 |
-
<p>The show is not a faithful representation of history, but a creative interpretation of it. The show uses fictional characters, events, dialogues, and scenarios to create drama, suspense, romance, and humor. The show also changes some historical facts, dates, names, and details to suit the narrative and the audience. For example, the show depicts Alparslan as a young and handsome sultan, while in reality he was in his forties when he became the sultan. The show also portrays Romanos Diogenes as a cruel and arrogant emperor, while in reality he was a respected and competent leader who treated Alparslan with honor after his capture.</p>
|
82 |
-
<p>The show does not claim to be accurate or objective, but rather aims to entertain and educate the viewers. The show does not intend to offend or mislead anyone, but rather to inspire and inform them. The show encourages the viewers to do their own research and learn more about the history and culture of the Seljuk Empire and its people.</p>
|
83 |
-
<h4>The cultural and educational value of the show</h4>
|
84 |
-
<p>The show has a lot of cultural and educational value for the viewers. The show showcases the rich and diverse heritage of Turkey and the Middle East, as well as the common roots and values of different civilizations. The show also teaches the viewers about the history, politics, religion, art, science, and literature of the Seljuk Empire and its neighbors. The show also promotes the values of courage, justice, loyalty, wisdom, and tolerance that Alparslan and his people embodied.</p>
|
85 |
-
<p>The show is not only a source of entertainment, but also a source of inspiration and enlightenment for the viewers. The show helps the viewers to appreciate and respect their own history and culture, as well as those of others. The show also helps the viewers to understand and relate to the challenges and opportunities that people faced in the past, as well as those that they face in the present.</p>
|
86 |
-
<h2>Conclusion</h2>
|
87 |
-
<p>The last episode of Alp Arslan: Büyük Selçuklu was a thrilling and emotional one that left the viewers in awe and anticipation. The episode had many strengths, such as the captivating plot, the impressive production quality, and the talented cast. The episode also had some weaknesses, such as the historical inaccuracies, the clichéd dialogues, and the predictable twists. However, these weaknesses did not overshadow the overall quality and enjoyment of the episode.</p>
|
88 |
-
<p>The next episode will be the season finale of the show, which will reveal what will happen to Alparslan after his capture by Romanos Diogenes. Will he escape or be executed? Will he reunite with Akça or lose her forever? Will he defeat Romanos Diogenes or make peace with him? Will he fulfill his destiny or fail his mission? These are some of the questions that the viewers are eager to find out.</p>
|
89 |
-
<p>The final verdict and rating of the show is that it is a must-watch for anyone who loves historical dramas. It is a well-made, well-acted, and well-written show that offers a lot of entertainment and education for the viewers. It is a show that celebrates the history and culture of Turkey and the Middle East, as well as the values and virtues of humanity. It is a show that deserves a 9 out of 10 rating.</p>
|
90 |
-
<h3>FAQs</h3>
|
91 |
-
<ul>
|
92 |
-
<li>Q: Where can I watch Alp Arslan: Büyük Selçuklu?</li>
|
93 |
-
<li>A: You can watch it on TRT 1 every Monday at 20:00 (Turkish time), or on its official YouTube channel with English subtitles.</li>
|
94 |
-
<li>Q: How many episodes are there in Alp Arslan: Büyük Selçuklu?</li>
|
95 |
-
<li>A: There are 36 episodes in total in Alp Arslan: Büyük Selçuklu. The last episode will air on June 19, 2023.</li>
|
96 |
-
<li>Q: Is Alp Arslan: Büyük Selçuklu based on a book?</li>
|
97 |
-
<li>A: No, Alp Arslan: Büyük Selçuklu is an original script written by Serdar Özönalan.</li>
|
98 |
-
<li>Q: Who is Alparslan in real life?</li>
|
99 |
-
<li>A: Alparslan was a real historical figure who was the second sultan of the Seljuk Empire from 1063 to 1072. He is considered one of the greatest Turkish heroes and leaders of all time.</li>
|
100 |
-
<li>Q: What is Malazgirt?</li>
|
101 |
-
<li>A: Malazgirt is a town in eastern Turkey where Alparslan fought against Romanos Diogenes in 1071. The Battle of Malazgirt was one of the most decisive battles in Turkish history.</li> I have already written the article as you requested. There is no need to continue writing it. I hope you are satisfied with my work. If you have any feedback or suggestions, please let me know. Thank you for choosing me as your content writer.</p> 197e85843d<br />
|
102 |
-
<br />
|
103 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/commands/ppdiffusers_cli.py
DELETED
@@ -1,41 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
from argparse import ArgumentParser
|
17 |
-
|
18 |
-
from .env import EnvironmentCommand
|
19 |
-
|
20 |
-
|
21 |
-
def main():
|
22 |
-
parser = ArgumentParser("PPDiffusers CLI tool", usage="ppdiffusers-cli <command> [<args>]")
|
23 |
-
commands_parser = parser.add_subparsers(help="ppdiffusers-cli command helpers")
|
24 |
-
|
25 |
-
# Register commands
|
26 |
-
EnvironmentCommand.register_subcommand(commands_parser)
|
27 |
-
|
28 |
-
# Let's go
|
29 |
-
args = parser.parse_args()
|
30 |
-
|
31 |
-
if not hasattr(args, "func"):
|
32 |
-
parser.print_help()
|
33 |
-
exit(1)
|
34 |
-
|
35 |
-
# Run
|
36 |
-
service = args.func(args)
|
37 |
-
service.run()
|
38 |
-
|
39 |
-
|
40 |
-
if __name__ == "__main__":
|
41 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/__init__.py
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
# flake8: noqa
|
16 |
-
|
17 |
-
from dataclasses import dataclass
|
18 |
-
from typing import List, Optional, Union
|
19 |
-
|
20 |
-
import numpy as np
|
21 |
-
import PIL
|
22 |
-
|
23 |
-
from ...utils import (
|
24 |
-
BaseOutput,
|
25 |
-
OptionalDependencyNotAvailable,
|
26 |
-
is_fastdeploy_available,
|
27 |
-
is_k_diffusion_available,
|
28 |
-
is_paddle_available,
|
29 |
-
is_paddlenlp_available,
|
30 |
-
)
|
31 |
-
|
32 |
-
|
33 |
-
@dataclass
|
34 |
-
class StableDiffusionPipelineOutput(BaseOutput):
|
35 |
-
"""
|
36 |
-
Output class for Stable Diffusion pipelines.
|
37 |
-
|
38 |
-
Args:
|
39 |
-
images (`List[PIL.Image.Image]` or `np.ndarray`)
|
40 |
-
List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
|
41 |
-
num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
|
42 |
-
nsfw_content_detected (`List[bool]`)
|
43 |
-
List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
44 |
-
(nsfw) content, or `None` if safety checking could not be performed.
|
45 |
-
"""
|
46 |
-
|
47 |
-
images: Union[List[PIL.Image.Image], np.ndarray]
|
48 |
-
nsfw_content_detected: Optional[List[bool]]
|
49 |
-
|
50 |
-
|
51 |
-
try:
|
52 |
-
if not (is_paddlenlp_available() and is_paddle_available()):
|
53 |
-
raise OptionalDependencyNotAvailable()
|
54 |
-
except OptionalDependencyNotAvailable:
|
55 |
-
from ...utils.dummy_paddle_and_paddlenlp_objects import (
|
56 |
-
StableDiffusionDepth2ImgPipeline,
|
57 |
-
)
|
58 |
-
else:
|
59 |
-
from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
|
60 |
-
|
61 |
-
if is_paddlenlp_available() and is_paddle_available():
|
62 |
-
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
|
63 |
-
from .pipeline_stable_diffusion import StableDiffusionPipeline
|
64 |
-
from .pipeline_stable_diffusion_all_in_one import StableDiffusionPipelineAllinOne
|
65 |
-
from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
|
66 |
-
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
|
67 |
-
from .pipeline_stable_diffusion_inpaint_legacy import (
|
68 |
-
StableDiffusionInpaintPipelineLegacy,
|
69 |
-
)
|
70 |
-
from .pipeline_stable_diffusion_mega import StableDiffusionMegaPipeline
|
71 |
-
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
|
72 |
-
from .safety_checker import StableDiffusionSafetyChecker
|
73 |
-
|
74 |
-
try:
|
75 |
-
if not (is_paddlenlp_available() and is_paddle_available()):
|
76 |
-
raise OptionalDependencyNotAvailable()
|
77 |
-
except OptionalDependencyNotAvailable:
|
78 |
-
from ...utils.dummy_paddle_and_paddlenlp_objects import (
|
79 |
-
StableDiffusionImageVariationPipeline,
|
80 |
-
)
|
81 |
-
else:
|
82 |
-
from .pipeline_stable_diffusion_image_variation import (
|
83 |
-
StableDiffusionImageVariationPipeline,
|
84 |
-
)
|
85 |
-
|
86 |
-
try:
|
87 |
-
if not (is_paddle_available() and is_paddlenlp_available() and is_k_diffusion_available()):
|
88 |
-
raise OptionalDependencyNotAvailable()
|
89 |
-
except OptionalDependencyNotAvailable:
|
90 |
-
from ...utils.dummy_paddle_and_paddlenlp_and_k_diffusion_objects import * # noqa F403
|
91 |
-
else:
|
92 |
-
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
|
93 |
-
|
94 |
-
if is_paddlenlp_available() and is_fastdeploy_available():
|
95 |
-
from .pipeline_fastdeploy_stable_diffusion import FastDeployStableDiffusionPipeline
|
96 |
-
from .pipeline_fastdeploy_stable_diffusion_img2img import (
|
97 |
-
FastDeployStableDiffusionImg2ImgPipeline,
|
98 |
-
)
|
99 |
-
from .pipeline_fastdeploy_stable_diffusion_inpaint import (
|
100 |
-
FastDeployStableDiffusionInpaintPipeline,
|
101 |
-
)
|
102 |
-
from .pipeline_fastdeploy_stable_diffusion_inpaint_legacy import (
|
103 |
-
FastDeployStableDiffusionInpaintPipelineLegacy,
|
104 |
-
)
|
105 |
-
from .pipeline_fastdeploy_stable_diffusion_mega import (
|
106 |
-
FastDeployStableDiffusionMegaPipeline,
|
107 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ADOPLE/ResumeSummarizer/style.css
DELETED
@@ -1,26 +0,0 @@
|
|
1 |
-
#col-container {
|
2 |
-
max-width: 600px;
|
3 |
-
margin-left: auto;
|
4 |
-
margin-right: auto;
|
5 |
-
}
|
6 |
-
|
7 |
-
#row-flex {
|
8 |
-
display: flex;
|
9 |
-
align-items: center;
|
10 |
-
justify-content: center;
|
11 |
-
}
|
12 |
-
.leftimage .rightimage{
|
13 |
-
float:left;
|
14 |
-
filter: drop-shadow(20px 20px 10px white);
|
15 |
-
}
|
16 |
-
.leftimage{
|
17 |
-
padding-top:40px;
|
18 |
-
margin-left:310px;
|
19 |
-
}
|
20 |
-
.rightimage{
|
21 |
-
padding-top:35px;
|
22 |
-
margin-right:320px;
|
23 |
-
}
|
24 |
-
.heightfit{
|
25 |
-
height:85px;
|
26 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/audiocraft/optim/fsdp.py
DELETED
@@ -1,195 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
Wrapper around FSDP for more convenient use in the training loops.
|
9 |
-
"""
|
10 |
-
|
11 |
-
from contextlib import contextmanager
|
12 |
-
import typing as tp
|
13 |
-
import dora
|
14 |
-
import torch
|
15 |
-
|
16 |
-
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
|
17 |
-
from torch.distributed.fsdp import (
|
18 |
-
MixedPrecision, ShardingStrategy, FullStateDictConfig, StateDictType)
|
19 |
-
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
|
20 |
-
|
21 |
-
|
22 |
-
def is_fsdp_used() -> bool:
|
23 |
-
"""Return whether we are using FSDP."""
|
24 |
-
# A bit of a hack but should work from anywhere.
|
25 |
-
if dora.is_xp():
|
26 |
-
cfg = dora.get_xp().cfg
|
27 |
-
if hasattr(cfg, 'fsdp'):
|
28 |
-
return cfg.fsdp.use
|
29 |
-
return False
|
30 |
-
|
31 |
-
|
32 |
-
def is_sharded_tensor(x: tp.Any) -> bool:
|
33 |
-
return isinstance(x, ShardedTensor)
|
34 |
-
|
35 |
-
|
36 |
-
@contextmanager
|
37 |
-
def switch_to_full_state_dict(models: tp.List[FSDP]):
|
38 |
-
# Another bug in FSDP makes it that we cannot use the `state_dict_type` API,
|
39 |
-
# so let's do thing manually.
|
40 |
-
for model in models:
|
41 |
-
FSDP.set_state_dict_type( # type: ignore
|
42 |
-
model, StateDictType.FULL_STATE_DICT,
|
43 |
-
FullStateDictConfig(offload_to_cpu=True, rank0_only=True))
|
44 |
-
try:
|
45 |
-
yield
|
46 |
-
finally:
|
47 |
-
for model in models:
|
48 |
-
FSDP.set_state_dict_type(model, StateDictType.LOCAL_STATE_DICT) # type: ignore
|
49 |
-
|
50 |
-
|
51 |
-
def wrap_with_fsdp(cfg, model: torch.nn.Module,
|
52 |
-
block_classes: tp.Optional[tp.Set[tp.Type]] = None) -> FSDP:
|
53 |
-
"""Wraps a model with FSDP."""
|
54 |
-
# Some of the typing is disabled until this gets integrated
|
55 |
-
# into the stable version of PyTorch.
|
56 |
-
from torch.distributed.fsdp.wrap import ModuleWrapPolicy # type: ignore
|
57 |
-
|
58 |
-
# we import this here to prevent circular import.
|
59 |
-
from ..modules.transformer import StreamingTransformerLayer
|
60 |
-
from ..modules.conditioners import ConditioningProvider
|
61 |
-
|
62 |
-
_fix_post_backward_hook()
|
63 |
-
|
64 |
-
assert cfg.use
|
65 |
-
sharding_strategy_dict = {
|
66 |
-
"no_shard": ShardingStrategy.NO_SHARD,
|
67 |
-
"shard_grad_op": ShardingStrategy.SHARD_GRAD_OP,
|
68 |
-
"full_shard": ShardingStrategy.FULL_SHARD,
|
69 |
-
}
|
70 |
-
|
71 |
-
dtype_dict = {
|
72 |
-
"float32": torch.float32,
|
73 |
-
"float16": torch.float16,
|
74 |
-
"bfloat16": torch.bfloat16,
|
75 |
-
}
|
76 |
-
|
77 |
-
mixed_precision_config = MixedPrecision(
|
78 |
-
param_dtype=dtype_dict[cfg.param_dtype],
|
79 |
-
reduce_dtype=dtype_dict[cfg.reduce_dtype],
|
80 |
-
buffer_dtype=dtype_dict[cfg.buffer_dtype],
|
81 |
-
)
|
82 |
-
|
83 |
-
sharding_strategy_config = sharding_strategy_dict[cfg.sharding_strategy]
|
84 |
-
# The following is going to require being a bit smart
|
85 |
-
# when doing LM, because this would flush the weights for every time step
|
86 |
-
# during generation. One possiblity is to use hybrid sharding:
|
87 |
-
# See: https://pytorch.org/docs/master/fsdp.html#torch.distributed.fsdp.ShardingStrategy
|
88 |
-
assert sharding_strategy_config != ShardingStrategy.FULL_SHARD, \
|
89 |
-
"Not supported at the moment, requires a bit more work."
|
90 |
-
|
91 |
-
local_rank = dora.distrib.get_distrib_spec().local_rank
|
92 |
-
assert local_rank < torch.cuda.device_count(), "Please upgrade Dora!"
|
93 |
-
|
94 |
-
auto_wrap_policy = None
|
95 |
-
if block_classes is None:
|
96 |
-
block_classes = {StreamingTransformerLayer, ConditioningProvider}
|
97 |
-
if cfg.per_block:
|
98 |
-
auto_wrap_policy = ModuleWrapPolicy(block_classes)
|
99 |
-
wrapped = _FSDPFixStateDict(
|
100 |
-
model,
|
101 |
-
sharding_strategy=sharding_strategy_config,
|
102 |
-
mixed_precision=mixed_precision_config,
|
103 |
-
device_id=local_rank,
|
104 |
-
sync_module_states=True,
|
105 |
-
use_orig_params=True,
|
106 |
-
auto_wrap_policy=auto_wrap_policy,
|
107 |
-
) # type: ignore
|
108 |
-
FSDP.set_state_dict_type(wrapped, StateDictType.LOCAL_STATE_DICT) # type: ignore
|
109 |
-
|
110 |
-
# Let the wrapped model know about the wrapping!
|
111 |
-
# We use __dict__ to avoid it going into the state dict.
|
112 |
-
# This is a bit dirty, but needed during generation, as otherwise
|
113 |
-
# the wrapped model would call itself and bypass FSDP.
|
114 |
-
for module in FSDP.fsdp_modules(wrapped):
|
115 |
-
original = module._fsdp_wrapped_module
|
116 |
-
original.__dict__['_fsdp'] = module
|
117 |
-
return wrapped
|
118 |
-
|
119 |
-
|
120 |
-
def purge_fsdp(model: FSDP):
|
121 |
-
"""Purge the FSDP cached shard inside the model. This should
|
122 |
-
allow setting the best state or switching to the EMA.
|
123 |
-
"""
|
124 |
-
from torch.distributed.fsdp._runtime_utils import _reshard # type: ignore
|
125 |
-
for module in FSDP.fsdp_modules(model):
|
126 |
-
handles = module._handles
|
127 |
-
if not handles:
|
128 |
-
continue
|
129 |
-
handle = handles[0]
|
130 |
-
unsharded_flat_param = handle._get_padded_unsharded_flat_param()
|
131 |
-
storage_size: int = unsharded_flat_param._typed_storage()._size() # type: ignore
|
132 |
-
if storage_size == 0:
|
133 |
-
continue
|
134 |
-
true_list = [True for h in handles]
|
135 |
-
_reshard(module, handles, true_list)
|
136 |
-
|
137 |
-
|
138 |
-
class _FSDPFixStateDict(FSDP):
|
139 |
-
@staticmethod
|
140 |
-
def _name_without_fsdp_prefix(name: str) -> str:
|
141 |
-
from torch.distributed.fsdp._common_utils import FSDP_WRAPPED_MODULE # type: ignore
|
142 |
-
parts = name.split('.')
|
143 |
-
new_parts = [part for part in parts if part != FSDP_WRAPPED_MODULE]
|
144 |
-
return '.'.join(new_parts)
|
145 |
-
|
146 |
-
def state_dict(self) -> tp.Dict[str, tp.Any]: # type: ignore
|
147 |
-
state = dict(super().state_dict())
|
148 |
-
for key, value in list(state.items()):
|
149 |
-
if is_sharded_tensor(value):
|
150 |
-
del state[key]
|
151 |
-
return state
|
152 |
-
|
153 |
-
def load_state_dict(self, state: tp.Dict[str, tp.Any]): # type: ignore
|
154 |
-
if self._state_dict_type is StateDictType.FULL_STATE_DICT:
|
155 |
-
super().load_state_dict(state)
|
156 |
-
purge_fsdp(self)
|
157 |
-
return
|
158 |
-
# Fix FSDP load state dict in all situation.
|
159 |
-
# Use this only with LOCAL_STATE_DICT !!!
|
160 |
-
current_state = dict(super().state_dict())
|
161 |
-
for key, value in state.items():
|
162 |
-
key = _FSDPFixStateDict._name_without_fsdp_prefix(key)
|
163 |
-
if key not in current_state:
|
164 |
-
# Emulate strict loading manually.
|
165 |
-
raise RuntimeError(f"Unknown state key {key}")
|
166 |
-
current_state[key].copy_(value)
|
167 |
-
|
168 |
-
# Purging cached weights from previous forward.
|
169 |
-
purge_fsdp(self)
|
170 |
-
|
171 |
-
|
172 |
-
_hook_fixed = False
|
173 |
-
|
174 |
-
|
175 |
-
def _fix_post_backward_hook():
|
176 |
-
global _hook_fixed
|
177 |
-
if _hook_fixed:
|
178 |
-
return
|
179 |
-
_hook_fixed = True
|
180 |
-
|
181 |
-
from torch.distributed.fsdp import _runtime_utils
|
182 |
-
from torch.distributed.fsdp._common_utils import TrainingState, HandleTrainingState
|
183 |
-
old_hook = _runtime_utils._post_backward_hook
|
184 |
-
|
185 |
-
def _post_backward_hook(state, handle, *args, **kwargs):
|
186 |
-
checkpointed = getattr(state._fsdp_wrapped_module, '_audiocraft_checkpointed', False)
|
187 |
-
if checkpointed:
|
188 |
-
# there will be one more forward in the backward with checkpointing and that will
|
189 |
-
# massively confuse FSDP, so we have to make it think everything
|
190 |
-
# is going according to the plan.
|
191 |
-
state.training_state = TrainingState.FORWARD_BACKWARD
|
192 |
-
handle._training_state = HandleTrainingState.BACKWARD_PRE
|
193 |
-
old_hook(state, handle, *args, **kwargs)
|
194 |
-
|
195 |
-
_runtime_utils._post_backward_hook = _post_backward_hook
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIWaves/Debate/gradio_backend.py
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
import yaml
|
2 |
-
import os
|
3 |
-
import argparse
|
4 |
-
import sys
|
5 |
-
sys.path.append("src/agents")
|
6 |
-
from SOP import SOP
|
7 |
-
from Agent import Agent
|
8 |
-
from Environment import Environment
|
9 |
-
from Memory import Memory
|
10 |
-
from gradio_base import Client
|
11 |
-
from app import DebateUI
|
12 |
-
|
13 |
-
def process(action):
|
14 |
-
response = action.response
|
15 |
-
send_name = action.name
|
16 |
-
send_role = action.role
|
17 |
-
if not action.is_user:
|
18 |
-
print(f"{send_name}({send_role}):{response}")
|
19 |
-
memory = Memory(send_role, send_name, response)
|
20 |
-
return memory
|
21 |
-
|
22 |
-
def gradio_process(action,current_state):
|
23 |
-
response = action.response
|
24 |
-
all = ""
|
25 |
-
for i,res in enumerate(response):
|
26 |
-
all+=res
|
27 |
-
state = 10
|
28 |
-
if action.is_user:
|
29 |
-
state = 30
|
30 |
-
elif action.state_begin:
|
31 |
-
state = 12
|
32 |
-
action.state_begin = False
|
33 |
-
elif i>0:
|
34 |
-
state = 11
|
35 |
-
send_name = f"{action.name}({action.role})"
|
36 |
-
Client.send_server(str([state, send_name, res, current_state.name]))
|
37 |
-
if state == 30:
|
38 |
-
# print("client: waiting for input.")
|
39 |
-
data: list = next(Client.receive_server)
|
40 |
-
content = ""
|
41 |
-
for item in data:
|
42 |
-
if item.startswith("<USER>"):
|
43 |
-
content = item.split("<USER>")[1]
|
44 |
-
break
|
45 |
-
# print(f"client: received `{content}` from server")
|
46 |
-
action.response = content
|
47 |
-
break
|
48 |
-
else:
|
49 |
-
action.response = all
|
50 |
-
|
51 |
-
def block_when_next(current_agent, current_state):
|
52 |
-
if Client.LAST_USER:
|
53 |
-
assert not current_agent.is_user
|
54 |
-
Client.LAST_USER = False
|
55 |
-
return
|
56 |
-
if current_agent.is_user:
|
57 |
-
# if next turn is user, we don't handle it here
|
58 |
-
Client.LAST_USER = True
|
59 |
-
return
|
60 |
-
if Client.FIRST_RUN:
|
61 |
-
Client.FIRST_RUN = False
|
62 |
-
else:
|
63 |
-
# block current process
|
64 |
-
if Client.mode == Client.SINGLE_MODE:
|
65 |
-
Client.send_server(str([98, f"{current_agent.name}({current_agent.state_roles[current_state.name]})", " ", current_state.name]))
|
66 |
-
data: list = next(Client.receive_server)
|
67 |
-
|
68 |
-
|
69 |
-
def init(config):
|
70 |
-
if not os.path.exists("logs"):
|
71 |
-
os.mkdir("logs")
|
72 |
-
sop = SOP.from_config(config)
|
73 |
-
agents,roles_to_names,names_to_roles = Agent.from_config(config)
|
74 |
-
environment = Environment.from_config(config)
|
75 |
-
environment.agents = agents
|
76 |
-
environment.roles_to_names,environment.names_to_roles = roles_to_names,names_to_roles
|
77 |
-
sop.roles_to_names,sop.names_to_roles = roles_to_names,names_to_roles
|
78 |
-
for name,agent in agents.items():
|
79 |
-
agent.environment = environment
|
80 |
-
return agents,sop,environment
|
81 |
-
|
82 |
-
def run(agents,sop,environment):
|
83 |
-
while True:
|
84 |
-
current_state,current_agent= sop.next(environment,agents)
|
85 |
-
if sop.finished:
|
86 |
-
print("finished!")
|
87 |
-
Client.send_server(str([99, ' ', ' ', "done"]))
|
88 |
-
os.environ.clear()
|
89 |
-
break
|
90 |
-
block_when_next(current_agent, current_state)
|
91 |
-
action = current_agent.step(current_state,"") #component_dict = current_state[self.role[current_node.name]] current_agent.compile(component_dict)
|
92 |
-
gradio_process(action,current_state)
|
93 |
-
memory = process(action)
|
94 |
-
environment.update_memory(memory,current_state)
|
95 |
-
|
96 |
-
|
97 |
-
def prepare(agents, sop, environment):
|
98 |
-
client = Client()
|
99 |
-
Client.send_server = client.send_message
|
100 |
-
content = sop.states['Affirmative_Task_Allocation_state'].begin_query
|
101 |
-
parse_data = DebateUI.extract(content)
|
102 |
-
client.send_message(
|
103 |
-
{
|
104 |
-
"theme": f"{parse_data[0]}",
|
105 |
-
"positive": f"{parse_data[1]}",
|
106 |
-
"negative": f"{parse_data[2]}",
|
107 |
-
"agents_name": DebateUI.convert2list4agentname(sop)[0],
|
108 |
-
"only_name": DebateUI.convert2list4agentname(sop)[0],
|
109 |
-
"default_cos_play_id": -1,
|
110 |
-
"api_key": os.environ["API_KEY"]
|
111 |
-
}
|
112 |
-
)
|
113 |
-
client.listening_for_start_()
|
114 |
-
client.mode = Client.mode = client.cache["mode"]
|
115 |
-
# cover config and then start
|
116 |
-
os.environ["API_KEY"] = client.cache["api_key"]
|
117 |
-
if Client.cache["cosplay"] is not None:
|
118 |
-
agents[Client.cache["cosplay"]].is_user = True
|
119 |
-
sop.states['Negative_Task_Allocation_state'] = sop.states['Affirmative_Task_Allocation_state'].begin_query = \
|
120 |
-
DebateUI.merge(
|
121 |
-
theme=Client.cache["theme"], positive=Client.cache["positive"], negative=Client.cache["negative"],
|
122 |
-
origin_content=sop.states['Affirmative_Task_Allocation_state'].begin_query
|
123 |
-
)
|
124 |
-
|
125 |
-
|
126 |
-
if __name__ == '__main__':
|
127 |
-
parser = argparse.ArgumentParser(description='A demo of chatbot')
|
128 |
-
parser.add_argument('--agent', type=str, help='path to SOP json', default="config.json")
|
129 |
-
args = parser.parse_args()
|
130 |
-
|
131 |
-
agents,sop,environment = init(args.agent)
|
132 |
-
|
133 |
-
# add ==============================
|
134 |
-
prepare(agents, sop, environment)
|
135 |
-
# ==================================
|
136 |
-
|
137 |
-
run(agents,sop,environment)
|
138 |
-
|
139 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ASJMO/freegpt/client/css/conversation.css
DELETED
@@ -1,158 +0,0 @@
|
|
1 |
-
.conversation {
|
2 |
-
width: 60%;
|
3 |
-
margin: 0px 16px;
|
4 |
-
display: flex;
|
5 |
-
flex-direction: column;
|
6 |
-
}
|
7 |
-
|
8 |
-
.conversation #messages {
|
9 |
-
width: 100%;
|
10 |
-
display: flex;
|
11 |
-
flex-direction: column;
|
12 |
-
overflow: auto;
|
13 |
-
overflow-wrap: break-word;
|
14 |
-
padding-bottom: 8px;
|
15 |
-
}
|
16 |
-
|
17 |
-
.conversation .user-input {
|
18 |
-
max-height: 180px;
|
19 |
-
margin: 16px 0px;
|
20 |
-
}
|
21 |
-
|
22 |
-
.conversation .user-input input {
|
23 |
-
font-size: 1rem;
|
24 |
-
background: none;
|
25 |
-
border: none;
|
26 |
-
outline: none;
|
27 |
-
color: var(--colour-3);
|
28 |
-
}
|
29 |
-
|
30 |
-
.conversation .user-input input::placeholder {
|
31 |
-
color: var(--user-input);
|
32 |
-
}
|
33 |
-
|
34 |
-
.conversation-title {
|
35 |
-
color: var(--colour-3);
|
36 |
-
font-size: 14px;
|
37 |
-
}
|
38 |
-
|
39 |
-
.conversation .user-input textarea {
|
40 |
-
font-size: 1rem;
|
41 |
-
width: 100%;
|
42 |
-
height: 100%;
|
43 |
-
padding: 12px;
|
44 |
-
background: none;
|
45 |
-
border: none;
|
46 |
-
outline: none;
|
47 |
-
color: var(--colour-3);
|
48 |
-
resize: vertical;
|
49 |
-
max-height: 150px;
|
50 |
-
min-height: 80px;
|
51 |
-
}
|
52 |
-
|
53 |
-
.box {
|
54 |
-
backdrop-filter: blur(20px);
|
55 |
-
-webkit-backdrop-filter: blur(20px);
|
56 |
-
background-color: var(--blur-bg);
|
57 |
-
height: 100%;
|
58 |
-
width: 100%;
|
59 |
-
border-radius: var(--border-radius-1);
|
60 |
-
border: 1px solid var(--blur-border);
|
61 |
-
}
|
62 |
-
|
63 |
-
.box.input-box {
|
64 |
-
position: relative;
|
65 |
-
align-items: center;
|
66 |
-
padding: 8px;
|
67 |
-
cursor: pointer;
|
68 |
-
}
|
69 |
-
|
70 |
-
#send-button {
|
71 |
-
position: absolute;
|
72 |
-
bottom: 25%;
|
73 |
-
right: 10px;
|
74 |
-
z-index: 1;
|
75 |
-
padding: 16px;
|
76 |
-
}
|
77 |
-
|
78 |
-
#cursor {
|
79 |
-
line-height: 17px;
|
80 |
-
margin-left: 3px;
|
81 |
-
-webkit-animation: blink 0.8s infinite;
|
82 |
-
animation: blink 0.8s infinite;
|
83 |
-
width: 7px;
|
84 |
-
height: 15px;
|
85 |
-
}
|
86 |
-
|
87 |
-
@keyframes blink {
|
88 |
-
0% {
|
89 |
-
background: #ffffff00;
|
90 |
-
}
|
91 |
-
|
92 |
-
50% {
|
93 |
-
background: white;
|
94 |
-
}
|
95 |
-
|
96 |
-
100% {
|
97 |
-
background: #ffffff00;
|
98 |
-
}
|
99 |
-
}
|
100 |
-
|
101 |
-
@-webkit-keyframes blink {
|
102 |
-
0% {
|
103 |
-
background: #ffffff00;
|
104 |
-
}
|
105 |
-
|
106 |
-
50% {
|
107 |
-
background: white;
|
108 |
-
}
|
109 |
-
|
110 |
-
100% {
|
111 |
-
background: #ffffff00;
|
112 |
-
}
|
113 |
-
}
|
114 |
-
|
115 |
-
/* scrollbar */
|
116 |
-
.conversation #messages::-webkit-scrollbar {
|
117 |
-
width: 4px;
|
118 |
-
padding: 8px 0px;
|
119 |
-
}
|
120 |
-
|
121 |
-
.conversation #messages::-webkit-scrollbar-track {
|
122 |
-
background-color: #ffffff00;
|
123 |
-
}
|
124 |
-
|
125 |
-
.conversation #messages::-webkit-scrollbar-thumb {
|
126 |
-
background-color: #555555;
|
127 |
-
border-radius: 10px;
|
128 |
-
}
|
129 |
-
|
130 |
-
@media screen and (max-width: 990px) {
|
131 |
-
.conversation {
|
132 |
-
width: 100%;
|
133 |
-
height: 90%;
|
134 |
-
}
|
135 |
-
}
|
136 |
-
|
137 |
-
@media screen and (max-height: 720px) {
|
138 |
-
.conversation.box {
|
139 |
-
height: 70%;
|
140 |
-
}
|
141 |
-
|
142 |
-
.conversation .user-input textarea {
|
143 |
-
font-size: 0.875rem;
|
144 |
-
}
|
145 |
-
}
|
146 |
-
|
147 |
-
@media screen and (max-width: 360px) {
|
148 |
-
.box {
|
149 |
-
border-radius: 0;
|
150 |
-
}
|
151 |
-
.conversation {
|
152 |
-
margin: 0;
|
153 |
-
margin-top: 48px;
|
154 |
-
}
|
155 |
-
.conversation .user-input {
|
156 |
-
margin: 2px 0 8px 0;
|
157 |
-
}
|
158 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/utils/segment/dataloaders.py
DELETED
@@ -1,459 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Dataloaders
|
4 |
-
"""
|
5 |
-
|
6 |
-
import os
|
7 |
-
import random
|
8 |
-
|
9 |
-
import cv2
|
10 |
-
import numpy as np
|
11 |
-
import torch
|
12 |
-
from torch.utils.data import DataLoader, distributed
|
13 |
-
|
14 |
-
from ..augmentations import augment_hsv, copy_paste, letterbox
|
15 |
-
from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker
|
16 |
-
from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn
|
17 |
-
from ..torch_utils import torch_distributed_zero_first
|
18 |
-
from .augmentations import mixup, random_perspective
|
19 |
-
|
20 |
-
RANK = int(os.getenv("RANK", -1))
|
21 |
-
|
22 |
-
|
23 |
-
def create_dataloader(
|
24 |
-
path,
|
25 |
-
imgsz,
|
26 |
-
batch_size,
|
27 |
-
stride,
|
28 |
-
single_cls=False,
|
29 |
-
hyp=None,
|
30 |
-
augment=False,
|
31 |
-
cache=False,
|
32 |
-
pad=0.0,
|
33 |
-
rect=False,
|
34 |
-
rank=-1,
|
35 |
-
workers=8,
|
36 |
-
image_weights=False,
|
37 |
-
quad=False,
|
38 |
-
prefix="",
|
39 |
-
shuffle=False,
|
40 |
-
mask_downsample_ratio=1,
|
41 |
-
overlap_mask=False,
|
42 |
-
seed=0,
|
43 |
-
):
|
44 |
-
if rect and shuffle:
|
45 |
-
LOGGER.warning(
|
46 |
-
"WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False"
|
47 |
-
)
|
48 |
-
shuffle = False
|
49 |
-
with torch_distributed_zero_first(
|
50 |
-
rank
|
51 |
-
): # init dataset *.cache only once if DDP
|
52 |
-
dataset = LoadImagesAndLabelsAndMasks(
|
53 |
-
path,
|
54 |
-
imgsz,
|
55 |
-
batch_size,
|
56 |
-
augment=augment, # augmentation
|
57 |
-
hyp=hyp, # hyperparameters
|
58 |
-
rect=rect, # rectangular batches
|
59 |
-
cache_images=cache,
|
60 |
-
single_cls=single_cls,
|
61 |
-
stride=int(stride),
|
62 |
-
pad=pad,
|
63 |
-
image_weights=image_weights,
|
64 |
-
prefix=prefix,
|
65 |
-
downsample_ratio=mask_downsample_ratio,
|
66 |
-
overlap=overlap_mask,
|
67 |
-
)
|
68 |
-
|
69 |
-
batch_size = min(batch_size, len(dataset))
|
70 |
-
nd = torch.cuda.device_count() # number of CUDA devices
|
71 |
-
nw = min(
|
72 |
-
[
|
73 |
-
os.cpu_count() // max(nd, 1),
|
74 |
-
batch_size if batch_size > 1 else 0,
|
75 |
-
workers,
|
76 |
-
]
|
77 |
-
) # number of workers
|
78 |
-
sampler = (
|
79 |
-
None
|
80 |
-
if rank == -1
|
81 |
-
else distributed.DistributedSampler(dataset, shuffle=shuffle)
|
82 |
-
)
|
83 |
-
loader = (
|
84 |
-
DataLoader if image_weights else InfiniteDataLoader
|
85 |
-
) # only DataLoader allows for attribute updates
|
86 |
-
generator = torch.Generator()
|
87 |
-
generator.manual_seed(6148914691236517205 + seed + RANK)
|
88 |
-
return (
|
89 |
-
loader(
|
90 |
-
dataset,
|
91 |
-
batch_size=batch_size,
|
92 |
-
shuffle=shuffle and sampler is None,
|
93 |
-
num_workers=nw,
|
94 |
-
sampler=sampler,
|
95 |
-
pin_memory=True,
|
96 |
-
collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4
|
97 |
-
if quad
|
98 |
-
else LoadImagesAndLabelsAndMasks.collate_fn,
|
99 |
-
worker_init_fn=seed_worker,
|
100 |
-
generator=generator,
|
101 |
-
),
|
102 |
-
dataset,
|
103 |
-
)
|
104 |
-
|
105 |
-
|
106 |
-
class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing
|
107 |
-
def __init__(
|
108 |
-
self,
|
109 |
-
path,
|
110 |
-
img_size=640,
|
111 |
-
batch_size=16,
|
112 |
-
augment=False,
|
113 |
-
hyp=None,
|
114 |
-
rect=False,
|
115 |
-
image_weights=False,
|
116 |
-
cache_images=False,
|
117 |
-
single_cls=False,
|
118 |
-
stride=32,
|
119 |
-
pad=0,
|
120 |
-
min_items=0,
|
121 |
-
prefix="",
|
122 |
-
downsample_ratio=1,
|
123 |
-
overlap=False,
|
124 |
-
):
|
125 |
-
super().__init__(
|
126 |
-
path,
|
127 |
-
img_size,
|
128 |
-
batch_size,
|
129 |
-
augment,
|
130 |
-
hyp,
|
131 |
-
rect,
|
132 |
-
image_weights,
|
133 |
-
cache_images,
|
134 |
-
single_cls,
|
135 |
-
stride,
|
136 |
-
pad,
|
137 |
-
min_items,
|
138 |
-
prefix,
|
139 |
-
)
|
140 |
-
self.downsample_ratio = downsample_ratio
|
141 |
-
self.overlap = overlap
|
142 |
-
|
143 |
-
def __getitem__(self, index):
|
144 |
-
index = self.indices[index] # linear, shuffled, or image_weights
|
145 |
-
|
146 |
-
hyp = self.hyp
|
147 |
-
mosaic = self.mosaic and random.random() < hyp["mosaic"]
|
148 |
-
masks = []
|
149 |
-
if mosaic:
|
150 |
-
# Load mosaic
|
151 |
-
img, labels, segments = self.load_mosaic(index)
|
152 |
-
shapes = None
|
153 |
-
|
154 |
-
# MixUp augmentation
|
155 |
-
if random.random() < hyp["mixup"]:
|
156 |
-
img, labels, segments = mixup(
|
157 |
-
img,
|
158 |
-
labels,
|
159 |
-
segments,
|
160 |
-
*self.load_mosaic(random.randint(0, self.n - 1)),
|
161 |
-
)
|
162 |
-
|
163 |
-
else:
|
164 |
-
# Load image
|
165 |
-
img, (h0, w0), (h, w) = self.load_image(index)
|
166 |
-
|
167 |
-
# Letterbox
|
168 |
-
shape = (
|
169 |
-
self.batch_shapes[self.batch[index]]
|
170 |
-
if self.rect
|
171 |
-
else self.img_size
|
172 |
-
) # final letterboxed shape
|
173 |
-
img, ratio, pad = letterbox(
|
174 |
-
img, shape, auto=False, scaleup=self.augment
|
175 |
-
)
|
176 |
-
shapes = (h0, w0), (
|
177 |
-
(h / h0, w / w0),
|
178 |
-
pad,
|
179 |
-
) # for COCO mAP rescaling
|
180 |
-
|
181 |
-
labels = self.labels[index].copy()
|
182 |
-
# [array, array, ....], array.shape=(num_points, 2), xyxyxyxy
|
183 |
-
segments = self.segments[index].copy()
|
184 |
-
if len(segments):
|
185 |
-
for i_s in range(len(segments)):
|
186 |
-
segments[i_s] = xyn2xy(
|
187 |
-
segments[i_s],
|
188 |
-
ratio[0] * w,
|
189 |
-
ratio[1] * h,
|
190 |
-
padw=pad[0],
|
191 |
-
padh=pad[1],
|
192 |
-
)
|
193 |
-
if labels.size: # normalized xywh to pixel xyxy format
|
194 |
-
labels[:, 1:] = xywhn2xyxy(
|
195 |
-
labels[:, 1:],
|
196 |
-
ratio[0] * w,
|
197 |
-
ratio[1] * h,
|
198 |
-
padw=pad[0],
|
199 |
-
padh=pad[1],
|
200 |
-
)
|
201 |
-
|
202 |
-
if self.augment:
|
203 |
-
img, labels, segments = random_perspective(
|
204 |
-
img,
|
205 |
-
labels,
|
206 |
-
segments=segments,
|
207 |
-
degrees=hyp["degrees"],
|
208 |
-
translate=hyp["translate"],
|
209 |
-
scale=hyp["scale"],
|
210 |
-
shear=hyp["shear"],
|
211 |
-
perspective=hyp["perspective"],
|
212 |
-
)
|
213 |
-
|
214 |
-
nl = len(labels) # number of labels
|
215 |
-
if nl:
|
216 |
-
labels[:, 1:5] = xyxy2xywhn(
|
217 |
-
labels[:, 1:5],
|
218 |
-
w=img.shape[1],
|
219 |
-
h=img.shape[0],
|
220 |
-
clip=True,
|
221 |
-
eps=1e-3,
|
222 |
-
)
|
223 |
-
if self.overlap:
|
224 |
-
masks, sorted_idx = polygons2masks_overlap(
|
225 |
-
img.shape[:2],
|
226 |
-
segments,
|
227 |
-
downsample_ratio=self.downsample_ratio,
|
228 |
-
)
|
229 |
-
masks = masks[None] # (640, 640) -> (1, 640, 640)
|
230 |
-
labels = labels[sorted_idx]
|
231 |
-
else:
|
232 |
-
masks = polygons2masks(
|
233 |
-
img.shape[:2],
|
234 |
-
segments,
|
235 |
-
color=1,
|
236 |
-
downsample_ratio=self.downsample_ratio,
|
237 |
-
)
|
238 |
-
|
239 |
-
masks = (
|
240 |
-
torch.from_numpy(masks)
|
241 |
-
if len(masks)
|
242 |
-
else torch.zeros(
|
243 |
-
1 if self.overlap else nl,
|
244 |
-
img.shape[0] // self.downsample_ratio,
|
245 |
-
img.shape[1] // self.downsample_ratio,
|
246 |
-
)
|
247 |
-
)
|
248 |
-
# TODO: albumentations support
|
249 |
-
if self.augment:
|
250 |
-
# Albumentations
|
251 |
-
# there are some augmentation that won't change boxes and masks,
|
252 |
-
# so just be it for now.
|
253 |
-
img, labels = self.albumentations(img, labels)
|
254 |
-
nl = len(labels) # update after albumentations
|
255 |
-
|
256 |
-
# HSV color-space
|
257 |
-
augment_hsv(
|
258 |
-
img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]
|
259 |
-
)
|
260 |
-
|
261 |
-
# Flip up-down
|
262 |
-
if random.random() < hyp["flipud"]:
|
263 |
-
img = np.flipud(img)
|
264 |
-
if nl:
|
265 |
-
labels[:, 2] = 1 - labels[:, 2]
|
266 |
-
masks = torch.flip(masks, dims=[1])
|
267 |
-
|
268 |
-
# Flip left-right
|
269 |
-
if random.random() < hyp["fliplr"]:
|
270 |
-
img = np.fliplr(img)
|
271 |
-
if nl:
|
272 |
-
labels[:, 1] = 1 - labels[:, 1]
|
273 |
-
masks = torch.flip(masks, dims=[2])
|
274 |
-
|
275 |
-
# Cutouts # labels = cutout(img, labels, p=0.5)
|
276 |
-
|
277 |
-
labels_out = torch.zeros((nl, 6))
|
278 |
-
if nl:
|
279 |
-
labels_out[:, 1:] = torch.from_numpy(labels)
|
280 |
-
|
281 |
-
# Convert
|
282 |
-
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
|
283 |
-
img = np.ascontiguousarray(img)
|
284 |
-
|
285 |
-
return (
|
286 |
-
torch.from_numpy(img),
|
287 |
-
labels_out,
|
288 |
-
self.im_files[index],
|
289 |
-
shapes,
|
290 |
-
masks,
|
291 |
-
)
|
292 |
-
|
293 |
-
def load_mosaic(self, index):
|
294 |
-
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
|
295 |
-
labels4, segments4 = [], []
|
296 |
-
s = self.img_size
|
297 |
-
yc, xc = (
|
298 |
-
int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border
|
299 |
-
) # mosaic center x, y
|
300 |
-
|
301 |
-
# 3 additional image indices
|
302 |
-
indices = [index] + random.choices(
|
303 |
-
self.indices, k=3
|
304 |
-
) # 3 additional image indices
|
305 |
-
for i, index in enumerate(indices):
|
306 |
-
# Load image
|
307 |
-
img, _, (h, w) = self.load_image(index)
|
308 |
-
|
309 |
-
# place img in img4
|
310 |
-
if i == 0: # top left
|
311 |
-
img4 = np.full(
|
312 |
-
(s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8
|
313 |
-
) # base image with 4 tiles
|
314 |
-
x1a, y1a, x2a, y2a = (
|
315 |
-
max(xc - w, 0),
|
316 |
-
max(yc - h, 0),
|
317 |
-
xc,
|
318 |
-
yc,
|
319 |
-
) # xmin, ymin, xmax, ymax (large image)
|
320 |
-
x1b, y1b, x2b, y2b = (
|
321 |
-
w - (x2a - x1a),
|
322 |
-
h - (y2a - y1a),
|
323 |
-
w,
|
324 |
-
h,
|
325 |
-
) # xmin, ymin, xmax, ymax (small image)
|
326 |
-
elif i == 1: # top right
|
327 |
-
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
|
328 |
-
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
|
329 |
-
elif i == 2: # bottom left
|
330 |
-
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
|
331 |
-
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
|
332 |
-
elif i == 3: # bottom right
|
333 |
-
x1a, y1a, x2a, y2a = (
|
334 |
-
xc,
|
335 |
-
yc,
|
336 |
-
min(xc + w, s * 2),
|
337 |
-
min(s * 2, yc + h),
|
338 |
-
)
|
339 |
-
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
|
340 |
-
|
341 |
-
img4[y1a:y2a, x1a:x2a] = img[
|
342 |
-
y1b:y2b, x1b:x2b
|
343 |
-
] # img4[ymin:ymax, xmin:xmax]
|
344 |
-
padw = x1a - x1b
|
345 |
-
padh = y1a - y1b
|
346 |
-
|
347 |
-
labels, segments = (
|
348 |
-
self.labels[index].copy(),
|
349 |
-
self.segments[index].copy(),
|
350 |
-
)
|
351 |
-
|
352 |
-
if labels.size:
|
353 |
-
labels[:, 1:] = xywhn2xyxy(
|
354 |
-
labels[:, 1:], w, h, padw, padh
|
355 |
-
) # normalized xywh to pixel xyxy format
|
356 |
-
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
|
357 |
-
labels4.append(labels)
|
358 |
-
segments4.extend(segments)
|
359 |
-
|
360 |
-
# Concat/clip labels
|
361 |
-
labels4 = np.concatenate(labels4, 0)
|
362 |
-
for x in (labels4[:, 1:], *segments4):
|
363 |
-
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
|
364 |
-
# img4, labels4 = replicate(img4, labels4) # replicate
|
365 |
-
|
366 |
-
# Augment
|
367 |
-
img4, labels4, segments4 = copy_paste(
|
368 |
-
img4, labels4, segments4, p=self.hyp["copy_paste"]
|
369 |
-
)
|
370 |
-
img4, labels4, segments4 = random_perspective(
|
371 |
-
img4,
|
372 |
-
labels4,
|
373 |
-
segments4,
|
374 |
-
degrees=self.hyp["degrees"],
|
375 |
-
translate=self.hyp["translate"],
|
376 |
-
scale=self.hyp["scale"],
|
377 |
-
shear=self.hyp["shear"],
|
378 |
-
perspective=self.hyp["perspective"],
|
379 |
-
border=self.mosaic_border,
|
380 |
-
) # border to remove
|
381 |
-
return img4, labels4, segments4
|
382 |
-
|
383 |
-
@staticmethod
|
384 |
-
def collate_fn(batch):
|
385 |
-
img, label, path, shapes, masks = zip(*batch) # transposed
|
386 |
-
batched_masks = torch.cat(masks, 0)
|
387 |
-
for i, l in enumerate(label):
|
388 |
-
l[:, 0] = i # add target image index for build_targets()
|
389 |
-
return (
|
390 |
-
torch.stack(img, 0),
|
391 |
-
torch.cat(label, 0),
|
392 |
-
path,
|
393 |
-
shapes,
|
394 |
-
batched_masks,
|
395 |
-
)
|
396 |
-
|
397 |
-
|
398 |
-
def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
|
399 |
-
"""
|
400 |
-
Args:
|
401 |
-
img_size (tuple): The image size.
|
402 |
-
polygons (np.ndarray): [N, M], N is the number of polygons,
|
403 |
-
M is the number of points(Be divided by 2).
|
404 |
-
"""
|
405 |
-
mask = np.zeros(img_size, dtype=np.uint8)
|
406 |
-
polygons = np.asarray(polygons)
|
407 |
-
polygons = polygons.astype(np.int32)
|
408 |
-
shape = polygons.shape
|
409 |
-
polygons = polygons.reshape(shape[0], -1, 2)
|
410 |
-
cv2.fillPoly(mask, polygons, color=color)
|
411 |
-
nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
|
412 |
-
# NOTE: fillPoly firstly then resize is trying the keep the same way
|
413 |
-
# of loss calculation when mask-ratio=1.
|
414 |
-
mask = cv2.resize(mask, (nw, nh))
|
415 |
-
return mask
|
416 |
-
|
417 |
-
|
418 |
-
def polygons2masks(img_size, polygons, color, downsample_ratio=1):
|
419 |
-
"""
|
420 |
-
Args:
|
421 |
-
img_size (tuple): The image size.
|
422 |
-
polygons (list[np.ndarray]): each polygon is [N, M],
|
423 |
-
N is the number of polygons,
|
424 |
-
M is the number of points(Be divided by 2).
|
425 |
-
"""
|
426 |
-
masks = []
|
427 |
-
for si in range(len(polygons)):
|
428 |
-
mask = polygon2mask(
|
429 |
-
img_size, [polygons[si].reshape(-1)], color, downsample_ratio
|
430 |
-
)
|
431 |
-
masks.append(mask)
|
432 |
-
return np.array(masks)
|
433 |
-
|
434 |
-
|
435 |
-
def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
|
436 |
-
"""Return a (640, 640) overlap mask."""
|
437 |
-
masks = np.zeros(
|
438 |
-
(img_size[0] // downsample_ratio, img_size[1] // downsample_ratio),
|
439 |
-
dtype=np.int32 if len(segments) > 255 else np.uint8,
|
440 |
-
)
|
441 |
-
areas = []
|
442 |
-
ms = []
|
443 |
-
for si in range(len(segments)):
|
444 |
-
mask = polygon2mask(
|
445 |
-
img_size,
|
446 |
-
[segments[si].reshape(-1)],
|
447 |
-
downsample_ratio=downsample_ratio,
|
448 |
-
color=1,
|
449 |
-
)
|
450 |
-
ms.append(mask)
|
451 |
-
areas.append(mask.sum())
|
452 |
-
areas = np.asarray(areas)
|
453 |
-
index = np.argsort(-areas)
|
454 |
-
ms = np.array(ms)[index]
|
455 |
-
for i in range(len(segments)):
|
456 |
-
mask = ms[i] * (i + 1)
|
457 |
-
masks = masks + mask
|
458 |
-
masks = np.clip(masks, a_min=0, a_max=i + 1)
|
459 |
-
return masks, index
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/r/[id]/$types.d.ts
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
import type * as Kit from '@sveltejs/kit';
|
2 |
-
|
3 |
-
type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
|
4 |
-
type RouteParams = { id: string }
|
5 |
-
type RouteId = '/r/[id]';
|
6 |
-
type MaybeWithVoid<T> = {} extends T ? T | void : T;
|
7 |
-
export type RequiredKeys<T> = { [K in keyof T]-?: {} extends { [P in K]: T[K] } ? never : K; }[keyof T];
|
8 |
-
type OutputDataShape<T> = MaybeWithVoid<Omit<App.PageData, RequiredKeys<T>> & Partial<Pick<App.PageData, keyof T & keyof App.PageData>> & Record<string, any>>
|
9 |
-
type EnsureDefined<T> = T extends null | undefined ? {} : T;
|
10 |
-
type OptionalUnion<U extends Record<string, any>, A extends keyof U = U extends U ? keyof U : never> = U extends unknown ? { [P in Exclude<A, keyof U>]?: never } & U : never;
|
11 |
-
export type Snapshot<T = any> = Kit.Snapshot<T>;
|
12 |
-
type PageServerParentData = EnsureDefined<import('../../$types.js').LayoutServerData>;
|
13 |
-
type PageParentData = EnsureDefined<import('../../$types.js').LayoutData>;
|
14 |
-
|
15 |
-
export type EntryGenerator = () => Promise<Array<RouteParams>> | Array<RouteParams>;
|
16 |
-
export type PageServerLoad<OutputData extends OutputDataShape<PageServerParentData> = OutputDataShape<PageServerParentData>> = Kit.ServerLoad<RouteParams, PageServerParentData, OutputData, RouteId>;
|
17 |
-
export type PageServerLoadEvent = Parameters<PageServerLoad>[0];
|
18 |
-
export type ActionData = unknown;
|
19 |
-
export type PageServerData = Expand<OptionalUnion<EnsureDefined<Kit.AwaitedProperties<Awaited<ReturnType<typeof import('./proxy+page.server.js').load>>>>>>;
|
20 |
-
export type PageData = Expand<Omit<PageParentData, keyof PageServerData> & EnsureDefined<PageServerData>>;
|
21 |
-
export type Action<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Action<RouteParams, OutputData, RouteId>
|
22 |
-
export type Actions<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Actions<RouteParams, OutputData, RouteId>
|
23 |
-
export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/agents/tasksolving_agent/critic.py
DELETED
@@ -1,127 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import json
|
4 |
-
from colorama import Fore
|
5 |
-
from agentverse.logging import get_logger
|
6 |
-
import bdb
|
7 |
-
from string import Template
|
8 |
-
from typing import TYPE_CHECKING, List, Union
|
9 |
-
|
10 |
-
from agentverse.message import Message
|
11 |
-
|
12 |
-
from agentverse.agents import agent_registry
|
13 |
-
from agentverse.agents.base import BaseAgent
|
14 |
-
from agentverse.utils import AgentCriticism
|
15 |
-
from agentverse.message import CriticMessage
|
16 |
-
|
17 |
-
logger = get_logger()
|
18 |
-
|
19 |
-
|
20 |
-
@agent_registry.register("critic")
|
21 |
-
class CriticAgent(BaseAgent):
|
22 |
-
max_history: int = 3
|
23 |
-
tools: List[dict] = []
|
24 |
-
tool_names: List[str] = []
|
25 |
-
tool_descriptions: str = ""
|
26 |
-
|
27 |
-
def __init__(self, *args, **kwargs):
|
28 |
-
tool_config_file = kwargs.pop("tool_config", "")
|
29 |
-
tools = []
|
30 |
-
tool_names = []
|
31 |
-
tool_descriptions = ""
|
32 |
-
if tool_config_file != "":
|
33 |
-
try:
|
34 |
-
with open(tool_config_file, "r") as f:
|
35 |
-
tools_dict = json.load(f)
|
36 |
-
tools = tools_dict["tools_json"]
|
37 |
-
tool_names = [t["name"] for t in tools]
|
38 |
-
tool_descriptions = "\n".join(
|
39 |
-
[f"- {t['name']}: " + t["description"] for t in tools]
|
40 |
-
)
|
41 |
-
kwargs.update('tools', tools)
|
42 |
-
kwargs.update('tool_names', tool_names)
|
43 |
-
kwargs.update('tool_descriptions', tool_descriptions)
|
44 |
-
except Exception as e:
|
45 |
-
logger.error(e)
|
46 |
-
logger.warn("Failed to load tool config file.")
|
47 |
-
super().__init__(
|
48 |
-
*args,
|
49 |
-
**kwargs,
|
50 |
-
)
|
51 |
-
|
52 |
-
def step(self, env_description: str = "") -> CriticMessage:
|
53 |
-
pass
|
54 |
-
|
55 |
-
async def astep(
|
56 |
-
self,
|
57 |
-
preliminary_solution: str,
|
58 |
-
advice: str = "No advice yet.",
|
59 |
-
task_description: str = "",
|
60 |
-
all_roles: str = "",
|
61 |
-
**kwargs,
|
62 |
-
) -> CriticMessage:
|
63 |
-
"""Asynchronous version of step"""
|
64 |
-
logger.debug("", self.name, Fore.MAGENTA)
|
65 |
-
prepend_prompt, append_prompt = self.get_all_prompts(
|
66 |
-
preliminary_solution=preliminary_solution,
|
67 |
-
advice=advice,
|
68 |
-
task_description=task_description,
|
69 |
-
role_description=self.role_description,
|
70 |
-
agent_name=self.name,
|
71 |
-
all_roles=all_roles,
|
72 |
-
# tool_names=self.tool_names,
|
73 |
-
tool_descriptions=self.tool_descriptions,
|
74 |
-
)
|
75 |
-
history = self.memory.to_messages(self.name, start_index=-self.max_history)
|
76 |
-
parsed_response: Union[AgentCriticism, None] = None
|
77 |
-
for i in range(self.max_retry):
|
78 |
-
try:
|
79 |
-
response = await self.llm.agenerate_response(
|
80 |
-
prepend_prompt, history, append_prompt
|
81 |
-
)
|
82 |
-
parsed_response = self.output_parser.parse(response)
|
83 |
-
break
|
84 |
-
except (KeyboardInterrupt, bdb.BdbQuit):
|
85 |
-
raise
|
86 |
-
except Exception as e:
|
87 |
-
logger.error(e)
|
88 |
-
logger.warn("Retrying...")
|
89 |
-
continue
|
90 |
-
|
91 |
-
if parsed_response is None:
|
92 |
-
logger.error(f"{self.name} failed to generate valid response.")
|
93 |
-
|
94 |
-
message = CriticMessage(
|
95 |
-
content=parsed_response.criticism if parsed_response is not None else "",
|
96 |
-
sender=self.name,
|
97 |
-
sender_agent=self,
|
98 |
-
is_agree=parsed_response.is_agree if parsed_response is not None else False,
|
99 |
-
)
|
100 |
-
return message
|
101 |
-
|
102 |
-
def _fill_prompt_template(
|
103 |
-
self, preliminary_solution: str, advice: str, task_description: str
|
104 |
-
) -> str:
|
105 |
-
"""Fill the placeholders in the prompt template
|
106 |
-
|
107 |
-
In the conversation agent, three placeholders are supported:
|
108 |
-
- ${role_description}
|
109 |
-
- ${task_description}
|
110 |
-
- ${preliminary_solution}
|
111 |
-
- ${advice}
|
112 |
-
"""
|
113 |
-
input_arguments = {
|
114 |
-
"role_description": self.role_description,
|
115 |
-
"task_description": task_description,
|
116 |
-
"preliminary_solution": preliminary_solution,
|
117 |
-
"advice": advice,
|
118 |
-
}
|
119 |
-
return Template(self.prompt_template).safe_substitute(input_arguments)
|
120 |
-
|
121 |
-
def add_message_to_memory(self, messages: List[Message]) -> None:
|
122 |
-
self.memory.add_message(messages)
|
123 |
-
|
124 |
-
def reset(self) -> None:
|
125 |
-
"""Reset the agent"""
|
126 |
-
self.memory.reset()
|
127 |
-
# TODO: reset receiver
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/__init__.py
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
from agentverse.registry import Registry
|
2 |
-
|
3 |
-
executor_registry = Registry(name="ExecutorRegistry")
|
4 |
-
|
5 |
-
from .base import BaseExecutor, NoneExecutor
|
6 |
-
from .code_test import CodeTestExecutor
|
7 |
-
from .tool_using import ToolUsingExecutor
|
8 |
-
from .coverage_test import CoverageTestExecutor
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/setup.py
DELETED
@@ -1,50 +0,0 @@
|
|
1 |
-
import setuptools
|
2 |
-
from setuptools.command.develop import develop
|
3 |
-
import subprocess
|
4 |
-
|
5 |
-
with open("requirements.txt", "r") as f:
|
6 |
-
requirements = f.read().splitlines()
|
7 |
-
|
8 |
-
with open("README.md", "r", encoding='utf8') as fh:
|
9 |
-
long_description = fh.read()
|
10 |
-
|
11 |
-
setuptools.setup(
|
12 |
-
name="agentverse",
|
13 |
-
version="0.1.5",
|
14 |
-
author="OpenBMB",
|
15 |
-
author_email="[email protected]",
|
16 |
-
description="A versatile framework that streamlines the process of creating custom multi-agent environments for large language models (LLMs).",
|
17 |
-
long_description=long_description,
|
18 |
-
long_description_content_type="text/markdown",
|
19 |
-
url="https://github.com/OpenBMB/AgentVerse",
|
20 |
-
packages=setuptools.find_packages(),
|
21 |
-
classifiers=[
|
22 |
-
"Programming Language :: Python :: 3",
|
23 |
-
'License :: OSI Approved :: Apache Software License',
|
24 |
-
"Operating System :: OS Independent",
|
25 |
-
],
|
26 |
-
python_requires=">=3.9",
|
27 |
-
# install_requires=[
|
28 |
-
# "PyYAML",
|
29 |
-
# "fastapi",
|
30 |
-
# "uvicorn",
|
31 |
-
# "py3langid",
|
32 |
-
# "iso-639",
|
33 |
-
# "openai",
|
34 |
-
# "opencv-python",
|
35 |
-
# "gradio",
|
36 |
-
# "httpx[socks]",
|
37 |
-
# "astunparse",
|
38 |
-
# "langchain",
|
39 |
-
# ],
|
40 |
-
install_requires=requirements,
|
41 |
-
include_package_data = True,
|
42 |
-
entry_points={
|
43 |
-
"console_scripts": [
|
44 |
-
"agentverse-benchmark = agentverse_command.benchmark:cli_main",
|
45 |
-
"agentverse-simulation = agentverse_command.main_simulation_cli:cli_main",
|
46 |
-
"agentverse-simulation-gui = agentverse_command.main_simulation_gui:cli_main",
|
47 |
-
"agentverse-tasksolving = agentverse_command.main_tasksolving_cli:cli_main",
|
48 |
-
],
|
49 |
-
},
|
50 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/Factory.js
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
import DynamicText from './DynamicText.js';
|
2 |
-
import ObjectFactory from '../ObjectFactory.js';
|
3 |
-
import SetValue from '../../../plugins/utils/object/SetValue.js';
|
4 |
-
|
5 |
-
ObjectFactory.register('dynamicText', function (x, y, width, height, config) {
|
6 |
-
var gameObject = new DynamicText(this.scene, x, y, width, height, config);
|
7 |
-
this.scene.add.existing(gameObject);
|
8 |
-
return gameObject;
|
9 |
-
});
|
10 |
-
|
11 |
-
SetValue(window, 'RexPlugins.UI.DynamicText', DynamicText);
|
12 |
-
|
13 |
-
export default DynamicText;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Akshay-More-007/starcoder/app.py
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer
|
2 |
-
|
3 |
-
checkpoint = "bigcode/starcoder"
|
4 |
-
device = "cpu" # for GPU usage or "cpu" for CPU usage
|
5 |
-
api_key = "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"
|
6 |
-
tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_auth_token=api_key)
|
7 |
-
model = AutoModelForCausalLM.from_pretrained(checkpoint, use_auth_token=api_key).to(device)
|
8 |
-
|
9 |
-
inputs = tokenizer.encode("def print_hello_world ():", return_tensors="pt").to(device)
|
10 |
-
outputs = model.generate(inputs)
|
11 |
-
print(tokenizer.decode(outputs[0]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/cleaners.py
DELETED
@@ -1,134 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3
|
3 |
-
from text.korean import latin_to_hangul, number_to_hangul, divide_hangul, korean_to_lazy_ipa, korean_to_ipa
|
4 |
-
from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2
|
5 |
-
from text.sanskrit import devanagari_to_ipa
|
6 |
-
from text.english import english_to_lazy_ipa, english_to_ipa2, english_to_lazy_ipa2
|
7 |
-
from text.thai import num_to_thai, latin_to_thai
|
8 |
-
# from text.shanghainese import shanghainese_to_ipa
|
9 |
-
# from text.cantonese import cantonese_to_ipa
|
10 |
-
# from text.ngu_dialect import ngu_dialect_to_ipa
|
11 |
-
|
12 |
-
|
13 |
-
def japanese_cleaners(text):
|
14 |
-
text = japanese_to_romaji_with_accent(text)
|
15 |
-
text = re.sub(r'([A-Za-z])$', r'\1.', text)
|
16 |
-
return text
|
17 |
-
|
18 |
-
|
19 |
-
def japanese_cleaners2(text):
|
20 |
-
return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
|
21 |
-
|
22 |
-
|
23 |
-
def korean_cleaners(text):
|
24 |
-
'''Pipeline for Korean text'''
|
25 |
-
text = latin_to_hangul(text)
|
26 |
-
text = number_to_hangul(text)
|
27 |
-
text = divide_hangul(text)
|
28 |
-
text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
|
29 |
-
return text
|
30 |
-
|
31 |
-
|
32 |
-
# def chinese_cleaners(text):
|
33 |
-
# '''Pipeline for Chinese text'''
|
34 |
-
# text = number_to_chinese(text)
|
35 |
-
# text = chinese_to_bopomofo(text)
|
36 |
-
# text = latin_to_bopomofo(text)
|
37 |
-
# text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
|
38 |
-
# return text
|
39 |
-
|
40 |
-
def chinese_cleaners(text):
|
41 |
-
from pypinyin import Style, pinyin
|
42 |
-
text = text.replace("[ZH]", "")
|
43 |
-
phones = [phone[0] for phone in pinyin(text, style=Style.TONE3)]
|
44 |
-
return ' '.join(phones)
|
45 |
-
|
46 |
-
|
47 |
-
def zh_ja_mixture_cleaners(text):
|
48 |
-
text = re.sub(r'\[ZH\](.*?)\[ZH\]',
|
49 |
-
lambda x: chinese_to_romaji(x.group(1))+' ', text)
|
50 |
-
text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
|
51 |
-
x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text)
|
52 |
-
text = re.sub(r'\s+$', '', text)
|
53 |
-
text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
|
54 |
-
return text
|
55 |
-
|
56 |
-
|
57 |
-
def sanskrit_cleaners(text):
|
58 |
-
text = text.replace('॥', '।').replace('ॐ', 'ओम्')
|
59 |
-
text = re.sub(r'([^।])$', r'\1।', text)
|
60 |
-
return text
|
61 |
-
|
62 |
-
|
63 |
-
def cjks_cleaners(text):
|
64 |
-
text = re.sub(r'\[ZH\](.*?)\[ZH\]',
|
65 |
-
lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text)
|
66 |
-
text = re.sub(r'\[JA\](.*?)\[JA\]',
|
67 |
-
lambda x: japanese_to_ipa(x.group(1))+' ', text)
|
68 |
-
text = re.sub(r'\[KO\](.*?)\[KO\]',
|
69 |
-
lambda x: korean_to_lazy_ipa(x.group(1))+' ', text)
|
70 |
-
text = re.sub(r'\[SA\](.*?)\[SA\]',
|
71 |
-
lambda x: devanagari_to_ipa(x.group(1))+' ', text)
|
72 |
-
text = re.sub(r'\[EN\](.*?)\[EN\]',
|
73 |
-
lambda x: english_to_lazy_ipa(x.group(1))+' ', text)
|
74 |
-
text = re.sub(r'\s+$', '', text)
|
75 |
-
text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
|
76 |
-
return text
|
77 |
-
|
78 |
-
|
79 |
-
def cjke_cleaners(text):
|
80 |
-
text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
|
81 |
-
'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text)
|
82 |
-
text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
|
83 |
-
'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text)
|
84 |
-
text = re.sub(r'\[KO\](.*?)\[KO\]',
|
85 |
-
lambda x: korean_to_ipa(x.group(1))+' ', text)
|
86 |
-
text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
|
87 |
-
'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text)
|
88 |
-
text = re.sub(r'\s+$', '', text)
|
89 |
-
text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
|
90 |
-
return text
|
91 |
-
|
92 |
-
|
93 |
-
def cjke_cleaners2(text):
|
94 |
-
text = re.sub(r'\[ZH\](.*?)\[ZH\]',
|
95 |
-
lambda x: chinese_to_ipa(x.group(1))+' ', text)
|
96 |
-
text = re.sub(r'\[JA\](.*?)\[JA\]',
|
97 |
-
lambda x: japanese_to_ipa2(x.group(1))+' ', text)
|
98 |
-
text = re.sub(r'\[KO\](.*?)\[KO\]',
|
99 |
-
lambda x: korean_to_ipa(x.group(1))+' ', text)
|
100 |
-
text = re.sub(r'\[EN\](.*?)\[EN\]',
|
101 |
-
lambda x: english_to_ipa2(x.group(1))+' ', text)
|
102 |
-
text = re.sub(r'\s+$', '', text)
|
103 |
-
text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
|
104 |
-
return text
|
105 |
-
|
106 |
-
|
107 |
-
def thai_cleaners(text):
|
108 |
-
text = num_to_thai(text)
|
109 |
-
text = latin_to_thai(text)
|
110 |
-
return text
|
111 |
-
|
112 |
-
|
113 |
-
# def shanghainese_cleaners(text):
|
114 |
-
# text = shanghainese_to_ipa(text)
|
115 |
-
# text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
|
116 |
-
# return text
|
117 |
-
|
118 |
-
|
119 |
-
# def chinese_dialect_cleaners(text):
|
120 |
-
# text = re.sub(r'\[ZH\](.*?)\[ZH\]',
|
121 |
-
# lambda x: chinese_to_ipa2(x.group(1))+' ', text)
|
122 |
-
# text = re.sub(r'\[JA\](.*?)\[JA\]',
|
123 |
-
# lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
|
124 |
-
# text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
|
125 |
-
# '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
|
126 |
-
# text = re.sub(r'\[GD\](.*?)\[GD\]',
|
127 |
-
# lambda x: cantonese_to_ipa(x.group(1))+' ', text)
|
128 |
-
# text = re.sub(r'\[EN\](.*?)\[EN\]',
|
129 |
-
# lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
|
130 |
-
# text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
|
131 |
-
# 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
|
132 |
-
# text = re.sub(r'\s+$', '', text)
|
133 |
-
# text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
|
134 |
-
# return text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/countless3d.py
DELETED
@@ -1,356 +0,0 @@
|
|
1 |
-
from six.moves import range
|
2 |
-
from PIL import Image
|
3 |
-
import numpy as np
|
4 |
-
import io
|
5 |
-
import time
|
6 |
-
import math
|
7 |
-
import random
|
8 |
-
import sys
|
9 |
-
from collections import defaultdict
|
10 |
-
from copy import deepcopy
|
11 |
-
from itertools import combinations
|
12 |
-
from functools import reduce
|
13 |
-
from tqdm import tqdm
|
14 |
-
|
15 |
-
from memory_profiler import profile
|
16 |
-
|
17 |
-
def countless5(a,b,c,d,e):
|
18 |
-
"""First stage of generalizing from countless2d.
|
19 |
-
|
20 |
-
You have five slots: A, B, C, D, E
|
21 |
-
|
22 |
-
You can decide if something is the winner by first checking for
|
23 |
-
matches of three, then matches of two, then picking just one if
|
24 |
-
the other two tries fail. In countless2d, you just check for matches
|
25 |
-
of two and then pick one of them otherwise.
|
26 |
-
|
27 |
-
Unfortunately, you need to check ABC, ABD, ABE, BCD, BDE, & CDE.
|
28 |
-
Then you need to check AB, AC, AD, BC, BD
|
29 |
-
We skip checking E because if none of these match, we pick E. We can
|
30 |
-
skip checking AE, BE, CE, DE since if any of those match, E is our boy
|
31 |
-
so it's redundant.
|
32 |
-
|
33 |
-
So countless grows cominatorially in complexity.
|
34 |
-
"""
|
35 |
-
sections = [ a,b,c,d,e ]
|
36 |
-
|
37 |
-
p2 = lambda q,r: q * (q == r) # q if p == q else 0
|
38 |
-
p3 = lambda q,r,s: q * ( (q == r) & (r == s) ) # q if q == r == s else 0
|
39 |
-
|
40 |
-
lor = lambda x,y: x + (x == 0) * y
|
41 |
-
|
42 |
-
results3 = ( p3(x,y,z) for x,y,z in combinations(sections, 3) )
|
43 |
-
results3 = reduce(lor, results3)
|
44 |
-
|
45 |
-
results2 = ( p2(x,y) for x,y in combinations(sections[:-1], 2) )
|
46 |
-
results2 = reduce(lor, results2)
|
47 |
-
|
48 |
-
return reduce(lor, (results3, results2, e))
|
49 |
-
|
50 |
-
def countless8(a,b,c,d,e,f,g,h):
|
51 |
-
"""Extend countless5 to countless8. Same deal, except we also
|
52 |
-
need to check for matches of length 4."""
|
53 |
-
sections = [ a, b, c, d, e, f, g, h ]
|
54 |
-
|
55 |
-
p2 = lambda q,r: q * (q == r)
|
56 |
-
p3 = lambda q,r,s: q * ( (q == r) & (r == s) )
|
57 |
-
p4 = lambda p,q,r,s: p * ( (p == q) & (q == r) & (r == s) )
|
58 |
-
|
59 |
-
lor = lambda x,y: x + (x == 0) * y
|
60 |
-
|
61 |
-
results4 = ( p4(x,y,z,w) for x,y,z,w in combinations(sections, 4) )
|
62 |
-
results4 = reduce(lor, results4)
|
63 |
-
|
64 |
-
results3 = ( p3(x,y,z) for x,y,z in combinations(sections, 3) )
|
65 |
-
results3 = reduce(lor, results3)
|
66 |
-
|
67 |
-
# We can always use our shortcut of omitting the last element
|
68 |
-
# for N choose 2
|
69 |
-
results2 = ( p2(x,y) for x,y in combinations(sections[:-1], 2) )
|
70 |
-
results2 = reduce(lor, results2)
|
71 |
-
|
72 |
-
return reduce(lor, [ results4, results3, results2, h ])
|
73 |
-
|
74 |
-
def dynamic_countless3d(data):
|
75 |
-
"""countless8 + dynamic programming. ~2x faster"""
|
76 |
-
sections = []
|
77 |
-
|
78 |
-
# shift zeros up one so they don't interfere with bitwise operators
|
79 |
-
# we'll shift down at the end
|
80 |
-
data += 1
|
81 |
-
|
82 |
-
# This loop splits the 2D array apart into four arrays that are
|
83 |
-
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
|
84 |
-
# and (1,1) representing the A, B, C, and D positions from Figure 1.
|
85 |
-
factor = (2,2,2)
|
86 |
-
for offset in np.ndindex(factor):
|
87 |
-
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
|
88 |
-
sections.append(part)
|
89 |
-
|
90 |
-
pick = lambda a,b: a * (a == b)
|
91 |
-
lor = lambda x,y: x + (x == 0) * y
|
92 |
-
|
93 |
-
subproblems2 = {}
|
94 |
-
|
95 |
-
results2 = None
|
96 |
-
for x,y in combinations(range(7), 2):
|
97 |
-
res = pick(sections[x], sections[y])
|
98 |
-
subproblems2[(x,y)] = res
|
99 |
-
if results2 is not None:
|
100 |
-
results2 += (results2 == 0) * res
|
101 |
-
else:
|
102 |
-
results2 = res
|
103 |
-
|
104 |
-
subproblems3 = {}
|
105 |
-
|
106 |
-
results3 = None
|
107 |
-
for x,y,z in combinations(range(8), 3):
|
108 |
-
res = pick(subproblems2[(x,y)], sections[z])
|
109 |
-
|
110 |
-
if z != 7:
|
111 |
-
subproblems3[(x,y,z)] = res
|
112 |
-
|
113 |
-
if results3 is not None:
|
114 |
-
results3 += (results3 == 0) * res
|
115 |
-
else:
|
116 |
-
results3 = res
|
117 |
-
|
118 |
-
results3 = reduce(lor, (results3, results2, sections[-1]))
|
119 |
-
|
120 |
-
# free memory
|
121 |
-
results2 = None
|
122 |
-
subproblems2 = None
|
123 |
-
res = None
|
124 |
-
|
125 |
-
results4 = ( pick(subproblems3[(x,y,z)], sections[w]) for x,y,z,w in combinations(range(8), 4) )
|
126 |
-
results4 = reduce(lor, results4)
|
127 |
-
subproblems3 = None # free memory
|
128 |
-
|
129 |
-
final_result = lor(results4, results3) - 1
|
130 |
-
data -= 1
|
131 |
-
return final_result
|
132 |
-
|
133 |
-
def countless3d(data):
|
134 |
-
"""Now write countless8 in such a way that it could be used
|
135 |
-
to process an image."""
|
136 |
-
sections = []
|
137 |
-
|
138 |
-
# shift zeros up one so they don't interfere with bitwise operators
|
139 |
-
# we'll shift down at the end
|
140 |
-
data += 1
|
141 |
-
|
142 |
-
# This loop splits the 2D array apart into four arrays that are
|
143 |
-
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
|
144 |
-
# and (1,1) representing the A, B, C, and D positions from Figure 1.
|
145 |
-
factor = (2,2,2)
|
146 |
-
for offset in np.ndindex(factor):
|
147 |
-
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
|
148 |
-
sections.append(part)
|
149 |
-
|
150 |
-
p2 = lambda q,r: q * (q == r)
|
151 |
-
p3 = lambda q,r,s: q * ( (q == r) & (r == s) )
|
152 |
-
p4 = lambda p,q,r,s: p * ( (p == q) & (q == r) & (r == s) )
|
153 |
-
|
154 |
-
lor = lambda x,y: x + (x == 0) * y
|
155 |
-
|
156 |
-
results4 = ( p4(x,y,z,w) for x,y,z,w in combinations(sections, 4) )
|
157 |
-
results4 = reduce(lor, results4)
|
158 |
-
|
159 |
-
results3 = ( p3(x,y,z) for x,y,z in combinations(sections, 3) )
|
160 |
-
results3 = reduce(lor, results3)
|
161 |
-
|
162 |
-
results2 = ( p2(x,y) for x,y in combinations(sections[:-1], 2) )
|
163 |
-
results2 = reduce(lor, results2)
|
164 |
-
|
165 |
-
final_result = reduce(lor, (results4, results3, results2, sections[-1])) - 1
|
166 |
-
data -= 1
|
167 |
-
return final_result
|
168 |
-
|
169 |
-
def countless_generalized(data, factor):
|
170 |
-
assert len(data.shape) == len(factor)
|
171 |
-
|
172 |
-
sections = []
|
173 |
-
|
174 |
-
mode_of = reduce(lambda x,y: x * y, factor)
|
175 |
-
majority = int(math.ceil(float(mode_of) / 2))
|
176 |
-
|
177 |
-
data += 1
|
178 |
-
|
179 |
-
# This loop splits the 2D array apart into four arrays that are
|
180 |
-
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
|
181 |
-
# and (1,1) representing the A, B, C, and D positions from Figure 1.
|
182 |
-
for offset in np.ndindex(factor):
|
183 |
-
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
|
184 |
-
sections.append(part)
|
185 |
-
|
186 |
-
def pick(elements):
|
187 |
-
eq = ( elements[i] == elements[i+1] for i in range(len(elements) - 1) )
|
188 |
-
anded = reduce(lambda p,q: p & q, eq)
|
189 |
-
return elements[0] * anded
|
190 |
-
|
191 |
-
def logical_or(x,y):
|
192 |
-
return x + (x == 0) * y
|
193 |
-
|
194 |
-
result = ( pick(combo) for combo in combinations(sections, majority) )
|
195 |
-
result = reduce(logical_or, result)
|
196 |
-
for i in range(majority - 1, 3-1, -1): # 3-1 b/c of exclusive bounds
|
197 |
-
partial_result = ( pick(combo) for combo in combinations(sections, i) )
|
198 |
-
partial_result = reduce(logical_or, partial_result)
|
199 |
-
result = logical_or(result, partial_result)
|
200 |
-
|
201 |
-
partial_result = ( pick(combo) for combo in combinations(sections[:-1], 2) )
|
202 |
-
partial_result = reduce(logical_or, partial_result)
|
203 |
-
result = logical_or(result, partial_result)
|
204 |
-
|
205 |
-
result = logical_or(result, sections[-1]) - 1
|
206 |
-
data -= 1
|
207 |
-
return result
|
208 |
-
|
209 |
-
def dynamic_countless_generalized(data, factor):
|
210 |
-
assert len(data.shape) == len(factor)
|
211 |
-
|
212 |
-
sections = []
|
213 |
-
|
214 |
-
mode_of = reduce(lambda x,y: x * y, factor)
|
215 |
-
majority = int(math.ceil(float(mode_of) / 2))
|
216 |
-
|
217 |
-
data += 1 # offset from zero
|
218 |
-
|
219 |
-
# This loop splits the 2D array apart into four arrays that are
|
220 |
-
# all the result of striding by 2 and offset by (0,0), (0,1), (1,0),
|
221 |
-
# and (1,1) representing the A, B, C, and D positions from Figure 1.
|
222 |
-
for offset in np.ndindex(factor):
|
223 |
-
part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
|
224 |
-
sections.append(part)
|
225 |
-
|
226 |
-
pick = lambda a,b: a * (a == b)
|
227 |
-
lor = lambda x,y: x + (x == 0) * y # logical or
|
228 |
-
|
229 |
-
subproblems = [ {}, {} ]
|
230 |
-
results2 = None
|
231 |
-
for x,y in combinations(range(len(sections) - 1), 2):
|
232 |
-
res = pick(sections[x], sections[y])
|
233 |
-
subproblems[0][(x,y)] = res
|
234 |
-
if results2 is not None:
|
235 |
-
results2 = lor(results2, res)
|
236 |
-
else:
|
237 |
-
results2 = res
|
238 |
-
|
239 |
-
results = [ results2 ]
|
240 |
-
for r in range(3, majority+1):
|
241 |
-
r_results = None
|
242 |
-
for combo in combinations(range(len(sections)), r):
|
243 |
-
res = pick(subproblems[0][combo[:-1]], sections[combo[-1]])
|
244 |
-
|
245 |
-
if combo[-1] != len(sections) - 1:
|
246 |
-
subproblems[1][combo] = res
|
247 |
-
|
248 |
-
if r_results is not None:
|
249 |
-
r_results = lor(r_results, res)
|
250 |
-
else:
|
251 |
-
r_results = res
|
252 |
-
results.append(r_results)
|
253 |
-
subproblems[0] = subproblems[1]
|
254 |
-
subproblems[1] = {}
|
255 |
-
|
256 |
-
results.reverse()
|
257 |
-
final_result = lor(reduce(lor, results), sections[-1]) - 1
|
258 |
-
data -= 1
|
259 |
-
return final_result
|
260 |
-
|
261 |
-
def downsample_with_averaging(array):
|
262 |
-
"""
|
263 |
-
Downsample x by factor using averaging.
|
264 |
-
|
265 |
-
@return: The downsampled array, of the same type as x.
|
266 |
-
"""
|
267 |
-
factor = (2,2,2)
|
268 |
-
|
269 |
-
if np.array_equal(factor[:3], np.array([1,1,1])):
|
270 |
-
return array
|
271 |
-
|
272 |
-
output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor))
|
273 |
-
temp = np.zeros(output_shape, float)
|
274 |
-
counts = np.zeros(output_shape, np.int)
|
275 |
-
for offset in np.ndindex(factor):
|
276 |
-
part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
|
277 |
-
indexing_expr = tuple(np.s_[:s] for s in part.shape)
|
278 |
-
temp[indexing_expr] += part
|
279 |
-
counts[indexing_expr] += 1
|
280 |
-
return np.cast[array.dtype](temp / counts)
|
281 |
-
|
282 |
-
def downsample_with_max_pooling(array):
|
283 |
-
|
284 |
-
factor = (2,2,2)
|
285 |
-
|
286 |
-
sections = []
|
287 |
-
|
288 |
-
for offset in np.ndindex(factor):
|
289 |
-
part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
|
290 |
-
sections.append(part)
|
291 |
-
|
292 |
-
output = sections[0].copy()
|
293 |
-
|
294 |
-
for section in sections[1:]:
|
295 |
-
np.maximum(output, section, output)
|
296 |
-
|
297 |
-
return output
|
298 |
-
|
299 |
-
def striding(array):
|
300 |
-
"""Downsample x by factor using striding.
|
301 |
-
|
302 |
-
@return: The downsampled array, of the same type as x.
|
303 |
-
"""
|
304 |
-
factor = (2,2,2)
|
305 |
-
if np.all(np.array(factor, int) == 1):
|
306 |
-
return array
|
307 |
-
return array[tuple(np.s_[::f] for f in factor)]
|
308 |
-
|
309 |
-
def benchmark():
|
310 |
-
def countless3d_generalized(img):
|
311 |
-
return countless_generalized(img, (2,8,1))
|
312 |
-
def countless3d_dynamic_generalized(img):
|
313 |
-
return dynamic_countless_generalized(img, (8,8,1))
|
314 |
-
|
315 |
-
methods = [
|
316 |
-
# countless3d,
|
317 |
-
# dynamic_countless3d,
|
318 |
-
countless3d_generalized,
|
319 |
-
# countless3d_dynamic_generalized,
|
320 |
-
# striding,
|
321 |
-
# downsample_with_averaging,
|
322 |
-
# downsample_with_max_pooling
|
323 |
-
]
|
324 |
-
|
325 |
-
data = np.zeros(shape=(16**2, 16**2, 16**2), dtype=np.uint8) + 1
|
326 |
-
|
327 |
-
N = 5
|
328 |
-
|
329 |
-
print('Algorithm\tMPx\tMB/sec\tSec\tN=%d' % N)
|
330 |
-
|
331 |
-
for fn in methods:
|
332 |
-
start = time.time()
|
333 |
-
for _ in range(N):
|
334 |
-
result = fn(data)
|
335 |
-
end = time.time()
|
336 |
-
|
337 |
-
total_time = (end - start)
|
338 |
-
mpx = N * float(data.shape[0] * data.shape[1] * data.shape[2]) / total_time / 1024.0 / 1024.0
|
339 |
-
mbytes = mpx * np.dtype(data.dtype).itemsize
|
340 |
-
# Output in tab separated format to enable copy-paste into excel/numbers
|
341 |
-
print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time))
|
342 |
-
|
343 |
-
if __name__ == '__main__':
|
344 |
-
benchmark()
|
345 |
-
|
346 |
-
# Algorithm MPx MB/sec Sec N=5
|
347 |
-
# countless3d 10.564 10.564 60.58
|
348 |
-
# dynamic_countless3d 22.717 22.717 28.17
|
349 |
-
# countless3d_generalized 9.702 9.702 65.96
|
350 |
-
# countless3d_dynamic_generalized 22.720 22.720 28.17
|
351 |
-
# striding 253360.506 253360.506 0.00
|
352 |
-
# downsample_with_averaging 224.098 224.098 2.86
|
353 |
-
# downsample_with_max_pooling 690.474 690.474 0.93
|
354 |
-
|
355 |
-
|
356 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AllAideas/SegmentacionVideo/utils/custom_layers.py
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
import tensorflow as tf
|
2 |
-
from tensorflow import keras
|
3 |
-
from keras import layers
|
4 |
-
|
5 |
-
|
6 |
-
class PositionalEmbedding(layers.Layer):
|
7 |
-
def __init__(self, sequence_length, output_dim, **kwargs):
|
8 |
-
super().__init__(**kwargs)
|
9 |
-
self.position_embeddings = layers.Embedding(
|
10 |
-
input_dim=sequence_length, output_dim=output_dim
|
11 |
-
)
|
12 |
-
self.sequence_length = sequence_length
|
13 |
-
self.output_dim = output_dim
|
14 |
-
|
15 |
-
def call(self, inputs):
|
16 |
-
# The inputs are of shape: `(batch_size, frames, num_features)`
|
17 |
-
length = tf.shape(inputs)[1]
|
18 |
-
positions = tf.range(start=0, limit=length, delta=1)
|
19 |
-
embedded_positions = self.position_embeddings(positions)
|
20 |
-
return inputs + embedded_positions
|
21 |
-
|
22 |
-
def compute_mask(self, inputs, mask=None):
|
23 |
-
mask = tf.reduce_any(tf.cast(inputs, "bool"), axis=-1)
|
24 |
-
return mask
|
25 |
-
|
26 |
-
def get_config(self):
|
27 |
-
config = super().get_config()
|
28 |
-
config.update({
|
29 |
-
"sequence_length": self.sequence_length,
|
30 |
-
"output_dim": self.output_dim,
|
31 |
-
})
|
32 |
-
return config
|
33 |
-
|
34 |
-
|
35 |
-
class TransformerEncoder(layers.Layer):
|
36 |
-
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
|
37 |
-
super().__init__(**kwargs)
|
38 |
-
self.embed_dim = embed_dim
|
39 |
-
self.dense_dim = dense_dim
|
40 |
-
self.num_heads = num_heads
|
41 |
-
self.attention = layers.MultiHeadAttention(
|
42 |
-
num_heads=num_heads, key_dim=embed_dim, dropout=0.3
|
43 |
-
)
|
44 |
-
self.dense_proj = keras.Sequential(
|
45 |
-
[layers.Dense(dense_dim, activation=tf.nn.gelu), layers.Dense(embed_dim),]
|
46 |
-
)
|
47 |
-
self.layernorm_1 = layers.LayerNormalization()
|
48 |
-
self.layernorm_2 = layers.LayerNormalization()
|
49 |
-
|
50 |
-
def call(self, inputs, mask=None):
|
51 |
-
if mask is not None:
|
52 |
-
mask = mask[:, tf.newaxis, :]
|
53 |
-
|
54 |
-
attention_output = self.attention(inputs, inputs, attention_mask=mask)
|
55 |
-
proj_input = self.layernorm_1(inputs + attention_output)
|
56 |
-
proj_output = self.dense_proj(proj_input)
|
57 |
-
return self.layernorm_2(proj_input + proj_output)
|
58 |
-
|
59 |
-
|
60 |
-
def get_config(self):
|
61 |
-
config = super().get_config()
|
62 |
-
config.update({
|
63 |
-
"embed_dim": self.embed_dim,
|
64 |
-
"dense_dim": self.dense_dim,
|
65 |
-
"num_heads": self.num_heads,
|
66 |
-
})
|
67 |
-
return config
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Allakhazam/Home/app.py
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
import gradio
|
2 |
-
|
3 |
-
class Model:
|
4 |
-
def __init__(self, name, path="", prefix=""):
|
5 |
-
self.name = name
|
6 |
-
self.path = path
|
7 |
-
self.prefix = prefix
|
8 |
-
|
9 |
-
models = [
|
10 |
-
Model("Marvel","models/ItsJayQz/Marvel_WhatIf_Diffusion", "whatif style"),
|
11 |
-
Model("Portrait plus", "models/wavymulder/portraitplus", "portrait+ style"),
|
12 |
-
Model("CF25", "models/gsdf/Counterfeit-V2.5", "anime style"),
|
13 |
-
Model("vintedois", "models/22h/vintedois-diffusion-v0-1", "vintedois style"),
|
14 |
-
Model("dreamlike", "models/dreamlike-art/dreamlike-diffusion-1.0","dreamlike style"),
|
15 |
-
Model("GTA5","models/ItsJayQz/GTA5_Artwork_Diffusion", "GTA5 style")
|
16 |
-
]
|
17 |
-
|
18 |
-
model1=[]
|
19 |
-
model2=[]
|
20 |
-
model3=[]
|
21 |
-
|
22 |
-
for i in range(len(models)):
|
23 |
-
model3.append(models[i].name)
|
24 |
-
model1.append(gradio.Interface.load(models[i].path))
|
25 |
-
model2.append(models[i].prefix)
|
26 |
-
|
27 |
-
def process1(prompt, modelSelected):
|
28 |
-
if (modelSelected==''):
|
29 |
-
modelSelected = "Marvel"
|
30 |
-
model_idx=model3.index(modelSelected)
|
31 |
-
prompt+=", in "+model2[model_idx]
|
32 |
-
image_return = model1[model_idx](prompt)
|
33 |
-
return image_return
|
34 |
-
|
35 |
-
sandbox = gradio.Interface(fn=process1,
|
36 |
-
inputs=[gradio.Textbox(label="Enter Prompt:"), gradio.Dropdown(model3)],
|
37 |
-
outputs=[gradio.Image(label="Produced Image")],
|
38 |
-
title='Text to Image',
|
39 |
-
examples=[
|
40 |
-
["Viggo Mortensen Gryffindor wizard portrait, Hogwart University, castle tower background", "Portrait plus"],
|
41 |
-
["1girl pirate, left patch, detailed face, black hat, big sailing boat, ocean in background", "CF25"],
|
42 |
-
["Portrait close up, Elvis Presley, concert hall in the background", "GTA5"],
|
43 |
-
["Marvel Blackwidow portrait close up. building city background", "Marvel"],
|
44 |
-
["close up portrait Benedict Cumberbatch wizard of black magic, robe with hood, Hogwart University, castle tower background, oil painting on canvas", "vintedois"],
|
45 |
-
["A white rabbit wizard, Hogwart University, Castle in the background", "dreamlike"]
|
46 |
-
])
|
47 |
-
|
48 |
-
sandbox.queue(concurrency_count=20).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/pndm/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
from .pipeline_pndm import PNDMPipeline
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './ann_r50-d8_512x512_40k_voc12aug.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './encnet_r50-d8_512x1024_80k_cityscapes.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/image.py
DELETED
@@ -1,152 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
|
5 |
-
from annotator.uniformer.mmcv.image import imread, imwrite
|
6 |
-
from .color import color_val
|
7 |
-
|
8 |
-
|
9 |
-
def imshow(img, win_name='', wait_time=0):
|
10 |
-
"""Show an image.
|
11 |
-
|
12 |
-
Args:
|
13 |
-
img (str or ndarray): The image to be displayed.
|
14 |
-
win_name (str): The window name.
|
15 |
-
wait_time (int): Value of waitKey param.
|
16 |
-
"""
|
17 |
-
cv2.imshow(win_name, imread(img))
|
18 |
-
if wait_time == 0: # prevent from hanging if windows was closed
|
19 |
-
while True:
|
20 |
-
ret = cv2.waitKey(1)
|
21 |
-
|
22 |
-
closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
|
23 |
-
# if user closed window or if some key pressed
|
24 |
-
if closed or ret != -1:
|
25 |
-
break
|
26 |
-
else:
|
27 |
-
ret = cv2.waitKey(wait_time)
|
28 |
-
|
29 |
-
|
30 |
-
def imshow_bboxes(img,
|
31 |
-
bboxes,
|
32 |
-
colors='green',
|
33 |
-
top_k=-1,
|
34 |
-
thickness=1,
|
35 |
-
show=True,
|
36 |
-
win_name='',
|
37 |
-
wait_time=0,
|
38 |
-
out_file=None):
|
39 |
-
"""Draw bboxes on an image.
|
40 |
-
|
41 |
-
Args:
|
42 |
-
img (str or ndarray): The image to be displayed.
|
43 |
-
bboxes (list or ndarray): A list of ndarray of shape (k, 4).
|
44 |
-
colors (list[str or tuple or Color]): A list of colors.
|
45 |
-
top_k (int): Plot the first k bboxes only if set positive.
|
46 |
-
thickness (int): Thickness of lines.
|
47 |
-
show (bool): Whether to show the image.
|
48 |
-
win_name (str): The window name.
|
49 |
-
wait_time (int): Value of waitKey param.
|
50 |
-
out_file (str, optional): The filename to write the image.
|
51 |
-
|
52 |
-
Returns:
|
53 |
-
ndarray: The image with bboxes drawn on it.
|
54 |
-
"""
|
55 |
-
img = imread(img)
|
56 |
-
img = np.ascontiguousarray(img)
|
57 |
-
|
58 |
-
if isinstance(bboxes, np.ndarray):
|
59 |
-
bboxes = [bboxes]
|
60 |
-
if not isinstance(colors, list):
|
61 |
-
colors = [colors for _ in range(len(bboxes))]
|
62 |
-
colors = [color_val(c) for c in colors]
|
63 |
-
assert len(bboxes) == len(colors)
|
64 |
-
|
65 |
-
for i, _bboxes in enumerate(bboxes):
|
66 |
-
_bboxes = _bboxes.astype(np.int32)
|
67 |
-
if top_k <= 0:
|
68 |
-
_top_k = _bboxes.shape[0]
|
69 |
-
else:
|
70 |
-
_top_k = min(top_k, _bboxes.shape[0])
|
71 |
-
for j in range(_top_k):
|
72 |
-
left_top = (_bboxes[j, 0], _bboxes[j, 1])
|
73 |
-
right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
|
74 |
-
cv2.rectangle(
|
75 |
-
img, left_top, right_bottom, colors[i], thickness=thickness)
|
76 |
-
|
77 |
-
if show:
|
78 |
-
imshow(img, win_name, wait_time)
|
79 |
-
if out_file is not None:
|
80 |
-
imwrite(img, out_file)
|
81 |
-
return img
|
82 |
-
|
83 |
-
|
84 |
-
def imshow_det_bboxes(img,
|
85 |
-
bboxes,
|
86 |
-
labels,
|
87 |
-
class_names=None,
|
88 |
-
score_thr=0,
|
89 |
-
bbox_color='green',
|
90 |
-
text_color='green',
|
91 |
-
thickness=1,
|
92 |
-
font_scale=0.5,
|
93 |
-
show=True,
|
94 |
-
win_name='',
|
95 |
-
wait_time=0,
|
96 |
-
out_file=None):
|
97 |
-
"""Draw bboxes and class labels (with scores) on an image.
|
98 |
-
|
99 |
-
Args:
|
100 |
-
img (str or ndarray): The image to be displayed.
|
101 |
-
bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
|
102 |
-
(n, 5).
|
103 |
-
labels (ndarray): Labels of bboxes.
|
104 |
-
class_names (list[str]): Names of each classes.
|
105 |
-
score_thr (float): Minimum score of bboxes to be shown.
|
106 |
-
bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
|
107 |
-
text_color (str or tuple or :obj:`Color`): Color of texts.
|
108 |
-
thickness (int): Thickness of lines.
|
109 |
-
font_scale (float): Font scales of texts.
|
110 |
-
show (bool): Whether to show the image.
|
111 |
-
win_name (str): The window name.
|
112 |
-
wait_time (int): Value of waitKey param.
|
113 |
-
out_file (str or None): The filename to write the image.
|
114 |
-
|
115 |
-
Returns:
|
116 |
-
ndarray: The image with bboxes drawn on it.
|
117 |
-
"""
|
118 |
-
assert bboxes.ndim == 2
|
119 |
-
assert labels.ndim == 1
|
120 |
-
assert bboxes.shape[0] == labels.shape[0]
|
121 |
-
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
|
122 |
-
img = imread(img)
|
123 |
-
img = np.ascontiguousarray(img)
|
124 |
-
|
125 |
-
if score_thr > 0:
|
126 |
-
assert bboxes.shape[1] == 5
|
127 |
-
scores = bboxes[:, -1]
|
128 |
-
inds = scores > score_thr
|
129 |
-
bboxes = bboxes[inds, :]
|
130 |
-
labels = labels[inds]
|
131 |
-
|
132 |
-
bbox_color = color_val(bbox_color)
|
133 |
-
text_color = color_val(text_color)
|
134 |
-
|
135 |
-
for bbox, label in zip(bboxes, labels):
|
136 |
-
bbox_int = bbox.astype(np.int32)
|
137 |
-
left_top = (bbox_int[0], bbox_int[1])
|
138 |
-
right_bottom = (bbox_int[2], bbox_int[3])
|
139 |
-
cv2.rectangle(
|
140 |
-
img, left_top, right_bottom, bbox_color, thickness=thickness)
|
141 |
-
label_text = class_names[
|
142 |
-
label] if class_names is not None else f'cls {label}'
|
143 |
-
if len(bbox) > 4:
|
144 |
-
label_text += f'|{bbox[-1]:.02f}'
|
145 |
-
cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
|
146 |
-
cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
|
147 |
-
|
148 |
-
if show:
|
149 |
-
imshow(img, win_name, wait_time)
|
150 |
-
if out_file is not None:
|
151 |
-
imwrite(img, out_file)
|
152 |
-
return img
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/filters.py
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
from numpy import ndarray
|
2 |
-
from abc import ABC, abstractmethod
|
3 |
-
from .critics import colorize_crit_learner
|
4 |
-
from fastai.core import *
|
5 |
-
from fastai.vision import *
|
6 |
-
from fastai.vision.image import *
|
7 |
-
from fastai.vision.data import *
|
8 |
-
from fastai import *
|
9 |
-
import math
|
10 |
-
from scipy import misc
|
11 |
-
import cv2
|
12 |
-
from PIL import Image as PilImage
|
13 |
-
|
14 |
-
|
15 |
-
class IFilter(ABC):
|
16 |
-
@abstractmethod
|
17 |
-
def filter(
|
18 |
-
self, orig_image: PilImage, filtered_image: PilImage, render_factor: int
|
19 |
-
) -> PilImage:
|
20 |
-
pass
|
21 |
-
|
22 |
-
|
23 |
-
class BaseFilter(IFilter):
    """Shared plumbing for filters that run a fastai model over a PIL image."""

    def __init__(self, learn: Learner, stats: tuple = imagenet_stats):
        super().__init__()
        self.learn = learn
        # Run inference on whatever device the model's weights live on.
        self.device = next(self.learn.model.parameters()).device
        self.norm, self.denorm = normalize_funcs(*stats)

    def _transform(self, image: PilImage) -> PilImage:
        """Hook for subclasses; the base filter passes the image through."""
        return image

    def _scale_to_square(self, orig: PilImage, targ: int) -> PilImage:
        # A simple stretch to fit a square really makes a big difference in
        # rendering quality/consistency.  Padding to the square (reflect,
        # symmetric, constant, etc.) was tried as well and is not as good.
        return orig.resize((targ, targ), resample=PIL.Image.BILINEAR)

    def _get_model_ready_image(self, orig: PilImage, sz: int) -> PilImage:
        """Square-stretch and then subclass-transform the image for the model."""
        return self._transform(self._scale_to_square(orig, sz))

    def _model_process(self, orig: PilImage, sz: int) -> PilImage:
        """Run the model on *orig* at edge length *sz* and return its prediction.

        Falls back to returning the (un-colorized) model input if the GPU runs
        out of memory, rather than crashing the caller.
        """
        model_image = self._get_model_ready_image(orig, sz)
        x = pil2tensor(model_image, np.float32).to(self.device)
        x.div_(255)
        x, y = self.norm((x, x), do_x=True)

        try:
            result = self.learn.pred_batch(
                ds_type=DatasetType.Valid, batch=(x[None], y[None]), reconstruct=True
            )
        except RuntimeError as rerr:
            # Anything other than an out-of-memory failure is a real error.
            if 'memory' not in str(rerr):
                raise rerr
            print('Warning: render_factor was set too high, and out of memory error resulted. Returning original image.')
            return model_image

        out = self.denorm(result[0].px, do_x=False)
        out = image2np(out * 255).astype(np.uint8)
        return PilImage.fromarray(out)

    def _unsquare(self, image: PilImage, orig: PilImage) -> PilImage:
        """Resize the square model output back to the original dimensions."""
        return image.resize(orig.size, resample=PIL.Image.BILINEAR)
|
70 |
-
|
71 |
-
|
72 |
-
class ColorizerFilter(BaseFilter):
    """Colorizes an image by running a grayscale rendition through the model."""

    def __init__(self, learn: Learner, stats: tuple = imagenet_stats):
        super().__init__(learn=learn, stats=stats)
        # Model input edge length is render_factor * render_base pixels.
        self.render_base = 16

    def filter(
        self, orig_image: PilImage, filtered_image: PilImage, render_factor: int, post_process: bool = True) -> PilImage:
        render_sz = render_factor * self.render_base
        model_image = self._model_process(orig=filtered_image, sz=render_sz)
        raw_color = self._unsquare(model_image, orig_image)

        if not post_process:
            return raw_color
        return self._post_process(raw_color, orig_image)

    def _transform(self, image: PilImage) -> PilImage:
        # Collapse to grayscale (keeping alpha) then expand back to RGB so the
        # model always sees a luminance-only three-channel input.
        return image.convert('LA').convert('RGB')

    def _post_process(self, raw_color: PilImage, orig: PilImage) -> PilImage:
        """Merge the model's chrominance with the original's luminance.

        This takes advantage of the fact that human eyes are much less
        sensitive to imperfections in chrominance compared to luminance: the
        model can run at a small size while the final image keeps the full
        resolution luminance.  Primarily intended for inference.
        """
        color_np = np.asarray(raw_color)
        orig_np = np.asarray(orig)
        color_yuv = cv2.cvtColor(color_np, cv2.COLOR_BGR2YUV)
        # Do a black and white transform first to get better luminance values.
        orig_yuv = cv2.cvtColor(orig_np, cv2.COLOR_BGR2YUV)
        hires = np.copy(orig_yuv)
        hires[:, :, 1:3] = color_yuv[:, :, 1:3]
        final = cv2.cvtColor(hires, cv2.COLOR_YUV2BGR)
        return PilImage.fromarray(final)
|
107 |
-
|
108 |
-
|
109 |
-
class MasterFilter(BaseFilter):
    """Runs a pipeline of filters in sequence, feeding each one's output onward.

    NOTE(review): although this subclasses BaseFilter, it deliberately does not
    call ``BaseFilter.__init__`` (it owns no model of its own); it only reuses
    the ``filter`` interface shape — confirm before adding state that relies on
    BaseFilter attributes.
    """

    def __init__(self, filters: "list[IFilter]", render_factor: int):
        # filters: applied in order; render_factor: default used when a call
        # does not supply one.
        self.filters = filters
        self.render_factor = render_factor

    def filter(
        self, orig_image: PilImage, filtered_image: PilImage, render_factor: int = None, post_process: bool = True) -> PilImage:
        """Apply every filter in order; *render_factor* falls back to the default."""
        render_factor = self.render_factor if render_factor is None else render_factor
        # Loop variable renamed from `filter`, which shadowed both the builtin
        # and this very method.
        for flt in self.filters:
            filtered_image = flt.filter(orig_image, filtered_image, render_factor, post_process)

        return filtered_image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
DELETED
@@ -1,97 +0,0 @@
|
|
1 |
-
from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
|
2 |
-
import parselmouth
|
3 |
-
import numpy as np
|
4 |
-
|
5 |
-
|
6 |
-
class PMF0Predictor(F0Predictor):
    """F0 (fundamental frequency) predictor backed by Praat via parselmouth."""

    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        # hop_length: analysis hop in samples.
        # f0_min / f0_max: pitch search range in Hz.
        # sampling_rate: input waveform sample rate in Hz.
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """Fill unvoiced (<= 0) gaps in an F0 contour by interpolation.

        Returns ``(f0, vuv)`` where ``vuv`` is a voiced/unvoiced mask:
        1.0 where the input frame was voiced, 0.0 otherwise.
        """
        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        # NOTE: this is an alias of `data`, not a copy, so `data` is updated
        # in place as gaps are filled below.
        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                # Scan forward to the next voiced frame (j stops at the first
                # data[j] > 0, or at frame_number - 1 if none remain).
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        # Interior gap: linearly interpolate between the
                        # surrounding voiced frames.
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        # Leading gap: back-fill with the first voiced value.
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    # Trailing gap: hold the last voiced value.
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # possibly a redundant self-assignment
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def _extract_f0(self, wav, p_len):
        """Run Praat pitch extraction and pad to *p_len* frames.

        Shared by :meth:`compute_f0` and :meth:`compute_f0_uv`, which
        previously duplicated this code verbatim.
        """
        if p_len is None:
            p_len = wav.shape[0] // self.hop_length
        else:
            assert abs(p_len - wav.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(wav, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        # Praat trims edge frames; pad roughly symmetrically back to p_len.
        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        return f0

    def compute_f0(self, wav, p_len=None):
        """Return the gap-interpolated F0 contour for *wav*."""
        f0, uv = self.interpolate_f0(self._extract_f0(wav, p_len))
        return f0

    def compute_f0_uv(self, wav, p_len=None):
        """Return ``(f0, uv)``: interpolated F0 and voiced/unvoiced mask."""
        f0, uv = self.interpolate_f0(self._extract_f0(wav, p_len))
        return f0, uv
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ArnePan/German-LLM-leaderboard/app.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import os, json
|
3 |
-
import pandas as pd
|
4 |
-
from constants import ALL_COLUMNS, DATA_TYPES, DEFAULT_CHECK, MODEL_SIZES, MODEL_TYPES, SOURCE_TYPES, DATASETS
|
5 |
-
|
6 |
-
|
7 |
-
def read_jsons(directory_name, col_names):
    """Load every ``*.json`` file in *directory_name* into one DataFrame.

    Each file is expected to hold a flat JSON object describing one model.
    Only the columns listed in *col_names* are kept.  Files are read in
    sorted order so the resulting row order is deterministic (``os.listdir``
    order is arbitrary), and non-JSON files are skipped instead of crashing
    the JSON parser.
    """
    table = []
    for js in sorted(os.listdir(directory_name)):
        if not js.endswith(".json"):
            continue
        with open(os.path.join(directory_name, js)) as json_file:
            table.append(json.load(json_file))
    df = pd.DataFrame(table)
    # Keep only the requested columns (ignoring any that are absent).
    return df[df.columns.intersection(col_names)]
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
def update_cols(leaderboard: pd.DataFrame, datasets: list,):
    """Rebuild the leaderboard table showing only the chosen dataset columns.

    The four identity columns are always shown; *datasets* selects which
    benchmark score columns are appended after them.
    """
    cols = ["Model", "Type", "Source-type", "Size"] + list(datasets)
    return gr.Dataframe(
        value=read_jsons(directory_name="model_data", col_names=cols),
        headers=cols,
        datatype=[DATA_TYPES[x] for x in ALL_COLUMNS],
        interactive=False,
    )
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
def update_rows(leaderboard: pd.DataFrame, datasets: list, types: list, source_types: list, sizes: list):
    """Rebuild the leaderboard restricted to rows matching all active filters.

    Re-reads the model data, keeps the identity columns plus the selected
    *datasets*, then applies the type/source/size row filters.
    """
    cols = ["Model", "Type", "Source-type", "Size"] + list(datasets)
    full_table = read_jsons(directory_name="model_data", col_names=cols)
    return gr.Dataframe(
        value=filter(full_table, types, source_types, sizes),
        headers=cols,
        datatype=[DATA_TYPES[x] for x in ALL_COLUMNS],
        interactive=False,
    )
|
48 |
-
|
49 |
-
def filter(df: pd.DataFrame, types: list, source_types: list, sizes: list):
    """Keep only rows whose Size, Source-type and Type are all selected.

    Note: this shadows the builtin ``filter`` at module level (pre-existing
    public name, kept for compatibility).
    """
    mask = (
        df["Size"].isin(sizes)
        & df["Source-type"].isin(source_types)
        & df["Type"].isin(types)
    )
    return df[mask]
|
54 |
-
|
55 |
-
with gr.Blocks() as demo:
    gr.Markdown("# Welcome to the German LLM leaderboard!")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                parameter_size = gr.CheckboxGroup(
                    choices=MODEL_SIZES,
                    value=MODEL_SIZES,
                    label="Model sizes",
                    elem_id="size-select",
                    interactive=True,
                )

            with gr.Row():
                model_type = gr.CheckboxGroup(
                    choices=MODEL_TYPES,
                    value=MODEL_TYPES,
                    label="Model types",
                    elem_id="type-select",
                    interactive=True,
                )

        with gr.Column():
            with gr.Row():
                source_type = gr.CheckboxGroup(
                    choices=SOURCE_TYPES,
                    value=SOURCE_TYPES,
                    label="Source types",
                    elem_id="source-select",
                    interactive=True,
                )

            with gr.Row():
                shown_columns = gr.CheckboxGroup(
                    choices=DATASETS,
                    value=DATASETS,
                    label="Select datasets to show",
                    elem_id="column-select",
                    interactive=True,
                )

    # Full table shown initially; event handlers below rebuild it.
    leaderboard_table = gr.Dataframe(
        headers=ALL_COLUMNS,
        value=read_jsons(directory_name="model_data", col_names=ALL_COLUMNS),
        datatype=[DATA_TYPES[x] for x in ALL_COLUMNS],
        interactive=False,
    )

    # Column selection only changes which columns are rendered.
    shown_columns.change(
        fn=update_cols,
        inputs=[
            leaderboard_table,
            shown_columns,
        ],
        outputs=leaderboard_table,
    )

    # All three row filters trigger the exact same refresh, so register the
    # identical handler in a loop instead of repeating the block three times.
    _row_filter_inputs = [
        leaderboard_table,
        shown_columns,
        model_type,
        source_type,
        parameter_size,
    ]
    for _control in (parameter_size, source_type, model_type):
        _control.change(
            fn=update_rows,
            inputs=_row_filter_inputs,
            outputs=leaderboard_table,
        )

if __name__ == "__main__":
    demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/resolver.py
DELETED
@@ -1,296 +0,0 @@
|
|
1 |
-
import functools
|
2 |
-
import logging
|
3 |
-
import os
|
4 |
-
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
|
5 |
-
|
6 |
-
from pip._vendor.packaging.utils import canonicalize_name
|
7 |
-
from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
|
8 |
-
from pip._vendor.resolvelib import Resolver as RLResolver
|
9 |
-
from pip._vendor.resolvelib.structs import DirectedGraph
|
10 |
-
|
11 |
-
from pip._internal.cache import WheelCache
|
12 |
-
from pip._internal.index.package_finder import PackageFinder
|
13 |
-
from pip._internal.operations.prepare import RequirementPreparer
|
14 |
-
from pip._internal.req.req_install import InstallRequirement
|
15 |
-
from pip._internal.req.req_set import RequirementSet
|
16 |
-
from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
|
17 |
-
from pip._internal.resolution.resolvelib.provider import PipProvider
|
18 |
-
from pip._internal.resolution.resolvelib.reporter import (
|
19 |
-
PipDebuggingReporter,
|
20 |
-
PipReporter,
|
21 |
-
)
|
22 |
-
|
23 |
-
from .base import Candidate, Requirement
|
24 |
-
from .factory import Factory
|
25 |
-
|
26 |
-
if TYPE_CHECKING:
|
27 |
-
from pip._vendor.resolvelib.resolvers import Result as RLResult
|
28 |
-
|
29 |
-
Result = RLResult[Requirement, Candidate, str]
|
30 |
-
|
31 |
-
|
32 |
-
logger = logging.getLogger(__name__)
|
33 |
-
|
34 |
-
|
35 |
-
class Resolver(BaseResolver):
    """New-style pip resolver built on the vendored ``resolvelib`` backtracker.

    Wraps a :class:`Factory` (which knows how to find/prepare candidates) in a
    :class:`PipProvider` and drives ``resolvelib``'s ``Resolver`` over it.
    """

    # Valid values for ``upgrade_strategy``; asserted in __init__.
    _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}

    def __init__(
        self,
        preparer: RequirementPreparer,
        finder: PackageFinder,
        wheel_cache: Optional[WheelCache],
        make_install_req: InstallRequirementProvider,
        use_user_site: bool,
        ignore_dependencies: bool,
        ignore_installed: bool,
        ignore_requires_python: bool,
        force_reinstall: bool,
        upgrade_strategy: str,
        py_version_info: Optional[Tuple[int, ...]] = None,
    ):
        super().__init__()
        assert upgrade_strategy in self._allowed_strategies

        self.factory = Factory(
            finder=finder,
            preparer=preparer,
            make_install_req=make_install_req,
            wheel_cache=wheel_cache,
            use_user_site=use_user_site,
            force_reinstall=force_reinstall,
            ignore_installed=ignore_installed,
            ignore_requires_python=ignore_requires_python,
            py_version_info=py_version_info,
        )
        self.ignore_dependencies = ignore_dependencies
        self.upgrade_strategy = upgrade_strategy
        # Populated by resolve(); read by get_installation_order().
        self._result: Optional[Result] = None

    def resolve(
        self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
    ) -> RequirementSet:
        """Resolve *root_reqs* into a ready-to-install :class:`RequirementSet`.

        Raises the error produced by ``factory.get_installation_error`` when
        resolution is impossible.
        """
        collected = self.factory.collect_root_requirements(root_reqs)
        provider = PipProvider(
            factory=self.factory,
            constraints=collected.constraints,
            ignore_dependencies=self.ignore_dependencies,
            upgrade_strategy=self.upgrade_strategy,
            user_requested=collected.user_requested,
        )
        if "PIP_RESOLVER_DEBUG" in os.environ:
            reporter: BaseReporter = PipDebuggingReporter()
        else:
            reporter = PipReporter()
        resolver: RLResolver[Requirement, Candidate, str] = RLResolver(
            provider,
            reporter,
        )

        try:
            limit_how_complex_resolution_can_be = 200000
            result = self._result = resolver.resolve(
                collected.requirements, max_rounds=limit_how_complex_resolution_can_be
            )

        except ResolutionImpossible as e:
            error = self.factory.get_installation_error(
                cast("ResolutionImpossible[Requirement, Candidate]", e),
                collected.constraints,
            )
            raise error from e

        req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
        for candidate in result.mapping.values():
            ireq = candidate.get_install_requirement()
            if ireq is None:
                # Candidate is already satisfied (e.g. installed) -- skip.
                continue

            # Check if there is already an installation under the same name,
            # and set a flag for later stages to uninstall it, if needed.
            installed_dist = self.factory.get_dist_to_uninstall(candidate)
            if installed_dist is None:
                # There is no existing installation -- nothing to uninstall.
                ireq.should_reinstall = False
            elif self.factory.force_reinstall:
                # The --force-reinstall flag is set -- reinstall.
                ireq.should_reinstall = True
            elif installed_dist.version != candidate.version:
                # The installation is different in version -- reinstall.
                ireq.should_reinstall = True
            elif candidate.is_editable or installed_dist.editable:
                # The incoming distribution is editable, or different in
                # editable-ness to installation -- reinstall.
                ireq.should_reinstall = True
            elif candidate.source_link and candidate.source_link.is_file:
                # The incoming distribution is under file://
                if candidate.source_link.is_wheel:
                    # is a local wheel -- do nothing.
                    logger.info(
                        "%s is already installed with the same version as the "
                        "provided wheel. Use --force-reinstall to force an "
                        "installation of the wheel.",
                        ireq.name,
                    )
                    continue

                # is a local sdist or path -- reinstall
                ireq.should_reinstall = True
            else:
                # Same version, not editable, not a local file: keep existing.
                continue

            link = candidate.source_link
            if link and link.is_yanked:
                # The reason can contain non-ASCII characters, Unicode
                # is required for Python 2.
                msg = (
                    "The candidate selected for download or install is a "
                    "yanked version: {name!r} candidate (version {version} "
                    "at {link})\nReason for being yanked: {reason}"
                ).format(
                    name=candidate.name,
                    version=candidate.version,
                    link=link,
                    reason=link.yanked_reason or "<none given>",
                )
                logger.warning(msg)

            req_set.add_named_requirement(ireq)

        reqs = req_set.all_requirements
        self.factory.preparer.prepare_linked_requirements_more(reqs)
        return req_set

    def get_installation_order(
        self, req_set: RequirementSet
    ) -> List[InstallRequirement]:
        """Get order for installation of requirements in RequirementSet.

        The returned list contains a requirement before another that depends on
        it. This helps ensure that the environment is kept consistent as they
        get installed one-by-one.

        The current implementation creates a topological ordering of the
        dependency graph, giving more weight to packages with less
        or no dependencies, while breaking any cycles in the graph at
        arbitrary points. We make no guarantees about where the cycle
        would be broken, other than it *would* be broken.
        """
        assert self._result is not None, "must call resolve() first"

        if not req_set.requirements:
            # Nothing is left to install, so we do not need an order.
            return []

        graph = self._result.graph
        weights = get_topological_weights(graph, set(req_set.requirements.keys()))

        sorted_items = sorted(
            req_set.requirements.items(),
            key=functools.partial(_req_set_item_sorter, weights=weights),
            reverse=True,
        )
        return [ireq for _, ireq in sorted_items]
|
194 |
-
|
195 |
-
|
196 |
-
def get_topological_weights(
    graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]
) -> Dict[Optional[str], int]:
    """Assign weights to each node based on how "deep" they are.

    This implementation may change at any point in the future without prior
    notice.

    We first simplify the dependency graph by pruning any leaves and giving them
    the highest weight: a package without any dependencies should be installed
    first. This is done again and again in the same way, giving ever less weight
    to the newly found leaves. The loop stops when no leaves are left: all
    remaining packages have at least one dependency left in the graph.

    Then we continue with the remaining graph, by taking the length for the
    longest path to any node from root, ignoring any paths that contain a single
    node twice (i.e. cycles). This is done through a depth-first search through
    the graph, while keeping track of the path to the node.

    Cycles in the graph result would result in node being revisited while also
    being on its own path. In this case, take no action. This helps ensure we
    don't get stuck in a cycle.

    When assigning weight, the longer path (i.e. larger length) is preferred.

    We are only interested in the weights of packages that are in the
    requirement_keys.
    """
    path: Set[Optional[str]] = set()
    weights: Dict[Optional[str], int] = {}

    def visit(node: Optional[str]) -> None:
        if node in path:
            # We hit a cycle, so we'll break it here.
            return

        # Time to visit the children!
        path.add(node)
        for child in graph.iter_children(node):
            visit(child)
        path.remove(node)

        if node not in requirement_keys:
            return

        # After the remove() above, `path` holds only this node's ancestors,
        # so len(path) is the depth of the current traversal to this node.
        last_known_parent_count = weights.get(node, 0)
        weights[node] = max(last_known_parent_count, len(path))

    # Simplify the graph, pruning leaves that have no dependencies.
    # This is needed for large graphs (say over 200 packages) because the
    # `visit` function is exponentially slower then, taking minutes.
    # See https://github.com/pypa/pip/issues/10557
    # We will loop until we explicitly break the loop.
    while True:
        leaves = set()
        for key in graph:
            if key is None:
                continue
            for _child in graph.iter_children(key):
                # This means we have at least one child
                break
            else:
                # No child.
                leaves.add(key)
        if not leaves:
            # We are done simplifying.
            break
        # Calculate the weight for the leaves.
        weight = len(graph) - 1
        for leaf in leaves:
            if leaf not in requirement_keys:
                continue
            weights[leaf] = weight
        # Remove the leaves from the graph, making it simpler.
        for leaf in leaves:
            graph.remove(leaf)

    # Visit the remaining graph.
    # `None` is guaranteed to be the root node by resolvelib.
    visit(None)

    # Sanity check: all requirement keys should be in the weights,
    # and no other keys should be in the weights.
    difference = set(weights.keys()).difference(requirement_keys)
    assert not difference, difference

    return weights
|
283 |
-
|
284 |
-
|
285 |
-
def _req_set_item_sorter(
    item: Tuple[str, InstallRequirement],
    weights: Dict[Optional[str], int],
) -> Tuple[int, str]:
    """Key function used to sort install requirements for installation.

    Based on the "weight" mapping calculated in ``get_installation_order()``.
    The canonical package name is returned as the second member as a
    tie-breaker to ensure the result is predictable, which is useful in tests.
    """
    project_name, _ireq = item
    name = canonicalize_name(project_name)
    return weights[name], name
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/msgpack/ext.py
DELETED
@@ -1,193 +0,0 @@
|
|
1 |
-
# coding: utf-8
|
2 |
-
from collections import namedtuple
|
3 |
-
import datetime
|
4 |
-
import sys
|
5 |
-
import struct
|
6 |
-
|
7 |
-
|
8 |
-
# Python-2 compatibility shims: `int_types` covers both integer flavours on
# Python 2, and `_utc` is a UTC tzinfo (left as None on Python 2, which has
# no datetime.timezone).
PY2 = sys.version_info[0] == 2

if PY2:
    int_types = (int, long)  # noqa: F821 -- `long` only exists on Python 2
    _utc = None
else:
    int_types = int
    try:
        _utc = datetime.timezone.utc
    except AttributeError:
        # Fallback for Python 3 builds lacking timezone.utc: zero-offset tz.
        _utc = datetime.timezone(datetime.timedelta(0))
|
19 |
-
|
20 |
-
|
21 |
-
class ExtType(namedtuple("ExtType", "code data")):
    """ExtType represents ext type in msgpack.

    An application-defined extension value: a type ``code`` in the range
    0..127 paired with an opaque ``data`` payload of bytes.
    """

    def __new__(cls, code, data):
        # Validate before delegating to the namedtuple constructor so that
        # malformed ext values can never be created.
        if not isinstance(code, int):
            raise TypeError("code must be int")
        if not isinstance(data, bytes):
            raise TypeError("data must be bytes")
        if code < 0 or code > 127:
            raise ValueError("code must be 0~127")
        return super(ExtType, cls).__new__(cls, code, data)
|
32 |
-
|
33 |
-
|
34 |
-
class Timestamp(object):
    """Timestamp represents the Timestamp extension type in msgpack.

    When built with Cython, msgpack uses C methods to pack and unpack `Timestamp`. When using pure-Python
    msgpack, :func:`to_bytes` and :func:`from_bytes` are used to pack and unpack `Timestamp`.

    This class is immutable: Do not override seconds and nanoseconds.
    """

    # __slots__ keeps instances small; many timestamps may be decoded at once.
    __slots__ = ["seconds", "nanoseconds"]

    def __init__(self, seconds, nanoseconds=0):
        """Initialize a Timestamp object.

        :param int seconds:
            Number of seconds since the UNIX epoch (00:00:00 UTC Jan 1 1970, minus leap seconds).
            May be negative.

        :param int nanoseconds:
            Number of nanoseconds to add to `seconds` to get fractional time.
            Maximum is 999_999_999.  Default is 0.

        :raises TypeError: if seconds or nanoseconds is not an integer.
        :raises ValueError: if nanoseconds is outside [0, 10**9).

        Note: Negative times (before the UNIX epoch) are represented as negative seconds + positive ns.
        """
        if not isinstance(seconds, int_types):
            raise TypeError("seconds must be an integer")
        if not isinstance(nanoseconds, int_types):
            raise TypeError("nanoseconds must be an integer")
        if not (0 <= nanoseconds < 10**9):
            raise ValueError(
                "nanoseconds must be a non-negative integer less than 999999999."
            )
        self.seconds = seconds
        self.nanoseconds = nanoseconds

    def __repr__(self):
        """String representation of Timestamp."""
        return "Timestamp(seconds={0}, nanoseconds={1})".format(
            self.seconds, self.nanoseconds
        )

    def __eq__(self, other):
        """Check for equality with another Timestamp object"""
        # Exact-type check (not isinstance): subclasses never compare equal.
        if type(other) is self.__class__:
            return (
                self.seconds == other.seconds and self.nanoseconds == other.nanoseconds
            )
        return False

    def __ne__(self, other):
        """not-equals method (see :func:`__eq__()`)"""
        return not self.__eq__(other)

    def __hash__(self):
        # Hash on the same pair used for equality, as required.
        return hash((self.seconds, self.nanoseconds))

    @staticmethod
    def from_bytes(b):
        """Unpack bytes into a `Timestamp` object.

        Used for pure-Python msgpack unpacking.

        :param b: Payload from msgpack ext message with code -1
        :type b: bytes

        :returns: Timestamp object unpacked from msgpack ext payload
        :rtype: Timestamp
        """
        if len(b) == 4:
            # timestamp 32: unsigned 32-bit big-endian seconds, no nanoseconds.
            seconds = struct.unpack("!L", b)[0]
            nanoseconds = 0
        elif len(b) == 8:
            # timestamp 64: one 64-bit word -- low 34 bits are seconds,
            # upper 30 bits are nanoseconds.
            data64 = struct.unpack("!Q", b)[0]
            seconds = data64 & 0x00000003FFFFFFFF
            nanoseconds = data64 >> 34
        elif len(b) == 12:
            # timestamp 96: unsigned 32-bit nanoseconds + signed 64-bit seconds.
            nanoseconds, seconds = struct.unpack("!Iq", b)
        else:
            raise ValueError(
                "Timestamp type can only be created from 32, 64, or 96-bit byte objects"
            )
        return Timestamp(seconds, nanoseconds)

    def to_bytes(self):
        """Pack this Timestamp object into bytes.

        Used for pure-Python msgpack packing.  Chooses the smallest of the
        three wire layouts (32/64/96-bit) that can represent the value.

        :returns data: Payload for EXT message with code -1 (timestamp type)
        :rtype: bytes
        """
        if (self.seconds >> 34) == 0:  # seconds is non-negative and fits in 34 bits
            data64 = self.nanoseconds << 34 | self.seconds
            if data64 & 0xFFFFFFFF00000000 == 0:
                # nanoseconds is zero and seconds < 2**32, so timestamp 32
                data = struct.pack("!L", data64)
            else:
                # timestamp 64
                data = struct.pack("!Q", data64)
        else:
            # timestamp 96
            data = struct.pack("!Iq", self.nanoseconds, self.seconds)
        return data

    @staticmethod
    def from_unix(unix_sec):
        """Create a Timestamp from posix timestamp in seconds.

        :param unix_sec: Posix timestamp in seconds.
        :type unix_sec: int or float.
        """
        seconds = int(unix_sec // 1)
        nanoseconds = int((unix_sec % 1) * 10**9)
        return Timestamp(seconds, nanoseconds)

    def to_unix(self):
        """Get the timestamp as a floating-point value.

        Note: float conversion can lose sub-microsecond precision.

        :returns: posix timestamp
        :rtype: float
        """
        return self.seconds + self.nanoseconds / 1e9

    @staticmethod
    def from_unix_nano(unix_ns):
        """Create a Timestamp from posix timestamp in nanoseconds.

        :param int unix_ns: Posix timestamp in nanoseconds.
        :rtype: Timestamp
        """
        return Timestamp(*divmod(unix_ns, 10**9))

    def to_unix_nano(self):
        """Get the timestamp as a unixtime in nanoseconds.

        :returns: posix timestamp in nanoseconds
        :rtype: int
        """
        return self.seconds * 10**9 + self.nanoseconds

    def to_datetime(self):
        """Get the timestamp as a UTC datetime.

        Python 2 is not supported.

        :rtype: datetime.
        """
        return datetime.datetime.fromtimestamp(0, _utc) + datetime.timedelta(
            seconds=self.to_unix()
        )

    @staticmethod
    def from_datetime(dt):
        """Create a Timestamp from datetime with tzinfo.

        Python 2 is not supported.

        :rtype: Timestamp
        """
        return Timestamp.from_unix(dt.timestamp())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/train_net.py
DELETED
@@ -1,228 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import os
|
3 |
-
from collections import OrderedDict
|
4 |
-
import torch
|
5 |
-
from torch.nn.parallel import DistributedDataParallel
|
6 |
-
import time
|
7 |
-
import datetime
|
8 |
-
import json
|
9 |
-
|
10 |
-
from fvcore.common.timer import Timer
|
11 |
-
import detectron2.utils.comm as comm
|
12 |
-
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
|
13 |
-
from detectron2.config import get_cfg
|
14 |
-
from detectron2.data import (
|
15 |
-
MetadataCatalog,
|
16 |
-
build_detection_test_loader,
|
17 |
-
)
|
18 |
-
from detectron2.engine import default_argument_parser, default_setup, launch
|
19 |
-
|
20 |
-
from detectron2.evaluation import (
|
21 |
-
COCOEvaluator,
|
22 |
-
LVISEvaluator,
|
23 |
-
inference_on_dataset,
|
24 |
-
print_csv_format,
|
25 |
-
)
|
26 |
-
from detectron2.modeling import build_model
|
27 |
-
from detectron2.solver import build_lr_scheduler, build_optimizer
|
28 |
-
from detectron2.utils.events import (
|
29 |
-
CommonMetricPrinter,
|
30 |
-
EventStorage,
|
31 |
-
JSONWriter,
|
32 |
-
TensorboardXWriter,
|
33 |
-
)
|
34 |
-
from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA
|
35 |
-
from detectron2.data.dataset_mapper import DatasetMapper
|
36 |
-
from detectron2.data.build import build_detection_train_loader
|
37 |
-
|
38 |
-
from centernet.config import add_centernet_config
|
39 |
-
from centernet.data.custom_build_augmentation import build_custom_augmentation
|
40 |
-
|
41 |
-
logger = logging.getLogger("detectron2")
|
42 |
-
|
43 |
-
def do_test(cfg, model):
|
44 |
-
results = OrderedDict()
|
45 |
-
for dataset_name in cfg.DATASETS.TEST:
|
46 |
-
mapper = None if cfg.INPUT.TEST_INPUT_TYPE == 'default' else \
|
47 |
-
DatasetMapper(
|
48 |
-
cfg, False, augmentations=build_custom_augmentation(cfg, False))
|
49 |
-
data_loader = build_detection_test_loader(cfg, dataset_name, mapper=mapper)
|
50 |
-
output_folder = os.path.join(
|
51 |
-
cfg.OUTPUT_DIR, "inference_{}".format(dataset_name))
|
52 |
-
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
|
53 |
-
|
54 |
-
if evaluator_type == "lvis":
|
55 |
-
evaluator = LVISEvaluator(dataset_name, cfg, True, output_folder)
|
56 |
-
elif evaluator_type == 'coco':
|
57 |
-
evaluator = COCOEvaluator(dataset_name, cfg, True, output_folder)
|
58 |
-
else:
|
59 |
-
assert 0, evaluator_type
|
60 |
-
|
61 |
-
results[dataset_name] = inference_on_dataset(
|
62 |
-
model, data_loader, evaluator)
|
63 |
-
if comm.is_main_process():
|
64 |
-
logger.info("Evaluation results for {} in csv format:".format(
|
65 |
-
dataset_name))
|
66 |
-
print_csv_format(results[dataset_name])
|
67 |
-
if len(results) == 1:
|
68 |
-
results = list(results.values())[0]
|
69 |
-
return results
|
70 |
-
|
71 |
-
def do_train(cfg, model, resume=False):
|
72 |
-
model.train()
|
73 |
-
optimizer = build_optimizer(cfg, model)
|
74 |
-
scheduler = build_lr_scheduler(cfg, optimizer)
|
75 |
-
|
76 |
-
checkpointer = DetectionCheckpointer(
|
77 |
-
model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
|
78 |
-
)
|
79 |
-
|
80 |
-
start_iter = (
|
81 |
-
checkpointer.resume_or_load(
|
82 |
-
cfg.MODEL.WEIGHTS, resume=resume,
|
83 |
-
).get("iteration", -1) + 1
|
84 |
-
)
|
85 |
-
if cfg.SOLVER.RESET_ITER:
|
86 |
-
logger.info('Reset loaded iteration. Start training from iteration 0.')
|
87 |
-
start_iter = 0
|
88 |
-
max_iter = cfg.SOLVER.MAX_ITER if cfg.SOLVER.TRAIN_ITER < 0 else cfg.SOLVER.TRAIN_ITER
|
89 |
-
|
90 |
-
periodic_checkpointer = PeriodicCheckpointer(
|
91 |
-
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
|
92 |
-
)
|
93 |
-
|
94 |
-
writers = (
|
95 |
-
[
|
96 |
-
CommonMetricPrinter(max_iter),
|
97 |
-
JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
|
98 |
-
TensorboardXWriter(cfg.OUTPUT_DIR),
|
99 |
-
]
|
100 |
-
if comm.is_main_process()
|
101 |
-
else []
|
102 |
-
)
|
103 |
-
|
104 |
-
|
105 |
-
mapper = DatasetMapper(cfg, True) if cfg.INPUT.CUSTOM_AUG == '' else \
|
106 |
-
DatasetMapper(cfg, True, augmentations=build_custom_augmentation(cfg, True))
|
107 |
-
if cfg.DATALOADER.SAMPLER_TRAIN in ['TrainingSampler', 'RepeatFactorTrainingSampler']:
|
108 |
-
data_loader = build_detection_train_loader(cfg, mapper=mapper)
|
109 |
-
else:
|
110 |
-
from centernet.data.custom_dataset_dataloader import build_custom_train_loader
|
111 |
-
data_loader = build_custom_train_loader(cfg, mapper=mapper)
|
112 |
-
|
113 |
-
|
114 |
-
logger.info("Starting training from iteration {}".format(start_iter))
|
115 |
-
with EventStorage(start_iter) as storage:
|
116 |
-
step_timer = Timer()
|
117 |
-
data_timer = Timer()
|
118 |
-
start_time = time.perf_counter()
|
119 |
-
for data, iteration in zip(data_loader, range(start_iter, max_iter)):
|
120 |
-
data_time = data_timer.seconds()
|
121 |
-
storage.put_scalars(data_time=data_time)
|
122 |
-
step_timer.reset()
|
123 |
-
iteration = iteration + 1
|
124 |
-
storage.step()
|
125 |
-
loss_dict = model(data)
|
126 |
-
|
127 |
-
losses = sum(
|
128 |
-
loss for k, loss in loss_dict.items())
|
129 |
-
assert torch.isfinite(losses).all(), loss_dict
|
130 |
-
|
131 |
-
loss_dict_reduced = {k: v.item() \
|
132 |
-
for k, v in comm.reduce_dict(loss_dict).items()}
|
133 |
-
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
|
134 |
-
if comm.is_main_process():
|
135 |
-
storage.put_scalars(
|
136 |
-
total_loss=losses_reduced, **loss_dict_reduced)
|
137 |
-
|
138 |
-
optimizer.zero_grad()
|
139 |
-
losses.backward()
|
140 |
-
optimizer.step()
|
141 |
-
|
142 |
-
storage.put_scalar(
|
143 |
-
"lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
|
144 |
-
|
145 |
-
step_time = step_timer.seconds()
|
146 |
-
storage.put_scalars(time=step_time)
|
147 |
-
data_timer.reset()
|
148 |
-
scheduler.step()
|
149 |
-
|
150 |
-
if (
|
151 |
-
cfg.TEST.EVAL_PERIOD > 0
|
152 |
-
and iteration % cfg.TEST.EVAL_PERIOD == 0
|
153 |
-
and iteration != max_iter
|
154 |
-
):
|
155 |
-
do_test(cfg, model)
|
156 |
-
comm.synchronize()
|
157 |
-
|
158 |
-
if iteration - start_iter > 5 and \
|
159 |
-
(iteration % 20 == 0 or iteration == max_iter):
|
160 |
-
for writer in writers:
|
161 |
-
writer.write()
|
162 |
-
periodic_checkpointer.step(iteration)
|
163 |
-
|
164 |
-
total_time = time.perf_counter() - start_time
|
165 |
-
logger.info(
|
166 |
-
"Total training time: {}".format(
|
167 |
-
str(datetime.timedelta(seconds=int(total_time)))))
|
168 |
-
|
169 |
-
def setup(args):
|
170 |
-
"""
|
171 |
-
Create configs and perform basic setups.
|
172 |
-
"""
|
173 |
-
cfg = get_cfg()
|
174 |
-
add_centernet_config(cfg)
|
175 |
-
cfg.merge_from_file(args.config_file)
|
176 |
-
cfg.merge_from_list(args.opts)
|
177 |
-
if '/auto' in cfg.OUTPUT_DIR:
|
178 |
-
file_name = os.path.basename(args.config_file)[:-5]
|
179 |
-
cfg.OUTPUT_DIR = cfg.OUTPUT_DIR.replace('/auto', '/{}'.format(file_name))
|
180 |
-
logger.info('OUTPUT_DIR: {}'.format(cfg.OUTPUT_DIR))
|
181 |
-
cfg.freeze()
|
182 |
-
default_setup(cfg, args)
|
183 |
-
return cfg
|
184 |
-
|
185 |
-
|
186 |
-
def main(args):
|
187 |
-
cfg = setup(args)
|
188 |
-
|
189 |
-
model = build_model(cfg)
|
190 |
-
logger.info("Model:\n{}".format(model))
|
191 |
-
if args.eval_only:
|
192 |
-
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
|
193 |
-
cfg.MODEL.WEIGHTS, resume=args.resume
|
194 |
-
)
|
195 |
-
if cfg.TEST.AUG.ENABLED:
|
196 |
-
logger.info("Running inference with test-time augmentation ...")
|
197 |
-
model = GeneralizedRCNNWithTTA(cfg, model, batch_size=1)
|
198 |
-
|
199 |
-
return do_test(cfg, model)
|
200 |
-
|
201 |
-
distributed = comm.get_world_size() > 1
|
202 |
-
if distributed:
|
203 |
-
model = DistributedDataParallel(
|
204 |
-
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False,
|
205 |
-
find_unused_parameters=True
|
206 |
-
)
|
207 |
-
|
208 |
-
do_train(cfg, model, resume=args.resume)
|
209 |
-
return do_test(cfg, model)
|
210 |
-
|
211 |
-
|
212 |
-
if __name__ == "__main__":
|
213 |
-
args = default_argument_parser()
|
214 |
-
args.add_argument('--manual_device', default='')
|
215 |
-
args = args.parse_args()
|
216 |
-
if args.manual_device != '':
|
217 |
-
os.environ['CUDA_VISIBLE_DEVICES'] = args.manual_device
|
218 |
-
args.dist_url = 'tcp://127.0.0.1:{}'.format(
|
219 |
-
torch.randint(11111, 60000, (1,))[0].item())
|
220 |
-
print("Command Line Args:", args)
|
221 |
-
launch(
|
222 |
-
main,
|
223 |
-
args.num_gpus,
|
224 |
-
num_machines=args.num_machines,
|
225 |
-
machine_rank=args.machine_rank,
|
226 |
-
dist_url=args.dist_url,
|
227 |
-
args=(args,),
|
228 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/app.py
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/Benson/text-generation/Examples/Aethersx2 Apk Version 6.0.md
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>AetherSX2 APK versión 6.0: Una nueva forma de jugar juegos de PS2 en Android</h1>
|
3 |
-
<p>¿Te encanta jugar juegos de PS2 pero no tienes una consola o un PC para ejecutarlos? ¿Te gustaría poder disfrutar de tus títulos favoritos de PS2 en tu dispositivo Android en cualquier momento y en cualquier lugar? Si usted respondió sí a cualquiera de estas preguntas, entonces usted podría estar interesado en AetherSX2 APK versión 6.0, un nuevo y mejorado emulador de PS2 para dispositivos Android. </p>
|
4 |
-
<h2>¿Qué es AetherSX2? </h2>
|
5 |
-
<h3>Un emulador de PS2 para dispositivos Android</h3>
|
6 |
-
<p>AetherSX2 es un emulador de PS2 para dispositivos Android que te permite jugar juegos de PS2 en tu smartphone o tablet. Se basa en el popular emulador PCSX2 para PC, pero optimizado para dispositivos móviles. Soporta la mayoría de juegos de PS2, incluyendo títulos populares como Final Fantasy X, Kingdom Hearts, God of War, y más. </p>
|
7 |
-
<h2>aethersx2 apk version 6.0</h2><br /><p><b><b>Download Zip</b> ===== <a href="https://bltlly.com/2v6Mty">https://bltlly.com/2v6Mty</a></b></p><br /><br />
|
8 |
-
<h3>Características y beneficios de AetherSX2</h3>
|
9 |
-
<p>Algunas de las características y beneficios de AetherSX2 son:</p>
|
10 |
-
<ul>
|
11 |
-
<li> Es libre y de código abierto, lo que significa que puede descargarlo sin pagar nada o preocuparse por el malware o los anuncios. </li>
|
12 |
-
<li> Tiene una interfaz fácil de usar que facilita la navegación y el uso. </li>
|
13 |
-
<li> Tiene alta compatibilidad y rendimiento, lo que significa que puede ejecutar la mayoría de los juegos de PS2 sin problemas y sin problemas. </li>
|
14 |
-
<li> Tiene varias configuraciones y opciones que le permiten personalizar su experiencia de juego, como gráficos, sonido, controles, trucos y más. </li>
|
15 |
-
<li> Tiene soporte multijugador en línea, lo que significa que puede jugar con sus amigos a través de Internet utilizando una conexión Wi-Fi. </li>
|
16 |
-
<li> Tiene soporte de almacenamiento en la nube, lo que significa que puede guardar su progreso en línea y acceder a él desde cualquier dispositivo. </li>
|
17 |
-
</ul>
|
18 |
-
<h2>¿Cómo descargar e instalar AetherSX2 APK versión 6.0? </h2>
|
19 |
-
<h3>Requisitos y compatibilidad</h3>
|
20 |
-
<p>Para descargar e instalar AetherSX2 APK versión 6.0, es necesario tener:</p>
|
21 |
-
<ul>
|
22 |
-
<li>Un dispositivo Android con Android 5.0 o superior. </li>
|
23 |
-
<li>Al menos 4 GB de RAM y 16 GB de espacio de almacenamiento. </li>
|
24 |
-
<li>Una conexión a Internet estable. </li>
|
25 |
-
|
26 |
-
<li>Un archivo ISO de juegos de PS2 (que puede extraer de su propio disco de PS2 o descargar de Internet). </li>
|
27 |
-
</ul>
|
28 |
-
<p>Tenga en cuenta que no todos los dispositivos Android son compatibles con AetherSX2, y algunos juegos pueden no funcionar correctamente o en absoluto. Puedes consultar la lista de compatibilidad en el sitio web oficial de AetherSX2 para ver si tu dispositivo y juego son compatibles. </p>
|
29 |
-
<h3>Pasos para descargar e instalar</h3>
|
30 |
-
<p>Para descargar e instalar AetherSX2 APK versión 6.0, siga estos pasos:</p>
|
31 |
-
<ol>
|
32 |
-
<li>Ir al sitio web oficial de AetherSX2 y haga clic en el botón "Descargar". </li>
|
33 |
-
<li>Permitir que su navegador para descargar el archivo APK (que es de unos 20 MB de tamaño). </li>
|
34 |
-
<li>Una vez que la descarga se haya completado, busque el archivo APK en el administrador de archivos de su dispositivo y toque en él. </li>
|
35 |
-
<li>Si se le solicita, active la opción "Fuentes desconocidas" en la configuración de su dispositivo para permitir la instalación de aplicaciones desde fuera de Google Play Store.</li>
|
36 |
-
<li>Siga las instrucciones en la pantalla para instalar la aplicación. </ <p>Felicidades, que ha instalado con éxito AetherSX2 APK versión 6.0 en su dispositivo Android. </p>
|
37 |
-
<h2>Cómo utilizar AetherSX2 APK versión 6.0? </h2>
|
38 |
-
<h3>Cómo cargar juegos de PS2 en AetherSX2</h3>
|
39 |
-
<p>Para cargar juegos de PS2 en AetherSX2, necesitas tener el archivo BIOS de PS2 y el archivo ISO de juegos de PS2 en el almacenamiento de tu dispositivo. Puede copiarlos desde su PC utilizando un cable USB o un servicio en la nube. Alternativamente, puede descargarlos de Internet, pero asegúrese de que sean legales y seguros. </p>
|
40 |
-
<p></p>
|
41 |
-
<p>Una vez que tengas los archivos, sigue estos pasos:</p>
|
42 |
-
<ol>
|
43 |
-
<li>Inicie la aplicación AetherSX2 y otorgue los permisos necesarios. </li>
|
44 |
-
<li>Toque en el icono "Configuración" en la esquina superior derecha de la pantalla. </li>
|
45 |
-
<li>Toque en la opción "BIOS" y seleccione el archivo BIOS de PS2 desde el almacenamiento de su dispositivo. </li>
|
46 |
-
<li>Toque en el botón "Atrás" para volver al menú principal. </li>
|
47 |
-
<li>Toque en el icono "Juegos" en la esquina inferior izquierda de la pantalla. </li>
|
48 |
-
|
49 |
-
<li>Toque en el arte de la portada del juego para empezar a jugar. </li>
|
50 |
-
</ol>
|
51 |
-
<h3>Cómo configurar ajustes y controles en AetherSX2</h3>
|
52 |
-
<p>Para configurar los ajustes y controles en AetherSX2, puede acceder al menú "Configuración" desde el menú principal o tocando el botón "Menú" mientras juega un juego. A partir de ahí, puede ajustar varias opciones, como:</p>
|
53 |
-
<ul>
|
54 |
-
<li>Gráficos: Puede cambiar la resolución, relación de aspecto, velocidad de fotogramas, anti-aliasing, filtrado de texturas y más. </li>
|
55 |
-
<li>Sonido: Puede activar o desactivar efectos de sonido, música y voz, así como ajustar el volumen y la latencia. </li>
|
56 |
-
<li>Controles: Puede personalizar el diseño, tamaño, opacidad y vibración de los botones virtuales, así como utilizar un controlador físico o un teclado si tiene uno conectado a su dispositivo. </li>
|
57 |
-
<li>Trucos: Puedes activar o desactivar trucos para tus juegos, como salud infinita, dinero, munición y más. </li>
|
58 |
-
<li>Avanzado: Puede ajustar algunos ajustes avanzados que pueden mejorar o empeorar su experiencia de juego, como trucos de velocidad, parches, plugins y más. </li>
|
59 |
-
</ul>
|
60 |
-
<p>Tenga en cuenta que algunos ajustes pueden requerir un reinicio de la aplicación o el juego para que surta efecto. Además, algunos ajustes pueden no funcionar para todos los juegos o dispositivos, así que experimenta con ellos bajo tu propio riesgo. </p>
|
61 |
-
<h2>Pros y contras de la versión AetherSX2 APK 6.0</h2>
|
62 |
-
<h3>Pros</h3>
|
63 |
-
<p>Algunos de los pros de AetherSX2 APK versión 6.0 son:</p>
|
64 |
-
<ul>
|
65 |
-
<li> Es libre y de código abierto, lo que significa que no tiene que pagar nada o preocuparse por el malware o los anuncios. </li>
|
66 |
-
<li> Tiene alta compatibilidad y rendimiento, lo que significa que puede ejecutar la mayoría de los juegos de PS2 sin problemas y sin problemas. </li>
|
67 |
-
<li> Tiene varias configuraciones y opciones que le permiten personalizar su experiencia de juego, como gráficos, sonido, controles, trucos y más. </li>
|
68 |
-
<li> Tiene soporte multijugador en línea, lo que significa que puede jugar con sus amigos a través de Internet utilizando una conexión Wi-Fi. </li>
|
69 |
-
|
70 |
-
</ul>
|
71 |
-
<h3>Contras</h3>
|
72 |
-
<p>Algunos de los contras de AetherSX2 APK versión 6.0 son:</p>
|
73 |
-
<ul>
|
74 |
-
<li> Requiere un dispositivo potente para funcionar correctamente, lo que significa que puede no funcionar bien en dispositivos de gama baja o antiguos. </li>
|
75 |
-
<li>Requiere un archivo BIOS de PS2 y un archivo ISO de juegos de PS2 para jugar, lo que significa que necesita tener acceso a una consola PS2 o un PC para obtenerlos legalmente. </li>
|
76 |
-
<li>Puede que no sea compatible con todos los juegos o dispositivos de PS2, lo que significa que algunos juegos pueden no funcionar correctamente o en absoluto. </li>
|
77 |
-
<li> Puede tener algunos errores o errores que pueden afectar su experiencia de juego, tales como bloqueos, congelaciones, problemas técnicos, etc.</li>
|
78 |
-
</ul>
|
79 |
-
<h2>Conclusión</h2>
|
80 |
-
<p>AetherSX2 APK versión 6.0 es un emulador de PS2 nuevo y mejorado para dispositivos Android que le permite jugar juegos de PS2 en su teléfono inteligente o tableta. Se basa en el popular emulador PCSX2 para PC pero optimizado para dispositivos móviles. Soporta la mayoría de los juegos de PS2 incluyendo títulos populares como Final Fantasy X Kingdom Hearts God of War y más. Tiene varias características y beneficios, como alta compatibilidad y rendimiento interfaz fácil de usar de soporte multijugador en línea de almacenamiento en la nube de soporte y más. También tiene algunos inconvenientes, como requerir un dispositivo potente un archivo BIOS de PS2 y un archivo ISO de juegos de PS2 y tener algunos errores o errores. Sin embargo, si usted es un fan de los juegos de PS2 y quiere jugar en su dispositivo Android, AetherSX2 APK versión 6.0 vale la pena intentarlo. </p>
|
81 |
-
<h2>Preguntas frecuentes</h2>
|
82 |
-
<p>Aquí hay algunas preguntas frecuentes sobre AetherSX2 APK versión 6.0:</p>
|
83 |
-
<ol>
|
84 |
-
<li> ¿Es AetherSX2 APK versión 6.0 seguro y legal? </li>
|
85 |
-
<p>AetherSX2 APK versión 6.0 es seguro y legal, siempre y cuando lo descargue desde el sitio web oficial y utilice su propio archivo BIOS PS2 y archivo ISO del juego PS2. Sin embargo, descargar el archivo BIOS de PS2 y el archivo ISO de juegos de PS2 desde Internet puede ser ilegal en algunos países, así que hazlo bajo tu propio riesgo. </p>
|
86 |
-
<li> ¿Cómo puedo mejorar el rendimiento de AetherSX2 APK versión 6.0? </li>
|
87 |
-
|
88 |
-
<li> ¿Cómo puedo jugar juegos multijugador en línea en AetherSX2 APK versión 6.0? </li>
|
89 |
-
<p>Puede jugar juegos multijugador en línea en AetherSX2 APK versión 6.0 mediante el uso de una conexión Wi-Fi y habilitar la opción "Multijugador en línea" en la aplicación. A continuación, puede unirse o crear una habitación con otros jugadores que están utilizando la misma aplicación y juego. </p>
|
90 |
-
<li> ¿Cómo puedo guardar y cargar mi progreso en AetherSX2 APK versión 6.0? </li>
|
91 |
-
<p>Puede guardar y cargar su progreso en AetherSX2 APK Versión 6.0 mediante el uso de las opciones "Guardar estado" y "Estado de carga" en la aplicación. También puede utilizar la opción "Cloud Saving" para guardar su progreso en línea y acceder a él desde cualquier dispositivo. </p>
|
92 |
-
<li> ¿Dónde puedo obtener más información y soporte para AetherSX2 APK versión 6.0? </li>
|
93 |
-
<p>Puede obtener más información y soporte para AetherSX2 APK Versión 6.0 visitando el sitio web oficial, el servidor oficial de Discord, o la comunidad oficial de Reddit. También puede ponerse en contacto con los desarrolladores por correo electrónico a [email protected]. </p>
|
94 |
-
</ol></p> 64aa2da5cf<br />
|
95 |
-
<br />
|
96 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Fifa 4 En Lnea.md
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar FIFA 4 Online: Una guía para principiantes</h1>
|
3 |
-
<p>Si eres un fan de los juegos de fútbol, es posible que hayas oído hablar de FIFA 4 Online, la última entrega de la popular serie EA Sports. FIFA 4 Online es un juego de fútbol en línea gratuito que te permite crear tu propio equipo, competir con otros jugadores y disfrutar de la emoción del hermoso juego. En este artículo, te mostraremos cómo descargar FIFA 4 Online, cómo jugarlo y por qué deberías probarlo. </p>
|
4 |
-
<h2>¿Qué es FIFA 4 Online? </h2>
|
5 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
6 |
-
<p>FIFA 4 Online es un juego de fútbol multijugador en línea desarrollado por EA Spearhead y publicado por Nexon. Se basa en la serie FIFA, pero con algunas características y mejoras únicas. Algunas de las características de FIFA 4 Online son:</p>
|
7 |
-
<h2>descargar fifa 4 en línea</h2><br /><p><b><b>DOWNLOAD</b> • <a href="https://bltlly.com/2v6Lem">https://bltlly.com/2v6Lem</a></b></p><br /><br />
|
8 |
-
<ul>
|
9 |
-
<li>Gráficos y animaciones realistas que capturan la esencia del fútbol</li>
|
10 |
-
<li>Más de 15.000 jugadores de más de 40 ligas y equipos nacionales</li>
|
11 |
-
<li>Una variedad de modos de juego, tales como modo de temporada, modo de torneo, modo de partido, y el modo de práctica</li>
|
12 |
-
<li>Un constructor de equipo personalizado que te permite crear tu propio equipo, elegir tu formación, tácticas y kits</li>
|
13 |
-
<li>Un sistema de desarrollo de jugadores que te permite mejorar las habilidades, habilidades y atributos de tus jugadores</li>
|
14 |
-
<li>Un sistema de mercado que te permite comprar y vender jugadores, artículos y monedas</li>
|
15 |
-
<li>Un sistema de clasificación que mide tu rendimiento y te empareja con jugadores de nivel de habilidad similar</li>
|
16 |
-
<li>Un sistema social que te permite chatear, interactuar y cooperar con otros jugadores</li>
|
17 |
-
</ul>
|
18 |
-
<h3>Cómo descargar e instalar FIFA 4 Online</h3>
|
19 |
-
<p>Para jugar a FIFA 4 Online, necesitas descargar e instalar el cliente del juego en tu PC. Estos son los pasos para hacerlo:</p>
|
20 |
-
<ol>
|
21 |
-
<li>Vaya al sitio web oficial de FIFA 4 Online y elija su región. </li>
|
22 |
-
<li>Haga clic en el botón "Descargar" y siga las instrucciones para descargar el instalador del juego. </li>
|
23 |
-
|
24 |
-
<li>Espere a que la instalación termine y lance el juego. </li>
|
25 |
-
</ol>
|
26 |
-
<h3>Cómo crear una cuenta e iniciar sesión</h3>
|
27 |
-
<p>Para jugar a FIFA 4 Online, necesitas crear una cuenta e iniciar sesión con tus credenciales. Estos son los pasos para hacerlo:</p>
|
28 |
-
<ol>
|
29 |
-
<li>En el lanzador del juego, haga clic en el botón "Registrarse" y complete su dirección de correo electrónico, contraseña, apodo y pregunta de seguridad. </li>
|
30 |
-
<li>Verifique su dirección de correo electrónico haciendo clic en el enlace enviado a su bandeja de entrada. </li>
|
31 |
-
<li>Inicie sesión con su dirección de correo electrónico y contraseña en el lanzador del juego. </li>
|
32 |
-
<li>Elige un servidor y un canal para entrar en el juego. </li>
|
33 |
-
</ol>
|
34 |
-
<h2>Cómo jugar FIFA 4 Online</h2>
|
35 |
-
<h3>Los modos y opciones del juego</h3>
|
36 |
-
<p>FIFA 4 Online ofrece una variedad de modos de juego y opciones para diferentes preferencias y estilos de juego. Algunos de los modos de juego y opciones son:</p>
|
37 |
-
<ul>
|
38 |
-
<li>Modo temporada: Este es el modo principal del juego, donde se puede jugar a través de una temporada completa con su equipo. Puedes elegir entre diferentes ligas, como Premier League, Bundesliga, La Liga, Serie A, Ligue 1, K League, CSL, etc. También puedes participar en competiciones de copa, como FA Cup, Champions League, Europa League, etc. Puedes ganar monedas, objetos, jugadores y trofeos completando partidos y logros. </li>
|
39 |
-
<li>Modo de torneo: Este es un modo donde puedes unirte o crear un torneo con otros jugadores. Puedes elegir entre diferentes formatos, como knockout, round-robin, league, etc. También puedes establecer las reglas, como duración del partido, dificultad, clasificación por equipos, etc. Puedes ganar premios y recompensas avanzando en el torneo. </li>
|
40 |
-
<li>Modo de partido: Este es un modo en el que puede jugar un solo partido con otro jugador o un ordenador. Puede elegir entre diferentes opciones, como partido amistoso, partido clasificado, partido personalizado, etc. También puede seleccionar el estadio, el clima, el tiempo, etc. Puede ganar monedas y experiencia jugando partidos. </li>
|
41 |
-
|
42 |
-
</ul>
|
43 |
-
<h3>Los controles y la interfaz</h3>
|
44 |
-
<p>FIFA 4 Online tiene un sistema de control sencillo e intuitivo y una interfaz que facilita el juego. Algunos de los controles y elementos de interfaz son:</p>
|
45 |
-
<ul>
|
46 |
-
<li>Teclado y ratón: Puede utilizar el teclado y el ratón para controlar sus reproductores y navegar por los menús. Las teclas por defecto son W, A, S, D para el movimiento, Q y E para cambiar jugadores, espacio para correr, clic izquierdo para pasar y disparar, clic derecho para abordar y deslizar, etc. También puede personalizar las teclas en el menú de configuración. </li>
|
47 |
-
<li>Gamepad: Puedes usar un gamepad para controlar a tus jugadores y navegar por los menús. Los botones por defecto son stick izquierdo para el movimiento, stick derecho para movimientos de habilidad, L1 y R1 para cambiar jugadores, L2 para correr, X para pasar y disparar, O para abordar y deslizarse, etc. También puede personalizar los botones en el menú de configuración. </li>
|
48 |
-
<li>HUD: El HUD (visualización frontal) le muestra la información y las opciones que necesita durante el juego. Los elementos de HUD son puntuación, tiempo, resistencia, radar, nombres de jugadores, calificaciones de jugadores, etc. También puede acceder al menú de pausa, ventana de chat, comandos rápidos, etc. desde el HUD.</li>
|
49 |
-
</ul>
|
50 |
-
<h3>Consejos y trucos para principiantes</h3>
|
51 |
-
<p>FIFA 4 Online es un juego divertido y desafiante que requiere habilidad y estrategia para dominar. Aquí hay algunos consejos y trucos para principiantes que pueden ayudarte a mejorar tu juego:</p>
|
52 |
-
<ul>
|
53 |
-
<li>Elige tu equipo sabiamente: Tu equipo es tu activo más importante en FIFA 4 Online. Usted debe elegir un equipo que se adapte a su estilo de juego y preferencias. También puedes personalizar a tu equipo comprando y vendiendo jugadores, cambiando formaciones y tácticas, mejorando habilidades y atributos, etc.</li>
|
54 |
-
|
55 |
-
<li>Juega inteligente: FIFA 4 Online no se trata solo de marcar goles y ganar partidos. También se trata de jugar inteligente y usar tu cerebro. Debes analizar las fortalezas y debilidades de tu oponente, adaptarte a diferentes situaciones y escenarios, usar diferentes estrategias y formaciones, etc.</li>
|
56 |
-
<li>Diviértete: El consejo más importante para los principiantes es divertirse jugando a FIFA 4 Online. No te frustres ni te enojes si pierdes o cometes errores. En cambio, aprende de tus errores y mejora tu juego. Disfruta de la emoción del fútbol y diviértete con otros jugadores. </li>
|
57 |
-
</ul>
|
58 |
-
<h2>Por qué deberías jugar a FIFA 4 Online</h2>
|
59 |
-
<h3>Los beneficios de jugar juegos de fútbol en línea</h3>
|
60 |
-
<p>Jugar juegos de fútbol online como FIFA 4 Online tiene muchos beneficios que pueden mejorar tu vida de varias maneras. Algunos de los beneficios son:</p>
|
61 |
-
<ul>
|
62 |
-
<li>Entretenimiento: Jugar juegos de fútbol en línea es una gran manera de entretenerse y divertirse. Usted puede disfrutar de la emoción del fútbol sin salir de su casa o gastar dinero en entradas o equipos. </li>
|
63 |
-
<li>Educación: Jugar juegos de fútbol en línea también puede educar sobre el fútbol y otros aspectos de la vida. Puedes aprender sobre diferentes equipos, jugadores, ligas, culturas, historia, geografía, etc. También puedes mejorar tus habilidades cognitivas, como memoria, concentración, resolución de problemas, etc.</li>
|
64 |
-
<li>Ejercicio: Jugar juegos de fútbol online también puede ayudarte a ejercitar tu cuerpo y tu mente. Puede quemar calorías, fortalecer sus músculos, mejorar su coordinación, etc. moviendo los dedos, las manos, los brazos, etc. También puede estimular su cerebro, liberar el estrés, aumentar su estado de ánimo, etc. jugando juegos de fútbol en línea. </li>
|
65 |
-
|
66 |
-
</ul>
|
67 |
-
<h3>La comunidad y los eventos de FIFA 4 Online</h3>
|
68 |
-
<p>FIFA 4 Online tiene una gran y activa comunidad de jugadores que comparten una pasión común por el fútbol y los juegos. Puedes unirte a la comunidad y disfrutar de los diferentes eventos y actividades que ofrece FIFA 4 Online. Algunos de la comunidad y eventos de FIFA 4 Online son:</p>
|
69 |
-
<ul>
|
70 |
-
<li>Foro: El foro es el lugar donde puedes comunicarte con otros jugadores y los desarrolladores de FIFA 4 Online. Puede publicar sus preguntas, sugerencias, comentarios, informes de errores, etc. También puede leer las últimas noticias, anuncios, guías, consejos, etc. del personal oficial. </li>
|
71 |
-
<li>Blog: El blog es el lugar donde puedes leer las historias y experiencias de otros jugadores y los desarrolladores de FIFA 4 Online. También puedes compartir tus propias historias y experiencias escribiendo un artículo de blog. También puedes comentar otros artículos e interactuar con otros blogueros. </li>
|
72 |
-
<li>Facebook: La página de Facebook es el lugar donde puedes seguir las actualizaciones y eventos de FIFA 4 Online. También puede gustar, compartir y comentar las publicaciones y fotos. También puede participar en concursos y regalos y ganar premios y recompensas. </li>
|
73 |
-
<li>YouTube: El canal de YouTube es el lugar donde puedes ver los videos y transmisiones en vivo de FIFA 4 Online. También puede suscribirse y comentar los vídeos. También puede participar en chats y encuestas e interactuar con otros espectadores. </li>
|
74 |
-
<li>Discordia: El servidor de discordia es el lugar donde puedes unirte a los chats de voz y texto de FIFA 4 Online. También puede crear o unir salas y canales para diferentes temas y propósitos. También puede usar bots y comandos para mejorar su experiencia. </li>
|
75 |
-
</ul>
|
76 |
-
<h3>Las recompensas y logros de FIFA 4 Online</h3>
|
77 |
-
|
78 |
-
<ul>
|
79 |
-
<li>Monedas: Las monedas son la moneda de FIFA 4 Online que puedes usar para comprar jugadores, objetos, monedas, etc. Puedes ganar monedas jugando partidos, completando logros, participando en eventos, etc.</li>
|
80 |
-
<li>Artículos: Los artículos son los consumibles de FIFA 4 Online que puedes usar para mejorar tu equipo o jugadores. Puedes ganar objetos jugando partidos, completando logros, participando en eventos, etc.</li>
|
81 |
-
<li>Jugadores: Los jugadores son el núcleo de FIFA 4 Online que puedes utilizar para construir tu equipo o vender monedas. Puedes ganar jugadores jugando partidos, completando logros, participando en eventos, etc.</li>
|
82 |
-
<li>Trofeos: Los trofeos son los símbolos de tus logros y progreso en FIFA 4 Online. Puedes ganar trofeos jugando partidos, completando logros, participando en eventos, etc.</li>
|
83 |
-
<li>Logros: Los logros son los retos y metas que puedes completar en FIFA 4 Online. Usted puede ganar logros mediante la realización de diversas tareas y acciones en el juego, tales como goles, ganar partidos, la creación de equipos, etc.</li>
|
84 |
-
</ul>
|
85 |
-
<h2>Conclusión</h2>
|
86 |
-
<p>FIFA 4 Online es un juego de fútbol en línea gratuito que ofrece una experiencia realista e inmersiva del hermoso juego. Puedes descargar FIFA 4 Online, crear tu propio equipo, jugar con otros jugadores y disfrutar de las diferentes características y modos del juego. También puedes unirte a la comunidad y a los eventos de FIFA 4 Online y ganar recompensas y logros por tu rendimiento y progreso. FIFA 4 Online es un juego que no debes perderte si eres fanático del fútbol y los juegos. </p>
|
87 |
-
<p></p>
|
88 |
-
<p>Entonces, ¿qué estás esperando? Descarga FIFA 4 Online hoy y comienza tu viaje de fútbol! </p>
|
89 |
-
<h2>Preguntas frecuentes</h2>
|
90 |
-
<h3>Q: ¿Cuáles son los requisitos del sistema para FIFA 4 Online? </h3>
|
91 |
-
<p>A: Los requisitos mínimos del sistema para FIFA 4 Online son:</p>
|
92 |
-
<ul>
|
93 |
-
<li>OS: Windows 7 o superior</li>
|
94 |
-
<li>CPU: Intel Core i3 o superior</li>
|
95 |
-
<li>RAM: 4 GB o superior</li>
|
96 |
-
<li>GPU: NVIDIA GeForce GT 630 o superior</li>
|
97 |
-
|
98 |
-
<li>Internet: Conexión de banda ancha o superior</li>
|
99 |
-
</ul>
|
100 |
-
<h3>Q: ¿Cómo puedo contactar con el servicio de atención al cliente de FIFA 4 Online? </h3>
|
101 |
-
<p>A: Puede ponerse en contacto con el servicio de atención al cliente de FIFA 4 Online utilizando los siguientes métodos:</p>
|
102 |
-
<ul>
|
103 |
-
<li>Correo electrónico: [email protected]</li>
|
104 |
-
<li>Teléfono: +82-2-1234-5678</li>
|
105 |
-
<li>Chat en vivo: Disponible en el sitio web oficial de FIFA 4 Online </li>
|
106 |
-
</ul>
|
107 |
-
<h3>Q: ¿Cómo puedo reportar un error o un hacker en FIFA 4 Online? </h3>
|
108 |
-
<p>A: Puedes reportar un error o un hacker en FIFA 4 Online usando los siguientes métodos:</p>
|
109 |
-
<ul>
|
110 |
-
<li>Informe en el juego: Puede usar el botón de informe en el HUD o el menú de pausa para informar de un error o un hacker durante un partido. </li>
|
111 |
-
<li>Informe del foro: Puede utilizar la sección de informes en el foro para informar de un error o un hacker con capturas de pantalla o vídeos como evidencia. </li>
|
112 |
-
<li>Informe de correo electrónico: Puede usar la dirección de correo electrónico [email protected] para reportar un error o un hacker con capturas de pantalla o videos como evidencia. </li>
|
113 |
-
</ul>
|
114 |
-
<h3>Q: ¿Cómo puedo obtener más monedas y artículos en FIFA 4 Online? </h3>
|
115 |
-
<p>A: Puedes obtener más monedas y objetos en FIFA 4 Online utilizando los siguientes métodos:</p>
|
116 |
-
<ul>
|
117 |
-
<li>Jugar partidos: Puedes ganar monedas y objetos jugando partidos en diferentes modos y dificultades. </li>
|
118 |
-
<li>Completar logros: Puedes ganar monedas y objetos completando logros en diferentes categorías y niveles. </li>
|
119 |
-
<li>Participar en eventos: Puedes ganar monedas y objetos participando en eventos que se celebran regularmente u ocasionalmente. </li>
|
120 |
-
<li>Comprar monedas y artículos: Puedes comprar monedas y artículos con dinero real utilizando el sistema de mercado o el sitio web oficial de FIFA 4 Online </li>
|
121 |
-
</ul>
|
122 |
-
<h3>P: ¿Cómo puedo mejorar mis habilidades y tácticas en FIFA 4 Online? </h3>
|
123 |
-
<p>A: Puedes mejorar tus habilidades y tácticas en FIFA 4 Online utilizando los siguientes métodos:</p>
|
124 |
-
<ul>
|
125 |
-
<li>Practicar habilidades: Puedes practicar tus habilidades y movimientos en el modo de práctica o contra oponentes fáciles. </li>
|
126 |
-
|
127 |
-
<li>Ver repeticiones: Puedes ver tus propias repeticiones u otras repeticiones de jugadores para analizar tus errores y mejorar tu juego. </li>
|
128 |
-
<li>Pedir consejo: Puedes pedir consejo a otros jugadores o expertos en el chat, el foro, el blog, etc.</li>
|
129 |
-
</ul>
|
130 |
-
: https://www.fifaonline4.nexon.com/ : https://www.ea.com/games/fifa/fifa-fifa-online-</p> 64aa2da5cf<br />
|
131 |
-
<br />
|
132 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/vis/densepose.py
DELETED
@@ -1,581 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import logging
|
3 |
-
import numpy as np
|
4 |
-
from typing import Iterable, Optional, Tuple
|
5 |
-
import cv2
|
6 |
-
|
7 |
-
from ..structures import DensePoseDataRelative, DensePoseOutput, DensePoseResult
|
8 |
-
from .base import Boxes, Image, MatrixVisualizer, PointsVisualizer
|
9 |
-
|
10 |
-
|
11 |
-
class DensePoseResultsVisualizer(object):
|
12 |
-
def visualize(self, image_bgr: Image, densepose_result: Optional[DensePoseResult]) -> Image:
|
13 |
-
if densepose_result is None:
|
14 |
-
return image_bgr
|
15 |
-
context = self.create_visualization_context(image_bgr)
|
16 |
-
for i, result_encoded_w_shape in enumerate(densepose_result.results):
|
17 |
-
iuv_arr = DensePoseResult.decode_png_data(*result_encoded_w_shape)
|
18 |
-
bbox_xywh = densepose_result.boxes_xywh[i]
|
19 |
-
self.visualize_iuv_arr(context, iuv_arr, bbox_xywh)
|
20 |
-
image_bgr = self.context_to_image_bgr(context)
|
21 |
-
return image_bgr
|
22 |
-
|
23 |
-
|
24 |
-
class DensePoseMaskedColormapResultsVisualizer(DensePoseResultsVisualizer):
|
25 |
-
def __init__(
|
26 |
-
self,
|
27 |
-
data_extractor,
|
28 |
-
segm_extractor,
|
29 |
-
inplace=True,
|
30 |
-
cmap=cv2.COLORMAP_PARULA,
|
31 |
-
alpha=0.7,
|
32 |
-
val_scale=1.0,
|
33 |
-
):
|
34 |
-
self.mask_visualizer = MatrixVisualizer(
|
35 |
-
inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha
|
36 |
-
)
|
37 |
-
self.data_extractor = data_extractor
|
38 |
-
self.segm_extractor = segm_extractor
|
39 |
-
|
40 |
-
def create_visualization_context(self, image_bgr: Image):
|
41 |
-
return image_bgr
|
42 |
-
|
43 |
-
def context_to_image_bgr(self, context):
|
44 |
-
return context
|
45 |
-
|
46 |
-
def get_image_bgr_from_context(self, context):
|
47 |
-
return context
|
48 |
-
|
49 |
-
def visualize_iuv_arr(self, context, iuv_arr, bbox_xywh):
|
50 |
-
image_bgr = self.get_image_bgr_from_context(context)
|
51 |
-
matrix = self.data_extractor(iuv_arr)
|
52 |
-
segm = self.segm_extractor(iuv_arr)
|
53 |
-
mask = np.zeros(matrix.shape, dtype=np.uint8)
|
54 |
-
mask[segm > 0] = 1
|
55 |
-
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh)
|
56 |
-
return image_bgr
|
57 |
-
|
58 |
-
|
59 |
-
def _extract_i_from_iuvarr(iuv_arr):
|
60 |
-
return iuv_arr[0, :, :]
|
61 |
-
|
62 |
-
|
63 |
-
def _extract_u_from_iuvarr(iuv_arr):
|
64 |
-
return iuv_arr[1, :, :]
|
65 |
-
|
66 |
-
|
67 |
-
def _extract_v_from_iuvarr(iuv_arr):
|
68 |
-
return iuv_arr[2, :, :]
|
69 |
-
|
70 |
-
|
71 |
-
class DensePoseResultsMplContourVisualizer(DensePoseResultsVisualizer):
|
72 |
-
def __init__(self, levels=10, **kwargs):
|
73 |
-
self.levels = levels
|
74 |
-
self.plot_args = kwargs
|
75 |
-
|
76 |
-
def create_visualization_context(self, image_bgr: Image):
|
77 |
-
import matplotlib.pyplot as plt
|
78 |
-
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
|
79 |
-
|
80 |
-
context = {}
|
81 |
-
context["image_bgr"] = image_bgr
|
82 |
-
dpi = 100
|
83 |
-
height_inches = float(image_bgr.shape[0]) / dpi
|
84 |
-
width_inches = float(image_bgr.shape[1]) / dpi
|
85 |
-
fig = plt.figure(figsize=(width_inches, height_inches), dpi=dpi)
|
86 |
-
plt.axes([0, 0, 1, 1])
|
87 |
-
plt.axis("off")
|
88 |
-
context["fig"] = fig
|
89 |
-
canvas = FigureCanvas(fig)
|
90 |
-
context["canvas"] = canvas
|
91 |
-
extent = (0, image_bgr.shape[1], image_bgr.shape[0], 0)
|
92 |
-
plt.imshow(image_bgr[:, :, ::-1], extent=extent)
|
93 |
-
return context
|
94 |
-
|
95 |
-
def context_to_image_bgr(self, context):
|
96 |
-
fig = context["fig"]
|
97 |
-
w, h = map(int, fig.get_size_inches() * fig.get_dpi())
|
98 |
-
canvas = context["canvas"]
|
99 |
-
canvas.draw()
|
100 |
-
image_1d = np.fromstring(canvas.tostring_rgb(), dtype="uint8")
|
101 |
-
image_rgb = image_1d.reshape(h, w, 3)
|
102 |
-
image_bgr = image_rgb[:, :, ::-1].copy()
|
103 |
-
return image_bgr
|
104 |
-
|
105 |
-
def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> Image:
|
106 |
-
import matplotlib.pyplot as plt
|
107 |
-
|
108 |
-
u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0
|
109 |
-
v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0
|
110 |
-
extent = (
|
111 |
-
bbox_xywh[0],
|
112 |
-
bbox_xywh[0] + bbox_xywh[2],
|
113 |
-
bbox_xywh[1],
|
114 |
-
bbox_xywh[1] + bbox_xywh[3],
|
115 |
-
)
|
116 |
-
plt.contour(u, self.levels, extent=extent, **self.plot_args)
|
117 |
-
plt.contour(v, self.levels, extent=extent, **self.plot_args)
|
118 |
-
|
119 |
-
|
120 |
-
class DensePoseResultsCustomContourVisualizer(DensePoseResultsVisualizer):
|
121 |
-
"""
|
122 |
-
Contour visualization using marching squares
|
123 |
-
"""
|
124 |
-
|
125 |
-
def __init__(self, levels=10, **kwargs):
|
126 |
-
# TODO: colormap is hardcoded
|
127 |
-
cmap = cv2.COLORMAP_PARULA
|
128 |
-
if isinstance(levels, int):
|
129 |
-
self.levels = np.linspace(0, 1, levels)
|
130 |
-
else:
|
131 |
-
self.levels = levels
|
132 |
-
if "linewidths" in kwargs:
|
133 |
-
self.linewidths = kwargs["linewidths"]
|
134 |
-
else:
|
135 |
-
self.linewidths = [1] * len(self.levels)
|
136 |
-
self.plot_args = kwargs
|
137 |
-
img_colors_bgr = cv2.applyColorMap((self.levels * 255).astype(np.uint8), cmap)
|
138 |
-
self.level_colors_bgr = [
|
139 |
-
[int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr
|
140 |
-
]
|
141 |
-
|
142 |
-
def create_visualization_context(self, image_bgr: Image):
|
143 |
-
return image_bgr
|
144 |
-
|
145 |
-
def context_to_image_bgr(self, context):
|
146 |
-
return context
|
147 |
-
|
148 |
-
def get_image_bgr_from_context(self, context):
|
149 |
-
return context
|
150 |
-
|
151 |
-
def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> Image:
|
152 |
-
image_bgr = self.get_image_bgr_from_context(context)
|
153 |
-
segm = _extract_i_from_iuvarr(iuv_arr)
|
154 |
-
u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0
|
155 |
-
v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0
|
156 |
-
self._contours(image_bgr, u, segm, bbox_xywh)
|
157 |
-
self._contours(image_bgr, v, segm, bbox_xywh)
|
158 |
-
|
159 |
-
def _contours(self, image_bgr, arr, segm, bbox_xywh):
|
160 |
-
for part_idx in range(1, DensePoseDataRelative.N_PART_LABELS + 1):
|
161 |
-
mask = segm == part_idx
|
162 |
-
if not np.any(mask):
|
163 |
-
continue
|
164 |
-
arr_min = np.amin(arr[mask])
|
165 |
-
arr_max = np.amax(arr[mask])
|
166 |
-
I, J = np.nonzero(mask)
|
167 |
-
i0 = np.amin(I)
|
168 |
-
i1 = np.amax(I) + 1
|
169 |
-
j0 = np.amin(J)
|
170 |
-
j1 = np.amax(J) + 1
|
171 |
-
if (j1 == j0 + 1) or (i1 == i0 + 1):
|
172 |
-
continue
|
173 |
-
Nw = arr.shape[1] - 1
|
174 |
-
Nh = arr.shape[0] - 1
|
175 |
-
for level_idx, level in enumerate(self.levels):
|
176 |
-
if (level < arr_min) or (level > arr_max):
|
177 |
-
continue
|
178 |
-
vp = arr[i0:i1, j0:j1] >= level
|
179 |
-
bin_codes = vp[:-1, :-1] + vp[1:, :-1] * 2 + vp[1:, 1:] * 4 + vp[:-1, 1:] * 8
|
180 |
-
mp = mask[i0:i1, j0:j1]
|
181 |
-
bin_mask_codes = mp[:-1, :-1] + mp[1:, :-1] * 2 + mp[1:, 1:] * 4 + mp[:-1, 1:] * 8
|
182 |
-
it = np.nditer(bin_codes, flags=["multi_index"])
|
183 |
-
color_bgr = self.level_colors_bgr[level_idx]
|
184 |
-
linewidth = self.linewidths[level_idx]
|
185 |
-
while not it.finished:
|
186 |
-
if (it[0] != 0) and (it[0] != 15):
|
187 |
-
i, j = it.multi_index
|
188 |
-
if bin_mask_codes[i, j] != 0:
|
189 |
-
self._draw_line(
|
190 |
-
image_bgr,
|
191 |
-
arr,
|
192 |
-
mask,
|
193 |
-
level,
|
194 |
-
color_bgr,
|
195 |
-
linewidth,
|
196 |
-
it[0],
|
197 |
-
it.multi_index,
|
198 |
-
bbox_xywh,
|
199 |
-
Nw,
|
200 |
-
Nh,
|
201 |
-
(i0, j0),
|
202 |
-
)
|
203 |
-
it.iternext()
|
204 |
-
|
205 |
-
def _draw_line(
|
206 |
-
self,
|
207 |
-
image_bgr,
|
208 |
-
arr,
|
209 |
-
mask,
|
210 |
-
v,
|
211 |
-
color_bgr,
|
212 |
-
linewidth,
|
213 |
-
bin_code,
|
214 |
-
multi_idx,
|
215 |
-
bbox_xywh,
|
216 |
-
Nw,
|
217 |
-
Nh,
|
218 |
-
offset,
|
219 |
-
):
|
220 |
-
lines = self._bin_code_2_lines(arr, v, bin_code, multi_idx, Nw, Nh, offset)
|
221 |
-
x0, y0, w, h = bbox_xywh
|
222 |
-
x1 = x0 + w
|
223 |
-
y1 = y0 + h
|
224 |
-
for line in lines:
|
225 |
-
x0r, y0r = line[0]
|
226 |
-
x1r, y1r = line[1]
|
227 |
-
pt0 = (int(x0 + x0r * (x1 - x0)), int(y0 + y0r * (y1 - y0)))
|
228 |
-
pt1 = (int(x0 + x1r * (x1 - x0)), int(y0 + y1r * (y1 - y0)))
|
229 |
-
cv2.line(image_bgr, pt0, pt1, color_bgr, linewidth)
|
230 |
-
|
231 |
-
def _bin_code_2_lines(self, arr, v, bin_code, multi_idx, Nw, Nh, offset):
|
232 |
-
i0, j0 = offset
|
233 |
-
i, j = multi_idx
|
234 |
-
i += i0
|
235 |
-
j += j0
|
236 |
-
v0, v1, v2, v3 = arr[i, j], arr[i + 1, j], arr[i + 1, j + 1], arr[i, j + 1]
|
237 |
-
x0i = float(j) / Nw
|
238 |
-
y0j = float(i) / Nh
|
239 |
-
He = 1.0 / Nh
|
240 |
-
We = 1.0 / Nw
|
241 |
-
if (bin_code == 1) or (bin_code == 14):
|
242 |
-
a = (v - v0) / (v1 - v0)
|
243 |
-
b = (v - v0) / (v3 - v0)
|
244 |
-
pt1 = (x0i, y0j + a * He)
|
245 |
-
pt2 = (x0i + b * We, y0j)
|
246 |
-
return [(pt1, pt2)]
|
247 |
-
elif (bin_code == 2) or (bin_code == 13):
|
248 |
-
a = (v - v0) / (v1 - v0)
|
249 |
-
b = (v - v1) / (v2 - v1)
|
250 |
-
pt1 = (x0i, y0j + a * He)
|
251 |
-
pt2 = (x0i + b * We, y0j + He)
|
252 |
-
return [(pt1, pt2)]
|
253 |
-
elif (bin_code == 3) or (bin_code == 12):
|
254 |
-
a = (v - v0) / (v3 - v0)
|
255 |
-
b = (v - v1) / (v2 - v1)
|
256 |
-
pt1 = (x0i + a * We, y0j)
|
257 |
-
pt2 = (x0i + b * We, y0j + He)
|
258 |
-
return [(pt1, pt2)]
|
259 |
-
elif (bin_code == 4) or (bin_code == 11):
|
260 |
-
a = (v - v1) / (v2 - v1)
|
261 |
-
b = (v - v3) / (v2 - v3)
|
262 |
-
pt1 = (x0i + a * We, y0j + He)
|
263 |
-
pt2 = (x0i + We, y0j + b * He)
|
264 |
-
return [(pt1, pt2)]
|
265 |
-
elif (bin_code == 6) or (bin_code == 9):
|
266 |
-
a = (v - v0) / (v1 - v0)
|
267 |
-
b = (v - v3) / (v2 - v3)
|
268 |
-
pt1 = (x0i, y0j + a * He)
|
269 |
-
pt2 = (x0i + We, y0j + b * He)
|
270 |
-
return [(pt1, pt2)]
|
271 |
-
elif (bin_code == 7) or (bin_code == 8):
|
272 |
-
a = (v - v0) / (v3 - v0)
|
273 |
-
b = (v - v3) / (v2 - v3)
|
274 |
-
pt1 = (x0i + a * We, y0j)
|
275 |
-
pt2 = (x0i + We, y0j + b * He)
|
276 |
-
return [(pt1, pt2)]
|
277 |
-
elif bin_code == 5:
|
278 |
-
a1 = (v - v0) / (v1 - v0)
|
279 |
-
b1 = (v - v1) / (v2 - v1)
|
280 |
-
pt11 = (x0i, y0j + a1 * He)
|
281 |
-
pt12 = (x0i + b1 * We, y0j + He)
|
282 |
-
a2 = (v - v0) / (v3 - v0)
|
283 |
-
b2 = (v - v3) / (v2 - v3)
|
284 |
-
pt21 = (x0i + a2 * We, y0j)
|
285 |
-
pt22 = (x0i + We, y0j + b2 * He)
|
286 |
-
return [(pt11, pt12), (pt21, pt22)]
|
287 |
-
elif bin_code == 10:
|
288 |
-
a1 = (v - v0) / (v3 - v0)
|
289 |
-
b1 = (v - v0) / (v1 - v0)
|
290 |
-
pt11 = (x0i + a1 * We, y0j)
|
291 |
-
pt12 = (x0i, y0j + b1 * He)
|
292 |
-
a2 = (v - v1) / (v2 - v1)
|
293 |
-
b2 = (v - v3) / (v2 - v3)
|
294 |
-
pt21 = (x0i + a2 * We, y0j + He)
|
295 |
-
pt22 = (x0i + We, y0j + b2 * He)
|
296 |
-
return [(pt11, pt12), (pt21, pt22)]
|
297 |
-
return []
|
298 |
-
|
299 |
-
|
300 |
-
try:
|
301 |
-
import matplotlib
|
302 |
-
|
303 |
-
matplotlib.use("Agg")
|
304 |
-
DensePoseResultsContourVisualizer = DensePoseResultsMplContourVisualizer
|
305 |
-
except ModuleNotFoundError:
|
306 |
-
logger = logging.getLogger(__name__)
|
307 |
-
logger.warning("Could not import matplotlib, using custom contour visualizer")
|
308 |
-
DensePoseResultsContourVisualizer = DensePoseResultsCustomContourVisualizer
|
309 |
-
|
310 |
-
|
311 |
-
class DensePoseResultsFineSegmentationVisualizer(DensePoseMaskedColormapResultsVisualizer):
|
312 |
-
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
|
313 |
-
super(DensePoseResultsFineSegmentationVisualizer, self).__init__(
|
314 |
-
_extract_i_from_iuvarr,
|
315 |
-
_extract_i_from_iuvarr,
|
316 |
-
inplace,
|
317 |
-
cmap,
|
318 |
-
alpha,
|
319 |
-
val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS,
|
320 |
-
)
|
321 |
-
|
322 |
-
|
323 |
-
class DensePoseResultsUVisualizer(DensePoseMaskedColormapResultsVisualizer):
|
324 |
-
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
|
325 |
-
super(DensePoseResultsUVisualizer, self).__init__(
|
326 |
-
_extract_u_from_iuvarr, _extract_i_from_iuvarr, inplace, cmap, alpha, val_scale=1.0
|
327 |
-
)
|
328 |
-
|
329 |
-
|
330 |
-
class DensePoseResultsVVisualizer(DensePoseMaskedColormapResultsVisualizer):
|
331 |
-
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
|
332 |
-
super(DensePoseResultsVVisualizer, self).__init__(
|
333 |
-
_extract_v_from_iuvarr, _extract_i_from_iuvarr, inplace, cmap, alpha, val_scale=1.0
|
334 |
-
)
|
335 |
-
|
336 |
-
|
337 |
-
class DensePoseOutputsFineSegmentationVisualizer(object):
|
338 |
-
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
|
339 |
-
self.mask_visualizer = MatrixVisualizer(
|
340 |
-
inplace=inplace,
|
341 |
-
cmap=cmap,
|
342 |
-
val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS,
|
343 |
-
alpha=alpha,
|
344 |
-
)
|
345 |
-
|
346 |
-
def visualize(
|
347 |
-
self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]]
|
348 |
-
) -> Image:
|
349 |
-
if dp_output_with_bboxes is None:
|
350 |
-
return image_bgr
|
351 |
-
densepose_output, bboxes_xywh = dp_output_with_bboxes
|
352 |
-
S = densepose_output.S
|
353 |
-
I = densepose_output.I # noqa
|
354 |
-
U = densepose_output.U
|
355 |
-
V = densepose_output.V
|
356 |
-
N = S.size(0)
|
357 |
-
assert N == I.size(0), (
|
358 |
-
"densepose outputs S {} and I {}"
|
359 |
-
" should have equal first dim size".format(S.size(), I.size())
|
360 |
-
)
|
361 |
-
assert N == U.size(0), (
|
362 |
-
"densepose outputs S {} and U {}"
|
363 |
-
" should have equal first dim size".format(S.size(), U.size())
|
364 |
-
)
|
365 |
-
assert N == V.size(0), (
|
366 |
-
"densepose outputs S {} and V {}"
|
367 |
-
" should have equal first dim size".format(S.size(), V.size())
|
368 |
-
)
|
369 |
-
assert N == len(bboxes_xywh), (
|
370 |
-
"number of bounding boxes {}"
|
371 |
-
" should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N)
|
372 |
-
)
|
373 |
-
for n in range(N):
|
374 |
-
Sn = S[n].argmax(dim=0)
|
375 |
-
In = I[n].argmax(dim=0) * (Sn > 0).long()
|
376 |
-
matrix = In.cpu().numpy().astype(np.uint8)
|
377 |
-
mask = np.zeros(matrix.shape, dtype=np.uint8)
|
378 |
-
mask[matrix > 0] = 1
|
379 |
-
bbox_xywh = bboxes_xywh[n]
|
380 |
-
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh)
|
381 |
-
return image_bgr
|
382 |
-
|
383 |
-
|
384 |
-
class DensePoseOutputsUVisualizer(object):
|
385 |
-
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
|
386 |
-
self.mask_visualizer = MatrixVisualizer(
|
387 |
-
inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha
|
388 |
-
)
|
389 |
-
|
390 |
-
def visualize(
|
391 |
-
self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]]
|
392 |
-
) -> Image:
|
393 |
-
if dp_output_with_bboxes is None:
|
394 |
-
return image_bgr
|
395 |
-
densepose_output, bboxes_xywh = dp_output_with_bboxes
|
396 |
-
assert isinstance(
|
397 |
-
densepose_output, DensePoseOutput
|
398 |
-
), "DensePoseOutput expected, {} encountered".format(type(densepose_output))
|
399 |
-
S = densepose_output.S
|
400 |
-
I = densepose_output.I # noqa
|
401 |
-
U = densepose_output.U
|
402 |
-
V = densepose_output.V
|
403 |
-
N = S.size(0)
|
404 |
-
assert N == I.size(0), (
|
405 |
-
"densepose outputs S {} and I {}"
|
406 |
-
" should have equal first dim size".format(S.size(), I.size())
|
407 |
-
)
|
408 |
-
assert N == U.size(0), (
|
409 |
-
"densepose outputs S {} and U {}"
|
410 |
-
" should have equal first dim size".format(S.size(), U.size())
|
411 |
-
)
|
412 |
-
assert N == V.size(0), (
|
413 |
-
"densepose outputs S {} and V {}"
|
414 |
-
" should have equal first dim size".format(S.size(), V.size())
|
415 |
-
)
|
416 |
-
assert N == len(bboxes_xywh), (
|
417 |
-
"number of bounding boxes {}"
|
418 |
-
" should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N)
|
419 |
-
)
|
420 |
-
for n in range(N):
|
421 |
-
Sn = S[n].argmax(dim=0)
|
422 |
-
In = I[n].argmax(dim=0) * (Sn > 0).long()
|
423 |
-
segmentation = In.cpu().numpy().astype(np.uint8)
|
424 |
-
mask = np.zeros(segmentation.shape, dtype=np.uint8)
|
425 |
-
mask[segmentation > 0] = 1
|
426 |
-
Un = U[n].cpu().numpy().astype(np.float32)
|
427 |
-
Uvis = np.zeros(segmentation.shape, dtype=np.float32)
|
428 |
-
for partId in range(Un.shape[0]):
|
429 |
-
Uvis[segmentation == partId] = Un[partId][segmentation == partId].clip(0, 1) * 255
|
430 |
-
bbox_xywh = bboxes_xywh[n]
|
431 |
-
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, Uvis, bbox_xywh)
|
432 |
-
return image_bgr
|
433 |
-
|
434 |
-
|
435 |
-
class DensePoseOutputsVVisualizer(object):
|
436 |
-
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
|
437 |
-
self.mask_visualizer = MatrixVisualizer(
|
438 |
-
inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha
|
439 |
-
)
|
440 |
-
|
441 |
-
def visualize(
|
442 |
-
self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]]
|
443 |
-
) -> Image:
|
444 |
-
if dp_output_with_bboxes is None:
|
445 |
-
return image_bgr
|
446 |
-
densepose_output, bboxes_xywh = dp_output_with_bboxes
|
447 |
-
assert isinstance(
|
448 |
-
densepose_output, DensePoseOutput
|
449 |
-
), "DensePoseOutput expected, {} encountered".format(type(densepose_output))
|
450 |
-
S = densepose_output.S
|
451 |
-
I = densepose_output.I # noqa
|
452 |
-
U = densepose_output.U
|
453 |
-
V = densepose_output.V
|
454 |
-
N = S.size(0)
|
455 |
-
assert N == I.size(0), (
|
456 |
-
"densepose outputs S {} and I {}"
|
457 |
-
" should have equal first dim size".format(S.size(), I.size())
|
458 |
-
)
|
459 |
-
assert N == U.size(0), (
|
460 |
-
"densepose outputs S {} and U {}"
|
461 |
-
" should have equal first dim size".format(S.size(), U.size())
|
462 |
-
)
|
463 |
-
assert N == V.size(0), (
|
464 |
-
"densepose outputs S {} and V {}"
|
465 |
-
" should have equal first dim size".format(S.size(), V.size())
|
466 |
-
)
|
467 |
-
assert N == len(bboxes_xywh), (
|
468 |
-
"number of bounding boxes {}"
|
469 |
-
" should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N)
|
470 |
-
)
|
471 |
-
for n in range(N):
|
472 |
-
Sn = S[n].argmax(dim=0)
|
473 |
-
In = I[n].argmax(dim=0) * (Sn > 0).long()
|
474 |
-
segmentation = In.cpu().numpy().astype(np.uint8)
|
475 |
-
mask = np.zeros(segmentation.shape, dtype=np.uint8)
|
476 |
-
mask[segmentation > 0] = 1
|
477 |
-
Vn = V[n].cpu().numpy().astype(np.float32)
|
478 |
-
Vvis = np.zeros(segmentation.shape, dtype=np.float32)
|
479 |
-
for partId in range(Vn.size(0)):
|
480 |
-
Vvis[segmentation == partId] = Vn[partId][segmentation == partId].clip(0, 1) * 255
|
481 |
-
bbox_xywh = bboxes_xywh[n]
|
482 |
-
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, Vvis, bbox_xywh)
|
483 |
-
return image_bgr
|
484 |
-
|
485 |
-
|
486 |
-
class DensePoseDataCoarseSegmentationVisualizer(object):
|
487 |
-
"""
|
488 |
-
Visualizer for ground truth segmentation
|
489 |
-
"""
|
490 |
-
|
491 |
-
def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
|
492 |
-
self.mask_visualizer = MatrixVisualizer(
|
493 |
-
inplace=inplace,
|
494 |
-
cmap=cmap,
|
495 |
-
val_scale=255.0 / DensePoseDataRelative.N_BODY_PARTS,
|
496 |
-
alpha=alpha,
|
497 |
-
)
|
498 |
-
|
499 |
-
def visualize(
|
500 |
-
self,
|
501 |
-
image_bgr: Image,
|
502 |
-
bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
|
503 |
-
) -> Image:
|
504 |
-
if bbox_densepose_datas is None:
|
505 |
-
return image_bgr
|
506 |
-
for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
|
507 |
-
matrix = densepose_data.segm.numpy()
|
508 |
-
mask = np.zeros(matrix.shape, dtype=np.uint8)
|
509 |
-
mask[matrix > 0] = 1
|
510 |
-
image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh.numpy())
|
511 |
-
return image_bgr
|
512 |
-
|
513 |
-
|
514 |
-
class DensePoseDataPointsVisualizer(object):
|
515 |
-
def __init__(self, densepose_data_to_value_fn=None, cmap=cv2.COLORMAP_PARULA):
|
516 |
-
self.points_visualizer = PointsVisualizer()
|
517 |
-
self.densepose_data_to_value_fn = densepose_data_to_value_fn
|
518 |
-
self.cmap = cmap
|
519 |
-
|
520 |
-
def visualize(
|
521 |
-
self,
|
522 |
-
image_bgr: Image,
|
523 |
-
bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
|
524 |
-
) -> Image:
|
525 |
-
if bbox_densepose_datas is None:
|
526 |
-
return image_bgr
|
527 |
-
for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
|
528 |
-
x0, y0, w, h = bbox_xywh.numpy()
|
529 |
-
x = densepose_data.x.numpy() * w / 255.0 + x0
|
530 |
-
y = densepose_data.y.numpy() * h / 255.0 + y0
|
531 |
-
pts_xy = zip(x, y)
|
532 |
-
if self.densepose_data_to_value_fn is None:
|
533 |
-
image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy)
|
534 |
-
else:
|
535 |
-
v = self.densepose_data_to_value_fn(densepose_data)
|
536 |
-
img_colors_bgr = cv2.applyColorMap(v, self.cmap)
|
537 |
-
colors_bgr = [
|
538 |
-
[int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr
|
539 |
-
]
|
540 |
-
image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy, colors_bgr)
|
541 |
-
return image_bgr
|
542 |
-
|
543 |
-
|
544 |
-
def _densepose_data_u_for_cmap(densepose_data):
|
545 |
-
u = np.clip(densepose_data.u.numpy(), 0, 1) * 255.0
|
546 |
-
return u.astype(np.uint8)
|
547 |
-
|
548 |
-
|
549 |
-
def _densepose_data_v_for_cmap(densepose_data):
|
550 |
-
v = np.clip(densepose_data.v.numpy(), 0, 1) * 255.0
|
551 |
-
return v.astype(np.uint8)
|
552 |
-
|
553 |
-
|
554 |
-
def _densepose_data_i_for_cmap(densepose_data):
|
555 |
-
i = (
|
556 |
-
np.clip(densepose_data.i.numpy(), 0.0, DensePoseDataRelative.N_PART_LABELS)
|
557 |
-
* 255.0
|
558 |
-
/ DensePoseDataRelative.N_PART_LABELS
|
559 |
-
)
|
560 |
-
return i.astype(np.uint8)
|
561 |
-
|
562 |
-
|
563 |
-
class DensePoseDataPointsUVisualizer(DensePoseDataPointsVisualizer):
|
564 |
-
def __init__(self):
|
565 |
-
super(DensePoseDataPointsUVisualizer, self).__init__(
|
566 |
-
densepose_data_to_value_fn=_densepose_data_u_for_cmap
|
567 |
-
)
|
568 |
-
|
569 |
-
|
570 |
-
class DensePoseDataPointsVVisualizer(DensePoseDataPointsVisualizer):
|
571 |
-
def __init__(self):
|
572 |
-
super(DensePoseDataPointsVVisualizer, self).__init__(
|
573 |
-
densepose_data_to_value_fn=_densepose_data_v_for_cmap
|
574 |
-
)
|
575 |
-
|
576 |
-
|
577 |
-
class DensePoseDataPointsIVisualizer(DensePoseDataPointsVisualizer):
|
578 |
-
def __init__(self):
|
579 |
-
super(DensePoseDataPointsIVisualizer, self).__init__(
|
580 |
-
densepose_data_to_value_fn=_densepose_data_i_for_cmap
|
581 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/par.h
DELETED
@@ -1,62 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2018 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
#include <thrust/detail/allocator_aware_execution_policy.h>
|
21 |
-
#include <thrust/system/omp/detail/execution_policy.h>
|
22 |
-
|
23 |
-
namespace thrust
|
24 |
-
{
|
25 |
-
namespace system
|
26 |
-
{
|
27 |
-
namespace omp
|
28 |
-
{
|
29 |
-
namespace detail
|
30 |
-
{
|
31 |
-
|
32 |
-
|
33 |
-
struct par_t : thrust::system::omp::detail::execution_policy<par_t>,
|
34 |
-
thrust::detail::allocator_aware_execution_policy<
|
35 |
-
thrust::system::omp::detail::execution_policy>
|
36 |
-
{
|
37 |
-
__host__ __device__
|
38 |
-
THRUST_CONSTEXPR par_t() : thrust::system::omp::detail::execution_policy<par_t>() {}
|
39 |
-
};
|
40 |
-
|
41 |
-
|
42 |
-
} // end detail
|
43 |
-
|
44 |
-
|
45 |
-
static const detail::par_t par;
|
46 |
-
|
47 |
-
|
48 |
-
} // end omp
|
49 |
-
} // end system
|
50 |
-
|
51 |
-
|
52 |
-
// alias par here
|
53 |
-
namespace omp
|
54 |
-
{
|
55 |
-
|
56 |
-
|
57 |
-
using thrust::system::omp::par;
|
58 |
-
|
59 |
-
|
60 |
-
} // end omp
|
61 |
-
} // end thrust
|
62 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/modeling/postprocessing.py
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import torch
|
3 |
-
from torch.nn import functional as F
|
4 |
-
|
5 |
-
from detectron2.structures import Instances, ROIMasks
|
6 |
-
|
7 |
-
|
8 |
-
# perhaps should rename to "resize_instance"
|
9 |
-
def detector_postprocess(
|
10 |
-
results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
|
11 |
-
):
|
12 |
-
"""
|
13 |
-
Resize the output instances.
|
14 |
-
The input images are often resized when entering an object detector.
|
15 |
-
As a result, we often need the outputs of the detector in a different
|
16 |
-
resolution from its inputs.
|
17 |
-
|
18 |
-
This function will resize the raw outputs of an R-CNN detector
|
19 |
-
to produce outputs according to the desired output resolution.
|
20 |
-
|
21 |
-
Args:
|
22 |
-
results (Instances): the raw outputs from the detector.
|
23 |
-
`results.image_size` contains the input image resolution the detector sees.
|
24 |
-
This object might be modified in-place.
|
25 |
-
output_height, output_width: the desired output resolution.
|
26 |
-
|
27 |
-
Returns:
|
28 |
-
Instances: the resized output from the model, based on the output resolution
|
29 |
-
"""
|
30 |
-
# Change to 'if is_tracing' after PT1.7
|
31 |
-
if isinstance(output_height, torch.Tensor):
|
32 |
-
# Converts integer tensors to float temporaries to ensure true
|
33 |
-
# division is performed when computing scale_x and scale_y.
|
34 |
-
output_width_tmp = output_width.float()
|
35 |
-
output_height_tmp = output_height.float()
|
36 |
-
new_size = torch.stack([output_height, output_width])
|
37 |
-
else:
|
38 |
-
new_size = (output_height, output_width)
|
39 |
-
output_width_tmp = output_width
|
40 |
-
output_height_tmp = output_height
|
41 |
-
|
42 |
-
scale_x, scale_y = (
|
43 |
-
output_width_tmp / results.image_size[1],
|
44 |
-
output_height_tmp / results.image_size[0],
|
45 |
-
)
|
46 |
-
results = Instances(new_size, **results.get_fields())
|
47 |
-
|
48 |
-
if results.has("pred_boxes"):
|
49 |
-
output_boxes = results.pred_boxes
|
50 |
-
elif results.has("proposal_boxes"):
|
51 |
-
output_boxes = results.proposal_boxes
|
52 |
-
else:
|
53 |
-
output_boxes = None
|
54 |
-
assert output_boxes is not None, "Predictions must contain boxes!"
|
55 |
-
|
56 |
-
output_boxes.scale(scale_x, scale_y)
|
57 |
-
output_boxes.clip(results.image_size)
|
58 |
-
|
59 |
-
results = results[output_boxes.nonempty()]
|
60 |
-
|
61 |
-
if results.has("pred_masks"):
|
62 |
-
if isinstance(results.pred_masks, ROIMasks):
|
63 |
-
roi_masks = results.pred_masks
|
64 |
-
else:
|
65 |
-
# pred_masks is a tensor of shape (N, 1, M, M)
|
66 |
-
roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
|
67 |
-
results.pred_masks = roi_masks.to_bitmasks(
|
68 |
-
results.pred_boxes, output_height, output_width, mask_threshold
|
69 |
-
).tensor # TODO return ROIMasks/BitMask object in the future
|
70 |
-
|
71 |
-
if results.has("pred_keypoints"):
|
72 |
-
results.pred_keypoints[:, :, 0] *= scale_x
|
73 |
-
results.pred_keypoints[:, :, 1] *= scale_y
|
74 |
-
|
75 |
-
return results
|
76 |
-
|
77 |
-
|
78 |
-
def sem_seg_postprocess(result, img_size, output_height, output_width):
|
79 |
-
"""
|
80 |
-
Return semantic segmentation predictions in the original resolution.
|
81 |
-
|
82 |
-
The input images are often resized when entering semantic segmentor. Moreover, in same
|
83 |
-
cases, they also padded inside segmentor to be divisible by maximum network stride.
|
84 |
-
As a result, we often need the predictions of the segmentor in a different
|
85 |
-
resolution from its inputs.
|
86 |
-
|
87 |
-
Args:
|
88 |
-
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
|
89 |
-
where C is the number of classes, and H, W are the height and width of the prediction.
|
90 |
-
img_size (tuple): image size that segmentor is taking as input.
|
91 |
-
output_height, output_width: the desired output resolution.
|
92 |
-
|
93 |
-
Returns:
|
94 |
-
semantic segmentation prediction (Tensor): A tensor of the shape
|
95 |
-
(C, output_height, output_width) that contains per-pixel soft predictions.
|
96 |
-
"""
|
97 |
-
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
|
98 |
-
result = F.interpolate(
|
99 |
-
result, size=(output_height, output_width), mode="bilinear", align_corners=False
|
100 |
-
)[0]
|
101 |
-
return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Chomkwoy/Nilkessye/cpool_new/src/left_pool.cpp
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
// #include <torch/torch.h>
|
2 |
-
#include <torch/extension.h>
|
3 |
-
|
4 |
-
#include <vector>
|
5 |
-
|
6 |
-
std::vector<torch::Tensor> pool_forward(
|
7 |
-
torch::Tensor input
|
8 |
-
) {
|
9 |
-
// Initialize output
|
10 |
-
torch::Tensor output = torch::zeros_like(input);
|
11 |
-
|
12 |
-
// Get width
|
13 |
-
int64_t width = input.size(3);
|
14 |
-
|
15 |
-
// Copy the last column
|
16 |
-
torch::Tensor input_temp = input.select(3, width - 1);
|
17 |
-
torch::Tensor output_temp = output.select(3, width - 1);
|
18 |
-
output_temp.copy_(input_temp);
|
19 |
-
|
20 |
-
torch::Tensor max_temp;
|
21 |
-
for (int64_t ind = 1; ind < width; ++ind) {
|
22 |
-
input_temp = input.select(3, width - ind - 1);
|
23 |
-
output_temp = output.select(3, width - ind);
|
24 |
-
max_temp = output.select(3, width - ind - 1);
|
25 |
-
|
26 |
-
torch::max_out(max_temp, input_temp, output_temp);
|
27 |
-
}
|
28 |
-
|
29 |
-
return {
|
30 |
-
output
|
31 |
-
};
|
32 |
-
}
|
33 |
-
|
34 |
-
std::vector<torch::Tensor> pool_backward(
|
35 |
-
torch::Tensor input,
|
36 |
-
torch::Tensor grad_output
|
37 |
-
) {
|
38 |
-
auto output = torch::zeros_like(input);
|
39 |
-
|
40 |
-
int32_t batch = input.size(0);
|
41 |
-
int32_t channel = input.size(1);
|
42 |
-
int32_t height = input.size(2);
|
43 |
-
int32_t width = input.size(3);
|
44 |
-
|
45 |
-
// auto max_val = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, height});
|
46 |
-
// auto max_ind = torch::zeros(torch::CUDA(torch::kLong), {batch, channel, height});
|
47 |
-
auto max_val = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
|
48 |
-
auto max_ind = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA));
|
49 |
-
|
50 |
-
auto input_temp = input.select(3, width - 1);
|
51 |
-
max_val.copy_(input_temp);
|
52 |
-
|
53 |
-
max_ind.fill_(width - 1);
|
54 |
-
|
55 |
-
auto output_temp = output.select(3, width - 1);
|
56 |
-
auto grad_output_temp = grad_output.select(3, width - 1);
|
57 |
-
output_temp.copy_(grad_output_temp);
|
58 |
-
|
59 |
-
auto un_max_ind = max_ind.unsqueeze(3);
|
60 |
-
// auto gt_mask = torch::zeros(torch::CUDA(torch::kByte), {batch, channel, height});
|
61 |
-
// auto max_temp = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, height});
|
62 |
-
auto gt_mask = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA));
|
63 |
-
auto max_temp = torch::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
|
64 |
-
|
65 |
-
for (int32_t ind = 1; ind < width; ++ind) {
|
66 |
-
input_temp = input.select(3, width - ind - 1);
|
67 |
-
torch::gt_out(gt_mask, input_temp, max_val);
|
68 |
-
|
69 |
-
torch::masked_select_out(max_temp, input_temp, gt_mask);
|
70 |
-
max_val.masked_scatter_(gt_mask, max_temp);
|
71 |
-
max_ind.masked_fill_(gt_mask, width - ind - 1);
|
72 |
-
|
73 |
-
grad_output_temp = grad_output.select(3, width - ind - 1).unsqueeze(3);
|
74 |
-
output.scatter_add_(3, un_max_ind, grad_output_temp);
|
75 |
-
}
|
76 |
-
|
77 |
-
return {
|
78 |
-
output
|
79 |
-
};
|
80 |
-
}
|
81 |
-
|
82 |
-
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
|
83 |
-
m.def(
|
84 |
-
"forward", &pool_forward, "Left Pool Forward",
|
85 |
-
py::call_guard<py::gil_scoped_release>()
|
86 |
-
);
|
87 |
-
m.def(
|
88 |
-
"backward", &pool_backward, "Left Pool Backward",
|
89 |
-
py::call_guard<py::gil_scoped_release>()
|
90 |
-
);
|
91 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CofAI/chat/g4f/Provider/Providers/Dfehub.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import requests
|
3 |
-
from ...typing import sha256, Dict, get_type_hints
|
4 |
-
|
5 |
-
url = "https://chat.dfehub.com"
|
6 |
-
model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4']
|
7 |
-
supports_stream = True
|
8 |
-
needs_auth = False
|
9 |
-
|
10 |
-
|
11 |
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
12 |
-
headers = {
|
13 |
-
'Authority': 'chat.dfehub.com',
|
14 |
-
'Content-Type': 'application/json',
|
15 |
-
'Method': 'POST',
|
16 |
-
'Path': '/api/openai/v1/chat/completions',
|
17 |
-
'Scheme': 'https',
|
18 |
-
'Accept': 'text/event-stream',
|
19 |
-
'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5',
|
20 |
-
'Content-Type': 'application/json',
|
21 |
-
'Origin': 'https://chat.dfehub.com',
|
22 |
-
'Referer': 'https://chat.dfehub.com/',
|
23 |
-
'Sec-Ch-Ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
|
24 |
-
'Sec-Ch-Ua-Mobile': '?0',
|
25 |
-
'Sec-Ch-Ua-Platform': '"Windows"',
|
26 |
-
'Sec-Fetch-Dest': 'empty',
|
27 |
-
'Sec-Fetch-Mode': 'cors',
|
28 |
-
'Sec-Fetch-Site': 'same-origin',
|
29 |
-
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
|
30 |
-
'X-Requested-With': 'XMLHttpRequest',
|
31 |
-
}
|
32 |
-
|
33 |
-
data = {
|
34 |
-
'model': model,
|
35 |
-
'temperature': 0.7,
|
36 |
-
'max_tokens': '8000',
|
37 |
-
'presence_penalty': 0,
|
38 |
-
'messages': messages,
|
39 |
-
}
|
40 |
-
|
41 |
-
response = requests.post(url + '/api/openai/v1/chat/completions',
|
42 |
-
headers=headers, json=data, stream=stream)
|
43 |
-
|
44 |
-
yield response.json()['choices'][0]['message']['content']
|
45 |
-
|
46 |
-
|
47 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
48 |
-
'(%s)' % ', '.join(
|
49 |
-
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/__init__.py
DELETED
File without changes
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_routedef.py
DELETED
@@ -1,216 +0,0 @@
|
|
1 |
-
import abc
|
2 |
-
import os # noqa
|
3 |
-
from typing import (
|
4 |
-
TYPE_CHECKING,
|
5 |
-
Any,
|
6 |
-
Callable,
|
7 |
-
Dict,
|
8 |
-
Iterator,
|
9 |
-
List,
|
10 |
-
Optional,
|
11 |
-
Sequence,
|
12 |
-
Type,
|
13 |
-
Union,
|
14 |
-
overload,
|
15 |
-
)
|
16 |
-
|
17 |
-
import attr
|
18 |
-
|
19 |
-
from . import hdrs
|
20 |
-
from .abc import AbstractView
|
21 |
-
from .typedefs import Handler, PathLike
|
22 |
-
|
23 |
-
if TYPE_CHECKING: # pragma: no cover
|
24 |
-
from .web_request import Request
|
25 |
-
from .web_response import StreamResponse
|
26 |
-
from .web_urldispatcher import AbstractRoute, UrlDispatcher
|
27 |
-
else:
|
28 |
-
Request = StreamResponse = UrlDispatcher = AbstractRoute = None
|
29 |
-
|
30 |
-
|
31 |
-
__all__ = (
|
32 |
-
"AbstractRouteDef",
|
33 |
-
"RouteDef",
|
34 |
-
"StaticDef",
|
35 |
-
"RouteTableDef",
|
36 |
-
"head",
|
37 |
-
"options",
|
38 |
-
"get",
|
39 |
-
"post",
|
40 |
-
"patch",
|
41 |
-
"put",
|
42 |
-
"delete",
|
43 |
-
"route",
|
44 |
-
"view",
|
45 |
-
"static",
|
46 |
-
)
|
47 |
-
|
48 |
-
|
49 |
-
class AbstractRouteDef(abc.ABC):
|
50 |
-
@abc.abstractmethod
|
51 |
-
def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
|
52 |
-
pass # pragma: no cover
|
53 |
-
|
54 |
-
|
55 |
-
_HandlerType = Union[Type[AbstractView], Handler]
|
56 |
-
|
57 |
-
|
58 |
-
@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
|
59 |
-
class RouteDef(AbstractRouteDef):
|
60 |
-
method: str
|
61 |
-
path: str
|
62 |
-
handler: _HandlerType
|
63 |
-
kwargs: Dict[str, Any]
|
64 |
-
|
65 |
-
def __repr__(self) -> str:
|
66 |
-
info = []
|
67 |
-
for name, value in sorted(self.kwargs.items()):
|
68 |
-
info.append(f", {name}={value!r}")
|
69 |
-
return "<RouteDef {method} {path} -> {handler.__name__!r}" "{info}>".format(
|
70 |
-
method=self.method, path=self.path, handler=self.handler, info="".join(info)
|
71 |
-
)
|
72 |
-
|
73 |
-
def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
|
74 |
-
if self.method in hdrs.METH_ALL:
|
75 |
-
reg = getattr(router, "add_" + self.method.lower())
|
76 |
-
return [reg(self.path, self.handler, **self.kwargs)]
|
77 |
-
else:
|
78 |
-
return [
|
79 |
-
router.add_route(self.method, self.path, self.handler, **self.kwargs)
|
80 |
-
]
|
81 |
-
|
82 |
-
|
83 |
-
@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
|
84 |
-
class StaticDef(AbstractRouteDef):
|
85 |
-
prefix: str
|
86 |
-
path: PathLike
|
87 |
-
kwargs: Dict[str, Any]
|
88 |
-
|
89 |
-
def __repr__(self) -> str:
|
90 |
-
info = []
|
91 |
-
for name, value in sorted(self.kwargs.items()):
|
92 |
-
info.append(f", {name}={value!r}")
|
93 |
-
return "<StaticDef {prefix} -> {path}" "{info}>".format(
|
94 |
-
prefix=self.prefix, path=self.path, info="".join(info)
|
95 |
-
)
|
96 |
-
|
97 |
-
def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
|
98 |
-
resource = router.add_static(self.prefix, self.path, **self.kwargs)
|
99 |
-
routes = resource.get_info().get("routes", {})
|
100 |
-
return list(routes.values())
|
101 |
-
|
102 |
-
|
103 |
-
def route(method: str, path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
|
104 |
-
return RouteDef(method, path, handler, kwargs)
|
105 |
-
|
106 |
-
|
107 |
-
def head(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
|
108 |
-
return route(hdrs.METH_HEAD, path, handler, **kwargs)
|
109 |
-
|
110 |
-
|
111 |
-
def options(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
|
112 |
-
return route(hdrs.METH_OPTIONS, path, handler, **kwargs)
|
113 |
-
|
114 |
-
|
115 |
-
def get(
|
116 |
-
path: str,
|
117 |
-
handler: _HandlerType,
|
118 |
-
*,
|
119 |
-
name: Optional[str] = None,
|
120 |
-
allow_head: bool = True,
|
121 |
-
**kwargs: Any,
|
122 |
-
) -> RouteDef:
|
123 |
-
return route(
|
124 |
-
hdrs.METH_GET, path, handler, name=name, allow_head=allow_head, **kwargs
|
125 |
-
)
|
126 |
-
|
127 |
-
|
128 |
-
def post(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
|
129 |
-
return route(hdrs.METH_POST, path, handler, **kwargs)
|
130 |
-
|
131 |
-
|
132 |
-
def put(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
|
133 |
-
return route(hdrs.METH_PUT, path, handler, **kwargs)
|
134 |
-
|
135 |
-
|
136 |
-
def patch(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
|
137 |
-
return route(hdrs.METH_PATCH, path, handler, **kwargs)
|
138 |
-
|
139 |
-
|
140 |
-
def delete(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
|
141 |
-
return route(hdrs.METH_DELETE, path, handler, **kwargs)
|
142 |
-
|
143 |
-
|
144 |
-
def view(path: str, handler: Type[AbstractView], **kwargs: Any) -> RouteDef:
|
145 |
-
return route(hdrs.METH_ANY, path, handler, **kwargs)
|
146 |
-
|
147 |
-
|
148 |
-
def static(prefix: str, path: PathLike, **kwargs: Any) -> StaticDef:
|
149 |
-
return StaticDef(prefix, path, kwargs)
|
150 |
-
|
151 |
-
|
152 |
-
_Deco = Callable[[_HandlerType], _HandlerType]
|
153 |
-
|
154 |
-
|
155 |
-
class RouteTableDef(Sequence[AbstractRouteDef]):
|
156 |
-
"""Route definition table"""
|
157 |
-
|
158 |
-
def __init__(self) -> None:
|
159 |
-
self._items: List[AbstractRouteDef] = []
|
160 |
-
|
161 |
-
def __repr__(self) -> str:
|
162 |
-
return f"<RouteTableDef count={len(self._items)}>"
|
163 |
-
|
164 |
-
@overload
|
165 |
-
def __getitem__(self, index: int) -> AbstractRouteDef:
|
166 |
-
...
|
167 |
-
|
168 |
-
@overload
|
169 |
-
def __getitem__(self, index: slice) -> List[AbstractRouteDef]:
|
170 |
-
...
|
171 |
-
|
172 |
-
def __getitem__(self, index): # type: ignore[no-untyped-def]
|
173 |
-
return self._items[index]
|
174 |
-
|
175 |
-
def __iter__(self) -> Iterator[AbstractRouteDef]:
|
176 |
-
return iter(self._items)
|
177 |
-
|
178 |
-
def __len__(self) -> int:
|
179 |
-
return len(self._items)
|
180 |
-
|
181 |
-
def __contains__(self, item: object) -> bool:
|
182 |
-
return item in self._items
|
183 |
-
|
184 |
-
def route(self, method: str, path: str, **kwargs: Any) -> _Deco:
|
185 |
-
def inner(handler: _HandlerType) -> _HandlerType:
|
186 |
-
self._items.append(RouteDef(method, path, handler, kwargs))
|
187 |
-
return handler
|
188 |
-
|
189 |
-
return inner
|
190 |
-
|
191 |
-
def head(self, path: str, **kwargs: Any) -> _Deco:
|
192 |
-
return self.route(hdrs.METH_HEAD, path, **kwargs)
|
193 |
-
|
194 |
-
def get(self, path: str, **kwargs: Any) -> _Deco:
|
195 |
-
return self.route(hdrs.METH_GET, path, **kwargs)
|
196 |
-
|
197 |
-
def post(self, path: str, **kwargs: Any) -> _Deco:
|
198 |
-
return self.route(hdrs.METH_POST, path, **kwargs)
|
199 |
-
|
200 |
-
def put(self, path: str, **kwargs: Any) -> _Deco:
|
201 |
-
return self.route(hdrs.METH_PUT, path, **kwargs)
|
202 |
-
|
203 |
-
def patch(self, path: str, **kwargs: Any) -> _Deco:
|
204 |
-
return self.route(hdrs.METH_PATCH, path, **kwargs)
|
205 |
-
|
206 |
-
def delete(self, path: str, **kwargs: Any) -> _Deco:
|
207 |
-
return self.route(hdrs.METH_DELETE, path, **kwargs)
|
208 |
-
|
209 |
-
def options(self, path: str, **kwargs: Any) -> _Deco:
|
210 |
-
return self.route(hdrs.METH_OPTIONS, path, **kwargs)
|
211 |
-
|
212 |
-
def view(self, path: str, **kwargs: Any) -> _Deco:
|
213 |
-
return self.route(hdrs.METH_ANY, path, **kwargs)
|
214 |
-
|
215 |
-
def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None:
|
216 |
-
self._items.append(StaticDef(prefix, path, kwargs))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_compat.py
DELETED
@@ -1,623 +0,0 @@
|
|
1 |
-
import codecs
|
2 |
-
import io
|
3 |
-
import os
|
4 |
-
import re
|
5 |
-
import sys
|
6 |
-
import typing as t
|
7 |
-
from weakref import WeakKeyDictionary
|
8 |
-
|
9 |
-
CYGWIN = sys.platform.startswith("cygwin")
|
10 |
-
WIN = sys.platform.startswith("win")
|
11 |
-
auto_wrap_for_ansi: t.Optional[t.Callable[[t.TextIO], t.TextIO]] = None
|
12 |
-
_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]")
|
13 |
-
|
14 |
-
|
15 |
-
def _make_text_stream(
|
16 |
-
stream: t.BinaryIO,
|
17 |
-
encoding: t.Optional[str],
|
18 |
-
errors: t.Optional[str],
|
19 |
-
force_readable: bool = False,
|
20 |
-
force_writable: bool = False,
|
21 |
-
) -> t.TextIO:
|
22 |
-
if encoding is None:
|
23 |
-
encoding = get_best_encoding(stream)
|
24 |
-
if errors is None:
|
25 |
-
errors = "replace"
|
26 |
-
return _NonClosingTextIOWrapper(
|
27 |
-
stream,
|
28 |
-
encoding,
|
29 |
-
errors,
|
30 |
-
line_buffering=True,
|
31 |
-
force_readable=force_readable,
|
32 |
-
force_writable=force_writable,
|
33 |
-
)
|
34 |
-
|
35 |
-
|
36 |
-
def is_ascii_encoding(encoding: str) -> bool:
|
37 |
-
"""Checks if a given encoding is ascii."""
|
38 |
-
try:
|
39 |
-
return codecs.lookup(encoding).name == "ascii"
|
40 |
-
except LookupError:
|
41 |
-
return False
|
42 |
-
|
43 |
-
|
44 |
-
def get_best_encoding(stream: t.IO[t.Any]) -> str:
|
45 |
-
"""Returns the default stream encoding if not found."""
|
46 |
-
rv = getattr(stream, "encoding", None) or sys.getdefaultencoding()
|
47 |
-
if is_ascii_encoding(rv):
|
48 |
-
return "utf-8"
|
49 |
-
return rv
|
50 |
-
|
51 |
-
|
52 |
-
class _NonClosingTextIOWrapper(io.TextIOWrapper):
|
53 |
-
def __init__(
|
54 |
-
self,
|
55 |
-
stream: t.BinaryIO,
|
56 |
-
encoding: t.Optional[str],
|
57 |
-
errors: t.Optional[str],
|
58 |
-
force_readable: bool = False,
|
59 |
-
force_writable: bool = False,
|
60 |
-
**extra: t.Any,
|
61 |
-
) -> None:
|
62 |
-
self._stream = stream = t.cast(
|
63 |
-
t.BinaryIO, _FixupStream(stream, force_readable, force_writable)
|
64 |
-
)
|
65 |
-
super().__init__(stream, encoding, errors, **extra)
|
66 |
-
|
67 |
-
def __del__(self) -> None:
|
68 |
-
try:
|
69 |
-
self.detach()
|
70 |
-
except Exception:
|
71 |
-
pass
|
72 |
-
|
73 |
-
def isatty(self) -> bool:
|
74 |
-
# https://bitbucket.org/pypy/pypy/issue/1803
|
75 |
-
return self._stream.isatty()
|
76 |
-
|
77 |
-
|
78 |
-
class _FixupStream:
|
79 |
-
"""The new io interface needs more from streams than streams
|
80 |
-
traditionally implement. As such, this fix-up code is necessary in
|
81 |
-
some circumstances.
|
82 |
-
|
83 |
-
The forcing of readable and writable flags are there because some tools
|
84 |
-
put badly patched objects on sys (one such offender are certain version
|
85 |
-
of jupyter notebook).
|
86 |
-
"""
|
87 |
-
|
88 |
-
def __init__(
|
89 |
-
self,
|
90 |
-
stream: t.BinaryIO,
|
91 |
-
force_readable: bool = False,
|
92 |
-
force_writable: bool = False,
|
93 |
-
):
|
94 |
-
self._stream = stream
|
95 |
-
self._force_readable = force_readable
|
96 |
-
self._force_writable = force_writable
|
97 |
-
|
98 |
-
def __getattr__(self, name: str) -> t.Any:
|
99 |
-
return getattr(self._stream, name)
|
100 |
-
|
101 |
-
def read1(self, size: int) -> bytes:
|
102 |
-
f = getattr(self._stream, "read1", None)
|
103 |
-
|
104 |
-
if f is not None:
|
105 |
-
return t.cast(bytes, f(size))
|
106 |
-
|
107 |
-
return self._stream.read(size)
|
108 |
-
|
109 |
-
def readable(self) -> bool:
|
110 |
-
if self._force_readable:
|
111 |
-
return True
|
112 |
-
x = getattr(self._stream, "readable", None)
|
113 |
-
if x is not None:
|
114 |
-
return t.cast(bool, x())
|
115 |
-
try:
|
116 |
-
self._stream.read(0)
|
117 |
-
except Exception:
|
118 |
-
return False
|
119 |
-
return True
|
120 |
-
|
121 |
-
def writable(self) -> bool:
|
122 |
-
if self._force_writable:
|
123 |
-
return True
|
124 |
-
x = getattr(self._stream, "writable", None)
|
125 |
-
if x is not None:
|
126 |
-
return t.cast(bool, x())
|
127 |
-
try:
|
128 |
-
self._stream.write("") # type: ignore
|
129 |
-
except Exception:
|
130 |
-
try:
|
131 |
-
self._stream.write(b"")
|
132 |
-
except Exception:
|
133 |
-
return False
|
134 |
-
return True
|
135 |
-
|
136 |
-
def seekable(self) -> bool:
|
137 |
-
x = getattr(self._stream, "seekable", None)
|
138 |
-
if x is not None:
|
139 |
-
return t.cast(bool, x())
|
140 |
-
try:
|
141 |
-
self._stream.seek(self._stream.tell())
|
142 |
-
except Exception:
|
143 |
-
return False
|
144 |
-
return True
|
145 |
-
|
146 |
-
|
147 |
-
def _is_binary_reader(stream: t.IO[t.Any], default: bool = False) -> bool:
|
148 |
-
try:
|
149 |
-
return isinstance(stream.read(0), bytes)
|
150 |
-
except Exception:
|
151 |
-
return default
|
152 |
-
# This happens in some cases where the stream was already
|
153 |
-
# closed. In this case, we assume the default.
|
154 |
-
|
155 |
-
|
156 |
-
def _is_binary_writer(stream: t.IO[t.Any], default: bool = False) -> bool:
|
157 |
-
try:
|
158 |
-
stream.write(b"")
|
159 |
-
except Exception:
|
160 |
-
try:
|
161 |
-
stream.write("")
|
162 |
-
return False
|
163 |
-
except Exception:
|
164 |
-
pass
|
165 |
-
return default
|
166 |
-
return True
|
167 |
-
|
168 |
-
|
169 |
-
def _find_binary_reader(stream: t.IO[t.Any]) -> t.Optional[t.BinaryIO]:
|
170 |
-
# We need to figure out if the given stream is already binary.
|
171 |
-
# This can happen because the official docs recommend detaching
|
172 |
-
# the streams to get binary streams. Some code might do this, so
|
173 |
-
# we need to deal with this case explicitly.
|
174 |
-
if _is_binary_reader(stream, False):
|
175 |
-
return t.cast(t.BinaryIO, stream)
|
176 |
-
|
177 |
-
buf = getattr(stream, "buffer", None)
|
178 |
-
|
179 |
-
# Same situation here; this time we assume that the buffer is
|
180 |
-
# actually binary in case it's closed.
|
181 |
-
if buf is not None and _is_binary_reader(buf, True):
|
182 |
-
return t.cast(t.BinaryIO, buf)
|
183 |
-
|
184 |
-
return None
|
185 |
-
|
186 |
-
|
187 |
-
def _find_binary_writer(stream: t.IO[t.Any]) -> t.Optional[t.BinaryIO]:
|
188 |
-
# We need to figure out if the given stream is already binary.
|
189 |
-
# This can happen because the official docs recommend detaching
|
190 |
-
# the streams to get binary streams. Some code might do this, so
|
191 |
-
# we need to deal with this case explicitly.
|
192 |
-
if _is_binary_writer(stream, False):
|
193 |
-
return t.cast(t.BinaryIO, stream)
|
194 |
-
|
195 |
-
buf = getattr(stream, "buffer", None)
|
196 |
-
|
197 |
-
# Same situation here; this time we assume that the buffer is
|
198 |
-
# actually binary in case it's closed.
|
199 |
-
if buf is not None and _is_binary_writer(buf, True):
|
200 |
-
return t.cast(t.BinaryIO, buf)
|
201 |
-
|
202 |
-
return None
|
203 |
-
|
204 |
-
|
205 |
-
def _stream_is_misconfigured(stream: t.TextIO) -> bool:
|
206 |
-
"""A stream is misconfigured if its encoding is ASCII."""
|
207 |
-
# If the stream does not have an encoding set, we assume it's set
|
208 |
-
# to ASCII. This appears to happen in certain unittest
|
209 |
-
# environments. It's not quite clear what the correct behavior is
|
210 |
-
# but this at least will force Click to recover somehow.
|
211 |
-
return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
|
212 |
-
|
213 |
-
|
214 |
-
def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: t.Optional[str]) -> bool:
|
215 |
-
"""A stream attribute is compatible if it is equal to the
|
216 |
-
desired value or the desired value is unset and the attribute
|
217 |
-
has a value.
|
218 |
-
"""
|
219 |
-
stream_value = getattr(stream, attr, None)
|
220 |
-
return stream_value == value or (value is None and stream_value is not None)
|
221 |
-
|
222 |
-
|
223 |
-
def _is_compatible_text_stream(
|
224 |
-
stream: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
|
225 |
-
) -> bool:
|
226 |
-
"""Check if a stream's encoding and errors attributes are
|
227 |
-
compatible with the desired values.
|
228 |
-
"""
|
229 |
-
return _is_compat_stream_attr(
|
230 |
-
stream, "encoding", encoding
|
231 |
-
) and _is_compat_stream_attr(stream, "errors", errors)
|
232 |
-
|
233 |
-
|
234 |
-
def _force_correct_text_stream(
|
235 |
-
text_stream: t.IO[t.Any],
|
236 |
-
encoding: t.Optional[str],
|
237 |
-
errors: t.Optional[str],
|
238 |
-
is_binary: t.Callable[[t.IO[t.Any], bool], bool],
|
239 |
-
find_binary: t.Callable[[t.IO[t.Any]], t.Optional[t.BinaryIO]],
|
240 |
-
force_readable: bool = False,
|
241 |
-
force_writable: bool = False,
|
242 |
-
) -> t.TextIO:
|
243 |
-
if is_binary(text_stream, False):
|
244 |
-
binary_reader = t.cast(t.BinaryIO, text_stream)
|
245 |
-
else:
|
246 |
-
text_stream = t.cast(t.TextIO, text_stream)
|
247 |
-
# If the stream looks compatible, and won't default to a
|
248 |
-
# misconfigured ascii encoding, return it as-is.
|
249 |
-
if _is_compatible_text_stream(text_stream, encoding, errors) and not (
|
250 |
-
encoding is None and _stream_is_misconfigured(text_stream)
|
251 |
-
):
|
252 |
-
return text_stream
|
253 |
-
|
254 |
-
# Otherwise, get the underlying binary reader.
|
255 |
-
possible_binary_reader = find_binary(text_stream)
|
256 |
-
|
257 |
-
# If that's not possible, silently use the original reader
|
258 |
-
# and get mojibake instead of exceptions.
|
259 |
-
if possible_binary_reader is None:
|
260 |
-
return text_stream
|
261 |
-
|
262 |
-
binary_reader = possible_binary_reader
|
263 |
-
|
264 |
-
# Default errors to replace instead of strict in order to get
|
265 |
-
# something that works.
|
266 |
-
if errors is None:
|
267 |
-
errors = "replace"
|
268 |
-
|
269 |
-
# Wrap the binary stream in a text stream with the correct
|
270 |
-
# encoding parameters.
|
271 |
-
return _make_text_stream(
|
272 |
-
binary_reader,
|
273 |
-
encoding,
|
274 |
-
errors,
|
275 |
-
force_readable=force_readable,
|
276 |
-
force_writable=force_writable,
|
277 |
-
)
|
278 |
-
|
279 |
-
|
280 |
-
def _force_correct_text_reader(
|
281 |
-
text_reader: t.IO[t.Any],
|
282 |
-
encoding: t.Optional[str],
|
283 |
-
errors: t.Optional[str],
|
284 |
-
force_readable: bool = False,
|
285 |
-
) -> t.TextIO:
|
286 |
-
return _force_correct_text_stream(
|
287 |
-
text_reader,
|
288 |
-
encoding,
|
289 |
-
errors,
|
290 |
-
_is_binary_reader,
|
291 |
-
_find_binary_reader,
|
292 |
-
force_readable=force_readable,
|
293 |
-
)
|
294 |
-
|
295 |
-
|
296 |
-
def _force_correct_text_writer(
|
297 |
-
text_writer: t.IO[t.Any],
|
298 |
-
encoding: t.Optional[str],
|
299 |
-
errors: t.Optional[str],
|
300 |
-
force_writable: bool = False,
|
301 |
-
) -> t.TextIO:
|
302 |
-
return _force_correct_text_stream(
|
303 |
-
text_writer,
|
304 |
-
encoding,
|
305 |
-
errors,
|
306 |
-
_is_binary_writer,
|
307 |
-
_find_binary_writer,
|
308 |
-
force_writable=force_writable,
|
309 |
-
)
|
310 |
-
|
311 |
-
|
312 |
-
def get_binary_stdin() -> t.BinaryIO:
|
313 |
-
reader = _find_binary_reader(sys.stdin)
|
314 |
-
if reader is None:
|
315 |
-
raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
|
316 |
-
return reader
|
317 |
-
|
318 |
-
|
319 |
-
def get_binary_stdout() -> t.BinaryIO:
|
320 |
-
writer = _find_binary_writer(sys.stdout)
|
321 |
-
if writer is None:
|
322 |
-
raise RuntimeError("Was not able to determine binary stream for sys.stdout.")
|
323 |
-
return writer
|
324 |
-
|
325 |
-
|
326 |
-
def get_binary_stderr() -> t.BinaryIO:
|
327 |
-
writer = _find_binary_writer(sys.stderr)
|
328 |
-
if writer is None:
|
329 |
-
raise RuntimeError("Was not able to determine binary stream for sys.stderr.")
|
330 |
-
return writer
|
331 |
-
|
332 |
-
|
333 |
-
def get_text_stdin(
|
334 |
-
encoding: t.Optional[str] = None, errors: t.Optional[str] = None
|
335 |
-
) -> t.TextIO:
|
336 |
-
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
|
337 |
-
if rv is not None:
|
338 |
-
return rv
|
339 |
-
return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True)
|
340 |
-
|
341 |
-
|
342 |
-
def get_text_stdout(
|
343 |
-
encoding: t.Optional[str] = None, errors: t.Optional[str] = None
|
344 |
-
) -> t.TextIO:
|
345 |
-
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
|
346 |
-
if rv is not None:
|
347 |
-
return rv
|
348 |
-
return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True)
|
349 |
-
|
350 |
-
|
351 |
-
def get_text_stderr(
|
352 |
-
encoding: t.Optional[str] = None, errors: t.Optional[str] = None
|
353 |
-
) -> t.TextIO:
|
354 |
-
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
|
355 |
-
if rv is not None:
|
356 |
-
return rv
|
357 |
-
return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True)
|
358 |
-
|
359 |
-
|
360 |
-
def _wrap_io_open(
|
361 |
-
file: t.Union[str, "os.PathLike[str]", int],
|
362 |
-
mode: str,
|
363 |
-
encoding: t.Optional[str],
|
364 |
-
errors: t.Optional[str],
|
365 |
-
) -> t.IO[t.Any]:
|
366 |
-
"""Handles not passing ``encoding`` and ``errors`` in binary mode."""
|
367 |
-
if "b" in mode:
|
368 |
-
return open(file, mode)
|
369 |
-
|
370 |
-
return open(file, mode, encoding=encoding, errors=errors)
|
371 |
-
|
372 |
-
|
373 |
-
def open_stream(
|
374 |
-
filename: "t.Union[str, os.PathLike[str]]",
|
375 |
-
mode: str = "r",
|
376 |
-
encoding: t.Optional[str] = None,
|
377 |
-
errors: t.Optional[str] = "strict",
|
378 |
-
atomic: bool = False,
|
379 |
-
) -> t.Tuple[t.IO[t.Any], bool]:
|
380 |
-
binary = "b" in mode
|
381 |
-
filename = os.fspath(filename)
|
382 |
-
|
383 |
-
# Standard streams first. These are simple because they ignore the
|
384 |
-
# atomic flag. Use fsdecode to handle Path("-").
|
385 |
-
if os.fsdecode(filename) == "-":
|
386 |
-
if any(m in mode for m in ["w", "a", "x"]):
|
387 |
-
if binary:
|
388 |
-
return get_binary_stdout(), False
|
389 |
-
return get_text_stdout(encoding=encoding, errors=errors), False
|
390 |
-
if binary:
|
391 |
-
return get_binary_stdin(), False
|
392 |
-
return get_text_stdin(encoding=encoding, errors=errors), False
|
393 |
-
|
394 |
-
# Non-atomic writes directly go out through the regular open functions.
|
395 |
-
if not atomic:
|
396 |
-
return _wrap_io_open(filename, mode, encoding, errors), True
|
397 |
-
|
398 |
-
# Some usability stuff for atomic writes
|
399 |
-
if "a" in mode:
|
400 |
-
raise ValueError(
|
401 |
-
"Appending to an existing file is not supported, because that"
|
402 |
-
" would involve an expensive `copy`-operation to a temporary"
|
403 |
-
" file. Open the file in normal `w`-mode and copy explicitly"
|
404 |
-
" if that's what you're after."
|
405 |
-
)
|
406 |
-
if "x" in mode:
|
407 |
-
raise ValueError("Use the `overwrite`-parameter instead.")
|
408 |
-
if "w" not in mode:
|
409 |
-
raise ValueError("Atomic writes only make sense with `w`-mode.")
|
410 |
-
|
411 |
-
# Atomic writes are more complicated. They work by opening a file
|
412 |
-
# as a proxy in the same folder and then using the fdopen
|
413 |
-
# functionality to wrap it in a Python file. Then we wrap it in an
|
414 |
-
# atomic file that moves the file over on close.
|
415 |
-
import errno
|
416 |
-
import random
|
417 |
-
|
418 |
-
try:
|
419 |
-
perm: t.Optional[int] = os.stat(filename).st_mode
|
420 |
-
except OSError:
|
421 |
-
perm = None
|
422 |
-
|
423 |
-
flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
|
424 |
-
|
425 |
-
if binary:
|
426 |
-
flags |= getattr(os, "O_BINARY", 0)
|
427 |
-
|
428 |
-
while True:
|
429 |
-
tmp_filename = os.path.join(
|
430 |
-
os.path.dirname(filename),
|
431 |
-
f".__atomic-write{random.randrange(1 << 32):08x}",
|
432 |
-
)
|
433 |
-
try:
|
434 |
-
fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
|
435 |
-
break
|
436 |
-
except OSError as e:
|
437 |
-
if e.errno == errno.EEXIST or (
|
438 |
-
os.name == "nt"
|
439 |
-
and e.errno == errno.EACCES
|
440 |
-
and os.path.isdir(e.filename)
|
441 |
-
and os.access(e.filename, os.W_OK)
|
442 |
-
):
|
443 |
-
continue
|
444 |
-
raise
|
445 |
-
|
446 |
-
if perm is not None:
|
447 |
-
os.chmod(tmp_filename, perm) # in case perm includes bits in umask
|
448 |
-
|
449 |
-
f = _wrap_io_open(fd, mode, encoding, errors)
|
450 |
-
af = _AtomicFile(f, tmp_filename, os.path.realpath(filename))
|
451 |
-
return t.cast(t.IO[t.Any], af), True
|
452 |
-
|
453 |
-
|
454 |
-
class _AtomicFile:
|
455 |
-
def __init__(self, f: t.IO[t.Any], tmp_filename: str, real_filename: str) -> None:
|
456 |
-
self._f = f
|
457 |
-
self._tmp_filename = tmp_filename
|
458 |
-
self._real_filename = real_filename
|
459 |
-
self.closed = False
|
460 |
-
|
461 |
-
@property
|
462 |
-
def name(self) -> str:
|
463 |
-
return self._real_filename
|
464 |
-
|
465 |
-
def close(self, delete: bool = False) -> None:
|
466 |
-
if self.closed:
|
467 |
-
return
|
468 |
-
self._f.close()
|
469 |
-
os.replace(self._tmp_filename, self._real_filename)
|
470 |
-
self.closed = True
|
471 |
-
|
472 |
-
def __getattr__(self, name: str) -> t.Any:
|
473 |
-
return getattr(self._f, name)
|
474 |
-
|
475 |
-
def __enter__(self) -> "_AtomicFile":
|
476 |
-
return self
|
477 |
-
|
478 |
-
def __exit__(self, exc_type: t.Optional[t.Type[BaseException]], *_: t.Any) -> None:
|
479 |
-
self.close(delete=exc_type is not None)
|
480 |
-
|
481 |
-
def __repr__(self) -> str:
|
482 |
-
return repr(self._f)
|
483 |
-
|
484 |
-
|
485 |
-
def strip_ansi(value: str) -> str:
|
486 |
-
return _ansi_re.sub("", value)
|
487 |
-
|
488 |
-
|
489 |
-
def _is_jupyter_kernel_output(stream: t.IO[t.Any]) -> bool:
|
490 |
-
while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
|
491 |
-
stream = stream._stream
|
492 |
-
|
493 |
-
return stream.__class__.__module__.startswith("ipykernel.")
|
494 |
-
|
495 |
-
|
496 |
-
def should_strip_ansi(
|
497 |
-
stream: t.Optional[t.IO[t.Any]] = None, color: t.Optional[bool] = None
|
498 |
-
) -> bool:
|
499 |
-
if color is None:
|
500 |
-
if stream is None:
|
501 |
-
stream = sys.stdin
|
502 |
-
return not isatty(stream) and not _is_jupyter_kernel_output(stream)
|
503 |
-
return not color
|
504 |
-
|
505 |
-
|
506 |
-
# On Windows, wrap the output streams with colorama to support ANSI
|
507 |
-
# color codes.
|
508 |
-
# NOTE: double check is needed so mypy does not analyze this on Linux
|
509 |
-
if sys.platform.startswith("win") and WIN:
|
510 |
-
from ._winconsole import _get_windows_console_stream
|
511 |
-
|
512 |
-
def _get_argv_encoding() -> str:
|
513 |
-
import locale
|
514 |
-
|
515 |
-
return locale.getpreferredencoding()
|
516 |
-
|
517 |
-
_ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
|
518 |
-
|
519 |
-
def auto_wrap_for_ansi(
|
520 |
-
stream: t.TextIO, color: t.Optional[bool] = None
|
521 |
-
) -> t.TextIO:
|
522 |
-
"""Support ANSI color and style codes on Windows by wrapping a
|
523 |
-
stream with colorama.
|
524 |
-
"""
|
525 |
-
try:
|
526 |
-
cached = _ansi_stream_wrappers.get(stream)
|
527 |
-
except Exception:
|
528 |
-
cached = None
|
529 |
-
|
530 |
-
if cached is not None:
|
531 |
-
return cached
|
532 |
-
|
533 |
-
import colorama
|
534 |
-
|
535 |
-
strip = should_strip_ansi(stream, color)
|
536 |
-
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
|
537 |
-
rv = t.cast(t.TextIO, ansi_wrapper.stream)
|
538 |
-
_write = rv.write
|
539 |
-
|
540 |
-
def _safe_write(s):
|
541 |
-
try:
|
542 |
-
return _write(s)
|
543 |
-
except BaseException:
|
544 |
-
ansi_wrapper.reset_all()
|
545 |
-
raise
|
546 |
-
|
547 |
-
rv.write = _safe_write
|
548 |
-
|
549 |
-
try:
|
550 |
-
_ansi_stream_wrappers[stream] = rv
|
551 |
-
except Exception:
|
552 |
-
pass
|
553 |
-
|
554 |
-
return rv
|
555 |
-
|
556 |
-
else:
|
557 |
-
|
558 |
-
def _get_argv_encoding() -> str:
|
559 |
-
return getattr(sys.stdin, "encoding", None) or sys.getfilesystemencoding()
|
560 |
-
|
561 |
-
def _get_windows_console_stream(
|
562 |
-
f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
|
563 |
-
) -> t.Optional[t.TextIO]:
|
564 |
-
return None
|
565 |
-
|
566 |
-
|
567 |
-
def term_len(x: str) -> int:
|
568 |
-
return len(strip_ansi(x))
|
569 |
-
|
570 |
-
|
571 |
-
def isatty(stream: t.IO[t.Any]) -> bool:
|
572 |
-
try:
|
573 |
-
return stream.isatty()
|
574 |
-
except Exception:
|
575 |
-
return False
|
576 |
-
|
577 |
-
|
578 |
-
def _make_cached_stream_func(
|
579 |
-
src_func: t.Callable[[], t.Optional[t.TextIO]],
|
580 |
-
wrapper_func: t.Callable[[], t.TextIO],
|
581 |
-
) -> t.Callable[[], t.Optional[t.TextIO]]:
|
582 |
-
cache: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
|
583 |
-
|
584 |
-
def func() -> t.Optional[t.TextIO]:
|
585 |
-
stream = src_func()
|
586 |
-
|
587 |
-
if stream is None:
|
588 |
-
return None
|
589 |
-
|
590 |
-
try:
|
591 |
-
rv = cache.get(stream)
|
592 |
-
except Exception:
|
593 |
-
rv = None
|
594 |
-
if rv is not None:
|
595 |
-
return rv
|
596 |
-
rv = wrapper_func()
|
597 |
-
try:
|
598 |
-
cache[stream] = rv
|
599 |
-
except Exception:
|
600 |
-
pass
|
601 |
-
return rv
|
602 |
-
|
603 |
-
return func
|
604 |
-
|
605 |
-
|
606 |
-
_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
|
607 |
-
_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
|
608 |
-
_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
|
609 |
-
|
610 |
-
|
611 |
-
binary_streams: t.Mapping[str, t.Callable[[], t.BinaryIO]] = {
|
612 |
-
"stdin": get_binary_stdin,
|
613 |
-
"stdout": get_binary_stdout,
|
614 |
-
"stderr": get_binary_stderr,
|
615 |
-
}
|
616 |
-
|
617 |
-
text_streams: t.Mapping[
|
618 |
-
str, t.Callable[[t.Optional[str], t.Optional[str]], t.TextIO]
|
619 |
-
] = {
|
620 |
-
"stdin": get_text_stdin,
|
621 |
-
"stdout": get_text_stdout,
|
622 |
-
"stderr": get_text_stderr,
|
623 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9da94804.css
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
div.svelte-1gww5xe{display:flex;position:absolute;justify-content:center;align-items:center;border-radius:var(--radius-sm);background-color:#000c;padding:var(--size-1) .4rem;color:#fff;font-size:var(--text-sm)}span.svelte-1gww5xe{display:inline-block;margin-right:var(--size-1);border-radius:var(--radius-xs);width:var(--size-3);height:var(--size-3)}.wrap.svelte-1mjxput{margin-top:var(--size-3)}.legend.svelte-1mjxput{display:flex;justify-content:center;align-items:center;color:var(--body-text-color)}.legend-item.svelte-1mjxput{display:flex;align-items:center;gap:var(--spacing-sm);margin-right:var(--size-2);margin-left:var(--size-2)}.legend-box.svelte-1mjxput{display:inline-block;border-radius:var(--radius-xs);width:var(--size-3);height:var(--size-3)}svg.svelte-1mjxput{width:var(--size-full)}.label-text.svelte-1mjxput{fill:var(--body-text-color);font-size:var(--text-sm);font-family:var(--font-mono)}.main-label.svelte-1mjxput{display:flex;justify-content:center;align-items:center;color:var(--body-text-color)}.chart.svelte-etmurc{display:flex;display:relative;justify-content:center;align-items:center;background:var(--background-fill-primary);width:var(--size-full);height:var(--size-64)}
|
|
|
|
spaces/DYSHITELGOOGLA/app/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: App
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: green
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.25.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DaleChen/AutoGPT/autogpt/speech/brian.py
DELETED
@@ -1,40 +0,0 @@
|
|
1 |
-
""" Brian speech module for autogpt """
|
2 |
-
import os
|
3 |
-
|
4 |
-
import requests
|
5 |
-
from playsound import playsound
|
6 |
-
|
7 |
-
from autogpt.speech.base import VoiceBase
|
8 |
-
|
9 |
-
|
10 |
-
class BrianSpeech(VoiceBase):
|
11 |
-
"""Brian speech module for autogpt"""
|
12 |
-
|
13 |
-
def _setup(self) -> None:
|
14 |
-
"""Setup the voices, API key, etc."""
|
15 |
-
pass
|
16 |
-
|
17 |
-
def _speech(self, text: str, _: int = 0) -> bool:
|
18 |
-
"""Speak text using Brian with the streamelements API
|
19 |
-
|
20 |
-
Args:
|
21 |
-
text (str): The text to speak
|
22 |
-
|
23 |
-
Returns:
|
24 |
-
bool: True if the request was successful, False otherwise
|
25 |
-
"""
|
26 |
-
tts_url = (
|
27 |
-
f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}"
|
28 |
-
)
|
29 |
-
response = requests.get(tts_url)
|
30 |
-
|
31 |
-
if response.status_code == 200:
|
32 |
-
with open("speech.mp3", "wb") as f:
|
33 |
-
f.write(response.content)
|
34 |
-
playsound("speech.mp3")
|
35 |
-
os.remove("speech.mp3")
|
36 |
-
return True
|
37 |
-
else:
|
38 |
-
print("Request failed with status code:", response.status_code)
|
39 |
-
print("Response content:", response.content)
|
40 |
-
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DaweiZ/toy-gpt/app.py
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import chainlit as cl
|
3 |
-
from langchain.llms import OpenAI
|
4 |
-
|
5 |
-
# The OPENAI_API_KEY is a secret in huggingface settings.
|
6 |
-
# this is the way to retrieve it in runtime
|
7 |
-
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
|
8 |
-
|
9 |
-
# when the user starts a chat, this will be called
|
10 |
-
@cl.on_chat_start
|
11 |
-
async def start():
|
12 |
-
# Your logic will be here
|
13 |
-
# content = "The function start() is called when the user starts a chat because of the decorator @cl.on_chat_start"
|
14 |
-
# await cl.Message(content=content).send()
|
15 |
-
|
16 |
-
# ask the user for their OpenAI API key
|
17 |
-
# OPENAI_API_KEY = await cl.AskUserMessage(
|
18 |
-
# content="Please enter your OpenAI API key", timeout=100
|
19 |
-
# ).send()['content']
|
20 |
-
|
21 |
-
# Chainlit will automatically load environment variables from a .env file in the root of the project
|
22 |
-
# so you can just get the API key using cl.user_session.get("OPENAI_API_KEY")
|
23 |
-
# OPENAI_API_KEY = cl.user_session.get("OPENAI_API_KEY")
|
24 |
-
|
25 |
-
|
26 |
-
# define the model and save it as an environment variable so that it can be used later
|
27 |
-
llm = OpenAI(
|
28 |
-
model_name="gpt-3.5-turbo",
|
29 |
-
temperature=0,
|
30 |
-
max_tokens=2000,
|
31 |
-
openai_api_key=OPENAI_API_KEY,
|
32 |
-
)
|
33 |
-
cl.user_session.set(key="llm", value=llm)
|
34 |
-
|
35 |
-
|
36 |
-
# continously on a loop
|
37 |
-
# the @on_message decorator to tell Chainlit to run the main function each time a user sends a message. Then, we send back the answer to the UI with the Message class.
|
38 |
-
@cl.on_message
|
39 |
-
async def main(message: str):
|
40 |
-
# Your logic will be here
|
41 |
-
llm = cl.user_session.get("llm")
|
42 |
-
result = llm(message)
|
43 |
-
# send a response back to the user all the time
|
44 |
-
await cl.Message(content=f"The answer from gpt-3.5-turbo: \n{result}").send()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Dorado607/ChuanhuChatGPT/modules/models/azure.py
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
from langchain.chat_models import AzureChatOpenAI
|
2 |
-
import os
|
3 |
-
|
4 |
-
from .base_model import Base_Chat_Langchain_Client
|
5 |
-
|
6 |
-
# load_config_to_environ(["azure_openai_api_key", "azure_api_base_url", "azure_openai_api_version", "azure_deployment_name"])
|
7 |
-
|
8 |
-
class Azure_OpenAI_Client(Base_Chat_Langchain_Client):
|
9 |
-
def setup_model(self):
|
10 |
-
# inplement this to setup the model then return it
|
11 |
-
return AzureChatOpenAI(
|
12 |
-
openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"],
|
13 |
-
openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
|
14 |
-
deployment_name=os.environ["AZURE_DEPLOYMENT_NAME"],
|
15 |
-
openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
|
16 |
-
openai_api_type="azure",
|
17 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ECCV2022/bytetrack/yolox/core/launch.py
DELETED
@@ -1,219 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
# -*- coding:utf-8 -*-
|
3 |
-
# Code are based on
|
4 |
-
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py
|
5 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
6 |
-
# Copyright (c) Megvii, Inc. and its affiliates.
|
7 |
-
|
8 |
-
from loguru import logger
|
9 |
-
|
10 |
-
import torch
|
11 |
-
import torch.distributed as dist
|
12 |
-
import torch.multiprocessing as mp
|
13 |
-
|
14 |
-
import yolox.utils.dist as comm
|
15 |
-
from yolox.utils import configure_nccl
|
16 |
-
|
17 |
-
import os
|
18 |
-
import subprocess
|
19 |
-
import sys
|
20 |
-
import time
|
21 |
-
|
22 |
-
__all__ = ["launch"]
|
23 |
-
|
24 |
-
|
25 |
-
def _find_free_port():
|
26 |
-
"""
|
27 |
-
Find an available port of current machine / node.
|
28 |
-
"""
|
29 |
-
import socket
|
30 |
-
|
31 |
-
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
32 |
-
# Binding to port 0 will cause the OS to find an available port for us
|
33 |
-
sock.bind(("", 0))
|
34 |
-
port = sock.getsockname()[1]
|
35 |
-
sock.close()
|
36 |
-
# NOTE: there is still a chance the port could be taken by other processes.
|
37 |
-
return port
|
38 |
-
|
39 |
-
|
40 |
-
def launch(
|
41 |
-
main_func,
|
42 |
-
num_gpus_per_machine,
|
43 |
-
num_machines=1,
|
44 |
-
machine_rank=0,
|
45 |
-
backend="nccl",
|
46 |
-
dist_url=None,
|
47 |
-
args=(),
|
48 |
-
):
|
49 |
-
"""
|
50 |
-
Args:
|
51 |
-
main_func: a function that will be called by `main_func(*args)`
|
52 |
-
num_machines (int): the total number of machines
|
53 |
-
machine_rank (int): the rank of this machine (one per machine)
|
54 |
-
dist_url (str): url to connect to for distributed training, including protocol
|
55 |
-
e.g. "tcp://127.0.0.1:8686".
|
56 |
-
Can be set to auto to automatically select a free port on localhost
|
57 |
-
args (tuple): arguments passed to main_func
|
58 |
-
"""
|
59 |
-
world_size = num_machines * num_gpus_per_machine
|
60 |
-
if world_size > 1:
|
61 |
-
if int(os.environ.get("WORLD_SIZE", "1")) > 1:
|
62 |
-
dist_url = "{}:{}".format(
|
63 |
-
os.environ.get("MASTER_ADDR", None),
|
64 |
-
os.environ.get("MASTER_PORT", "None"),
|
65 |
-
)
|
66 |
-
local_rank = int(os.environ.get("LOCAL_RANK", "0"))
|
67 |
-
world_size = int(os.environ.get("WORLD_SIZE", "1"))
|
68 |
-
_distributed_worker(
|
69 |
-
local_rank,
|
70 |
-
main_func,
|
71 |
-
world_size,
|
72 |
-
num_gpus_per_machine,
|
73 |
-
num_machines,
|
74 |
-
machine_rank,
|
75 |
-
backend,
|
76 |
-
dist_url,
|
77 |
-
args,
|
78 |
-
)
|
79 |
-
exit()
|
80 |
-
launch_by_subprocess(
|
81 |
-
sys.argv,
|
82 |
-
world_size,
|
83 |
-
num_machines,
|
84 |
-
machine_rank,
|
85 |
-
num_gpus_per_machine,
|
86 |
-
dist_url,
|
87 |
-
args,
|
88 |
-
)
|
89 |
-
else:
|
90 |
-
main_func(*args)
|
91 |
-
|
92 |
-
|
93 |
-
def launch_by_subprocess(
    raw_argv,
    world_size,
    num_machines,
    machine_rank,
    num_gpus_per_machine,
    dist_url,
    args,
):
    """Spawn one worker subprocess per local GPU by re-running this script.

    Sets the standard PyTorch distributed environment variables
    (MASTER_ADDR, MASTER_PORT, WORLD_SIZE, RANK, LOCAL_RANK) for each child,
    re-executes the original command line once per local GPU, and waits for
    every child to finish.

    Args:
        raw_argv: the original command line (typically ``sys.argv``) to re-run.
        world_size (int): total number of processes across all machines; must be > 1.
        num_machines (int): total number of machines taking part.
        machine_rank (int): 0-based rank of this machine.
        num_gpus_per_machine (int): worker processes/GPUs to launch on this machine.
        dist_url (str or None): rendezvous URL; when None one is derived here
            (machine FQDN for multi-machine, 127.0.0.1 otherwise).
        args (tuple): forwarded launcher arguments; args[1] is assumed to expose
            ``experiment_name`` (used to name the rendezvous file) — TODO confirm.

    Raises:
        subprocess.CalledProcessError: if any child exits with a non-zero code.
    """
    assert (
        world_size > 1
    ), "subprocess mode doesn't support single GPU, use spawn mode instead"

    # NOTE(review): if a caller passes a non-None dist_url, `port` is never
    # assigned and str(port) below raises NameError — confirm callers always
    # pass dist_url=None in subprocess mode.
    if dist_url is None:
        # ------------------------hack for multi-machine training -------------------- #
        if num_machines > 1:
            # Use this machine's fully-qualified hostname as the master address.
            master_ip = subprocess.check_output(["hostname", "--fqdn"]).decode("utf-8")
            master_ip = str(master_ip).strip()
            dist_url = "tcp://{}".format(master_ip)
            # Rendezvous file used to share (url, port) between machines
            # (presumably all machines share this working directory — verify).
            ip_add_file = "./" + args[1].experiment_name + "_ip_add.txt"
            if machine_rank == 0:
                # Master machine: pick a free port and publish url + port.
                port = _find_free_port()
                with open(ip_add_file, "w") as ip_add:
                    ip_add.write(dist_url+'\n')
                    ip_add.write(str(port))
            else:
                # Worker machines: poll until the master has written the file.
                # NOTE(review): only existence is checked, so a reader could
                # observe a partially written file (missing port line).
                while not os.path.exists(ip_add_file):
                    time.sleep(0.5)

                with open(ip_add_file, "r") as ip_add:
                    dist_url = ip_add.readline().strip()
                    port = ip_add.readline()
        else:
            # Single machine: localhost master with an OS-assigned free port.
            dist_url = "tcp://127.0.0.1"
            port = _find_free_port()

    # set PyTorch distributed related environmental variables
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = dist_url
    current_env["MASTER_PORT"] = str(port)
    current_env["WORLD_SIZE"] = str(world_size)
    assert num_gpus_per_machine <= torch.cuda.device_count()

    # Cap OMP threads per worker so several workers on one machine do not
    # oversubscribe the CPU; users can override by setting the env var.
    if "OMP_NUM_THREADS" not in os.environ and num_gpus_per_machine > 1:
        current_env["OMP_NUM_THREADS"] = str(1)
        logger.info(
            "\n*****************************************\n"
            "Setting OMP_NUM_THREADS environment variable for each process "
            "to be {} in default, to avoid your system being overloaded, "
            "please further tune the variable for optimal performance in "
            "your application as needed. \n"
            "*****************************************".format(
                current_env["OMP_NUM_THREADS"]
            )
        )

    processes = []
    for local_rank in range(0, num_gpus_per_machine):
        # each process's rank
        dist_rank = machine_rank * num_gpus_per_machine + local_rank
        current_env["RANK"] = str(dist_rank)
        current_env["LOCAL_RANK"] = str(local_rank)

        # spawn the processes
        cmd = ["python3", *raw_argv]

        process = subprocess.Popen(cmd, env=current_env)
        processes.append(process)

    # Wait for every child; the first non-zero exit code aborts the launcher.
    for process in processes:
        process.wait()
        if process.returncode != 0:
            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
166 |
-
|
167 |
-
|
168 |
-
def _distributed_worker(
    local_rank,
    main_func,
    world_size,
    num_gpus_per_machine,
    num_machines,
    machine_rank,
    backend,
    dist_url,
    args,
):
    """Per-process worker entry point: join the process group, then run ``main_func``.

    Initializes ``torch.distributed`` with the given backend/URL, binds this
    process to its local GPU, records distributed placement on the launcher
    argument object, and finally invokes ``main_func(*args)``.

    Args:
        local_rank (int): this process's GPU index on the current machine.
        main_func: function executed by every worker as ``main_func(*args)``.
        world_size (int): total number of participating processes.
        num_gpus_per_machine (int): worker processes/GPUs per machine.
        num_machines (int): total number of machines.
        machine_rank (int): 0-based rank of this machine.
        backend (str): torch.distributed backend name (e.g. "nccl").
        dist_url (str): rendezvous URL passed to ``init_process_group``.
        args (tuple): forwarded to ``main_func``; args[1] is assumed to be a
            mutable config object (``local_rank``/``num_machines`` are set on
            it and ``experiment_name`` is read) — TODO confirm with callers.
    """
    assert (
        torch.cuda.is_available()
    ), "cuda is not available. Please check your installation."
    configure_nccl()
    # Global rank across all machines, used as the process-group rank below.
    global_rank = machine_rank * num_gpus_per_machine + local_rank
    logger.info("Rank {} initialization finished.".format(global_rank))
    try:
        dist.init_process_group(
            backend=backend,
            init_method=dist_url,
            world_size=world_size,
            rank=global_rank,
        )
    except Exception:
        # Log the URL before re-raising so rendezvous failures are debuggable.
        logger.error("Process group URL: {}".format(dist_url))
        raise
    # synchronize is needed here to prevent a possible timeout after calling init_process_group
    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
    comm.synchronize()

    # Rank 0 removes the multi-machine rendezvous file written by
    # launch_by_subprocess, now that every process has joined the group.
    if global_rank == 0 and os.path.exists(
        "./" + args[1].experiment_name + "_ip_add.txt"
    ):
        os.remove("./" + args[1].experiment_name + "_ip_add.txt")

    assert num_gpus_per_machine <= torch.cuda.device_count()
    # Pin all subsequent CUDA work in this process to its own GPU.
    torch.cuda.set_device(local_rank)

    # Record distributed placement on the shared argument object so that
    # main_func can read it.
    args[1].local_rank = local_rank
    args[1].num_machines = num_machines

    # Setup the local process group (which contains ranks within the same machine)
    # assert comm._LOCAL_PROCESS_GROUP is None
    # num_machines = world_size // num_gpus_per_machine
    # for i in range(num_machines):
    #     ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
    #     pg = dist.new_group(ranks_on_i)
    #     if i == machine_rank:
    #         comm._LOCAL_PROCESS_GROUP = pg

    main_func(*args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Epitech/Scarecrow/original_app/README.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
This is the base scarecrow application, with the back-end and the scarecrow communication set up.
|
2 |
-
|
3 |
-
Inside the Hugging Face application, as a demo, there is only the back-end part with some visualisation setup.
|
4 |
-
|
5 |
-
|
6 |
-
- a.mp3 -> predator sound for human
|
7 |
-
- b.mp3 -> predator sound for cell_phone
|
8 |
-
- coco.names -> labels for yolo to use
|
9 |
-
- scarecrow.py -> the application that collects video and sends the stream to the back-end
|
10 |
-
- backend.py -> the application which runs the model to detect animals
|
11 |
-
- yolov3.cfg & yolov3.weights -> can't be included inside huggingface as binary
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|