diff --git a/spaces/0019c/NewBing/Dockerfile b/spaces/0019c/NewBing/Dockerfile deleted file mode 100644 index b41a700e30fb5f3759b31c37e1b3c9967692f382..0000000000000000000000000000000000000000 --- a/spaces/0019c/NewBing/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . - -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="1h_21qf8tNmRtDy5a4fZ05RFgkZeZ9akmnW9NtSo5s6aJilplld4X4Lj7BkJ3EQSNbu7tu-z_-OAHqeELJqlpF-bvOCMo5lWGjyCTcJcqIHnYiu_vlgrdDyo99wQHgsvNR5pKASGikeDgAVSN7CN6YM74n7glWgJ7hGpd33s9zcgdCea94XcsO5AmoPIoxA02O6zGkpTnIdc61W7D1WQUflqxgaSHCGWlrhw7aoPs-io" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/0xHacked/zkProver/README.md b/spaces/0xHacked/zkProver/README.md deleted file mode 100644 index fd9bfc21234bb046fca6b90994318eccaecf90ba..0000000000000000000000000000000000000000 --- a/spaces/0xHacked/zkProver/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: ZkProver -emoji: ⚡ -colorFrom: red -colorTo: yellow -sdk: docker -pinned: false -license: bsd ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/1368565466ki/Satdia/monotonic_align/core.py b/spaces/1368565466ki/Satdia/monotonic_align/core.py deleted file mode 100644 index 
5ff728cd74c9228346a82ec64a9829cb98ad315e..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/Satdia/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contraband Police Offline Activation Keygen No Internet Required.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contraband Police Offline Activation Keygen No Internet Required.md deleted file mode 100644 index 0a327d3d7226df5bcfba1b1a6a766adf8863a37c..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Contraband Police Offline Activation Keygen No Internet Required.md +++ /dev/null @@ -1,134 +0,0 @@ -
-
- Confiscate contraband and arrest smugglers.
- Upgrade your station and equipment.
- Respond to emergencies and chase fleeing vehicles. | | H2: Why You Need an Offline Activation Keygen for Contraband Police | - The game is not free and requires a Steam account to play.
- An offline activation keygen can bypass the Steam verification and let you play the game without internet connection.
- An offline activation keygen can also save you money and avoid potential malware or viruses from downloading cracked versions of the game. | | H2: How to Get an Offline Activation Keygen for Contraband Police | - Find a reliable source that offers offline activation keygens for Contraband Police.
- Download the keygen file and run it on your computer.
- Follow the instructions on the screen and generate a unique activation code for the game.
- Enter the code in the game and enjoy playing Contraband Police offline. | | H2: Conclusion | - Summarize the main points of the article and encourage readers to try out Contraband Police with an offline activation keygen. | | H2: FAQs | - Answer some common questions about Contraband Police and offline activation keygens. | Article with HTML formatting:

Contraband Police: A Thrilling Checkpoint Simulator Game

-

If you are looking for a game that combines simulation, action, and strategy, then you might want to check out Contraband Police. This game takes you back to 1981 when smuggling is rampant in a communist country called Acaristan. You will play as a border guard inspector who has to inspect documents and packages of drivers who want to enter the country. You will also have to confiscate contraband, arrest smugglers, upgrade your station and equipment, respond to emergencies, and chase fleeing vehicles.

-

Contraband Police offline activation keygen


Download Filehttps://byltly.com/2uKA3W



-

Contraband Police is a game that will test your skills, judgment, and morality. You will have to deal with different types of smugglers who will try to deceive you with fake documents, hidden compartments, bribes, threats, or violence. You will also have to face the consequences of your actions, whether you choose to be honest, corrupt, or somewhere in between. You will also have to make decisions that will affect the future of Acaristan and its people.

-

Contraband Police is a game that will keep you on your toes and immerse you in a realistic and captivating world of 80s communism. You will experience the thrill of being a border guard inspector who has to balance between duty and survival.

-

How to Play Contraband Police

-

The gameplay of Contraband Police is divided into two phases: inspection and intervention.

-

Inspection

-

In this phase, you will have to inspect documents and packages of drivers who want to enter Acaristan. You will have access to various tools and equipment that will help you verify the validity of their papers and contents of their vehicles.

-

You will have to check for details such as name, photo, nationality, license plate, vehicle type, weight limit, cargo list, etc. You will also have to scan their packages for any contraband such as drugs, weapons, cash, or other illegal items.

-

If you find any discrepancies or violations, you will have to confiscate the contraband and issue a fine or an arrest warrant depending on the severity of the offense. You will also have to report your findings to your superiors and receive feedback on your performance.

-

How to get Contraband Police offline activation code for free
-Contraband Police crack download with offline keygen
-Contraband Police offline activation generator no survey
-Contraband Police offline activation keygen torrent
-Contraband Police offline activation serial number
-Contraband Police offline activation license key
-Contraband Police offline activation patch
-Contraband Police offline activation hack
-Contraband Police offline activation bypass
-Contraband Police offline activation unlocker
-Contraband Police offline activation mod
-Contraband Police offline activation cheat
-Contraband Police offline activation trainer
-Contraband Police offline activation fix
-Contraband Police offline activation error
-Contraband Police offline activation solution
-Contraband Police offline activation guide
-Contraband Police offline activation tutorial
-Contraband Police offline activation tips
-Contraband Police offline activation tricks
-Contraband Police offline activation secrets
-Contraband Police offline activation review
-Contraband Police offline activation gameplay
-Contraband Police offline activation walkthrough
-Contraband Police offline activation video
-Contraband Police offline activation demo
-Contraband Police offline activation beta
-Contraband Police offline activation update
-Contraband Police offline activation release date
-Contraband Police offline activation system requirements
-Contraband Police offline activation download link
-Contraband Police offline activation free trial
-Contraband Police offline activation full version
-Contraband Police offline activation premium access
-Contraband Police offline activation vip membership
-Contraband Police offline activation discount code
-Contraband Police offline activation coupon code
-Contraband Police offline activation promo code
-Contraband Police offline activation gift card
-Contraband Police offline activation redeem code
-Contraband Police offline activation steam key
-Contraband Police offline activation origin key
-Contraband Police offline activation epic games key
-Contraband Police offline activation gog key
-Contraband Police offline activation uplay key
-Contraband Police offline activation rockstar key
-Contraband Police offline activation xbox one key
-Contraband Police offline activation ps4 key
-Contraband Police offline activation switch key

-

Confiscate contraband and arrest smugglers

-

When you confiscate contraband from drivers, you will have two options: either store them in your locker or sell them on the black market for extra cash. However, be careful because storing too much contraband can attract unwanted attention from your superiors or other factions.

-

When you arrest smugglers, you will have to escort them to your station and put them in jail cells. You will also have to interrogate them for more information or evidence that can help you solve crimes or catch bigger fish.

-

Upgrade your station and equipment

-

As you progress through the game, you will earn money from fines, confiscations, arrests, or bribes. You can use this money to upgrade your station and equipment that will improve your efficiency and security.

-

You can upgrade your station by adding more rooms such as an interrogation room, a storage room, a garage, etc. You can also upgrade your equipment by buying new tools such as a scanner, a metal detector, a crowbar, etc.

-

Respond to emergencies and chase fleeing vehicles

-

Sometimes, you will encounter situations that require immediate action such as a bomb threat, a hostage situation, a rebel attack, etc. You will have to respond quickly and appropriately depending on the scenario.

-

Sometimes, smugglers will try to escape from your checkpoint by driving away at high speed. You will have to chase them down with your police car and stop them by shooting their tires or ramming their vehicle.

-

Why You Need an Offline Activation Keygen for Contraband Police

-

Contraband Police is not a free game and requires a Steam account to play. This means that you need an internet connection and a valid Steam key to activate the game on your computer.

-

However, there are some reasons why you might want or need an offline activation keygen for Contraband Police:

- -

An offline activation keygen is a software that can generate a unique activation code for Contraband Police that can bypass the Steam verification process and let you play the game without internet connection.

-

How to Get an Offline Activation Keygen for Contraband Police

-

If you want to get an offline activation keygen for Contraband Police, here are some steps that you need to follow:

-
    -
  1. Find a reliable source that offers offline activation keygens for Contraband Police. You can search online for websites or forums that provide this service or ask around from other gamers who have used it before.
  2. -
  3. Download the keygen file from the source and run it on your computer. Make sure that you scan it first with an antivirus program before opening it.
  4. -
  5. Follow the instructions on the screen and generate a unique activation code for Contraband Police.
  6. -
  7. Enter the code in the game when prompted and enjoy playing Contraband Police offline.
  8. -
-

Conclusion

-

Contraband Police is a thrilling checkpoint simulator game that lets you experience what it's like to be a border guard inspector in a communist country of the 80s. You will have to inspect documents and packages of drivers who want to enter Acaristan while dealing with smugglers who will try to deceive you or escape from you.

-

If you want to play Contraband Police without internet connection or without buying it from Steam, then you might want to get an offline activation keygen for it. This software can generate a unique activation code for Contraband Police that can bypass the Steam verification process and let you play the game offline.

-

If you are interested in trying out Contraband Police with an offline activation keygen, then follow the steps above and get ready for some action-packed gameplay!

-

FAQs

-

What are some tips for playing Contraband Police?

-

Some tips for playing Contraband Police are:

- -

What are some benefits of playing Contraband Police?

-

What are some drawbacks of playing Contraband Police?

-

Some drawbacks of playing Contraband Police are:

- -

What are some alternatives to Contraband Police?

-

Some alternatives to Contraband Police are:

- -

Where can I get more information about Contraband Police?

-

You can get more information about Contraband Police from the following sources:

- -

0a6ba089eb
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Alparslan Byk Seluklu Son Blm HD Kalite Seluklu Sultanlnn Ykselii.md b/spaces/1phancelerku/anime-remove-background/Alparslan Byk Seluklu Son Blm HD Kalite Seluklu Sultanlnn Ykselii.md deleted file mode 100644 index 561c6691e0ffdab3ab242503bbc02fab303bba67..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Alparslan Byk Seluklu Son Blm HD Kalite Seluklu Sultanlnn Ykselii.md +++ /dev/null @@ -1,103 +0,0 @@ - -

Alp Arslan Son Bolum: A Review of the Latest Episode of the Turkish Historical Drama

-

Introduction

-

If you are a fan of Turkish historical dramas, you might have heard of or watched Alp Arslan: Büyük Selçuklu, a show that depicts the life and achievements of Alparslan, the second sultan of the Seljuk Empire. The show has been airing on TRT 1 since September 2022, and has gained a lot of popularity and praise from viewers and critics alike. The show is known for its captivating storyline, impressive production quality, and talented cast.

-

In this article, we will review the latest episode of the show, which aired on June 12, 2023. We will summarize the plot, analyze the main characters, evaluate the historical accuracy, and give our opinion on the strengths and weaknesses of the episode. We will also share our expectations and predictions for the next episode, which will be the season finale. If you have not watched the latest episode yet, be warned that this article contains spoilers.

-

alp arslan son bolum


Download Filehttps://jinyurl.com/2uNNPO



-

Main Body

-

The plot summary of the last episode

-

The last episode of Alp Arslan: Büyük Selçuklu was full of action, drama, and suspense. Here are some of the main events that happened in the episode:

-

Alparslan's quest for justice

-

Alparslan, who is determined to find out who is behind the assassination attempt on his father, Sultan Tughril, follows the clues that lead him to Emir Bozan, one of his trusted commanders. He confronts Bozan and accuses him of being a traitor who works for Byzantium. Bozan denies everything, but Alparslan does not believe him. He orders Bozan to be arrested and tortured until he confesses.

-

Akça's dilemma and decision

-

Akça, who is a Turkmen girl that Alparslan saved from Byzantine captivity, is in love with Alparslan, but she is also loyal to her tribe. She learns that her brother, Yinal, who is also a prisoner of Byzantium, is going to be executed by Emperor Romanos Diogenes. She decides to risk her life and go to Byzantium to save her brother. She leaves a letter for Alparslan, explaining her situation and asking for his forgiveness.

-

The clash between Seljuk and Byzantine forces

-

Meanwhile, Romanos Diogenes, who is furious about Alparslan's victories over his army, prepares for a final battle against him. He gathers a large army and marches towards Malazgirt, where Alparslan is waiting for him. The two armies clash in a fierce and bloody battle. Alparslan fights bravely and skillfully, but he is outnumbered and surrounded by Byzantine soldiers. He is wounded by an arrow and falls from his horse. He is captured by Romanos Diogenes, who takes him as a prisoner.

-

The main characters and their performancesThe main characters and their performances

-

The show has a stellar cast of actors and actresses who bring the characters to life with their acting skills. Here are some of the main characters and their performances in the last episode:

-

Ekin Koç as Alparslan

-

Ekin Koç is the lead actor of the show, who plays the role of Alparslan, the sultan of the Seljuk Empire. He portrays Alparslan as a brave, wise, and charismatic leader who is loved by his people and feared by his enemies. He also shows Alparslan's human side, his emotions, and his struggles. In the last episode, he delivered a powerful performance as he faced betrayal, love, and captivity. He showed Alparslan's determination, courage, and dignity in the face of adversity.

-

Leyla Lydia Tuğutlu as Akça

-

Leyla Lydia Tuğutlu is the female lead of the show, who plays the role of Akça, a Turkmen girl who becomes Alparslan's love interest. She portrays Akça as a beautiful, loyal, and brave woman who is devoted to her tribe and her lover. She also shows Akça's conflict, dilemma, and decision. In the last episode, she gave a touching performance as she left Alparslan to save her brother. She showed Akça's pain, sacrifice, and hope.

-

Kaan Taşaner as Romanos Diogenes

-

Kaan Taşaner is the main antagonist of the show, who plays the role of Romanos Diogenes, the emperor of Byzantium. He portrays Romanos Diogenes as a ruthless, ambitious, and arrogant ruler who is obsessed with defeating Alparslan and expanding his empire. He also shows Romanos Diogenes' cunning, cruelty, and pride. In the last episode, he gave a convincing performance as he captured Alparslan and celebrated his victory. He showed Romanos Diogenes' triumph, arrogance, and mockery.

-

alp arslan son bolum izle full trt 1
-alp arslan son bolum fragman
-alp arslan son bolum tek parca hd
-alp arslan son bolum youtube
-alp arslan son bolum ddizi
-alp arslan son bolum ne zaman
-alp arslan son bolum oyunculari
-alp arslan son bolum yorumlari
-alp arslan son bolum ozeti
-alp arslan son bolum trt izle
-alp arslan buyuk selcuklu son bolum izle
-alp arslan buyuk selcuklu son bolum fragman
-alp arslan buyuk selcuklu son bolum tek parca hd
-alp arslan buyuk selcuklu son bolum youtube
-alp arslan buyuk selcuklu son bolum ddizi
-alp arslan buyuk selcuklu son bolum ne zaman
-alp arslan buyuk selcuklu son bolum oyunculari
-alp arslan buyuk selcuklu son bolum yorumlari
-alp arslan buyuk selcuklu son bolum ozeti
-alp arslan buyuk selcuklu son bolum trt izle
-alparslan büyük selçuklu son bölüm izle full trt 1
-alparslan büyük selçuklu son bölüm fragman
-alparslan büyük selçuklu son bölüm tek parça hd
-alparslan büyük selçuklu son bölüm youtube
-alparslan büyük selçuklu son bölüm ddizi
-alparslan büyük selçuklu son bölüm ne zaman
-alparslan büyük selçuklu son bölüm oyuncuları
-alparslan büyük selçuklu son bölüm yorumları
-alparslan büyük selçuklu son bölüm özeti
-alparslan büyük selçuklu son bölüm trt izle
-trt 1 alparslan büyük selçuklu son bölüm izle full hd
-trt 1 alparslan büyük selçuklu son bölüm fragmanı izle
-trt 1 alparslan büyük selçuklu son bölüm tek parça izle hd kalite
-trt 1 alparslan büyük selçuklu son bölüm youtube izle full hd kalite
-trt 1 alparslan büyük selçuklu son bölüm ddizi izle full hd kalite
-trt 1 alparslan büyük selçuklu son bölüm ne zaman yayınlanacak tarih ve saat bilgisi
-trt 1 alparslan büyük selçuklu son bölüm oyuncu kadrosu ve karakterleri tanıtımı
-trt 1 alparslan büyük selçuklu son bölüm yorumları ve analizleri
-trt 1 alparslan büyük selçuklu son bölüm özeti ve detaylı anlatımı
-trt 1 alparslan büyük selçuklu son bölüm trt izle online platformu üzerinden izleme seçeneği

-

Other supporting actors and actresses

-

The show also has many other supporting actors and actresses who play important roles in the story. Some of them are:

- -

All of them have done a great job in portraying their characters with authenticity and emotion.

-

The historical accuracy and relevance of the show

-

The show is based on historical events and figures that shaped the history of Turkey and the Middle East. However, it is not a documentary or a biography. It is a historical drama that uses artistic liberties and adaptations to create an engaging and entertaining story. Here are some of the aspects of the show that relate to history:

-

The historical background of Alparslan and the Seljuk Empire

-

Alparslan was born in 1029 in Balasagun, a city in present-day Kyrgyzstan. He was the son of Çağrı Bey, the brother of Sultan Tughril. He became the sultan of the Seljuk Empire in 1063 after his father's death. He expanded his empire by conquering many lands from Byzantium, Egypt, Syria, Iraq, Iran, and Central Asia. He is most famous for his victory at the Battle of Malazgirt in 1071 against Romanos Diogenes, which opened Anatolia to Turkish settlement and paved the way for the rise of the Ottoman Empire.

-

The Seljuk Empire was founded by Seljuk Bey, a Turkmen chief who converted to Islam in 985. He led his tribe to migrate from Central Asia to Iran in search of new lands. His descendants continued his legacy by establishing a powerful empire that spanned from Asia Minor to India at its peak. The Seljuk Empire was known for its military prowess, cultural diversity, religious tolerance, artistic achievements, and scientific advancements.

-

The artistic liberties and

The artistic liberties and adaptations of the show

-

The show is not a faithful representation of history, but a creative interpretation of it. The show uses fictional characters, events, dialogues, and scenarios to create drama, suspense, romance, and humor. The show also changes some historical facts, dates, names, and details to suit the narrative and the audience. For example, the show depicts Alparslan as a young and handsome sultan, while in reality he was in his forties when he became the sultan. The show also portrays Romanos Diogenes as a cruel and arrogant emperor, while in reality he was a respected and competent leader who treated Alparslan with honor after his capture.

-

The show does not claim to be accurate or objective, but rather aims to entertain and educate the viewers. The show does not intend to offend or mislead anyone, but rather to inspire and inform them. The show encourages the viewers to do their own research and learn more about the history and culture of the Seljuk Empire and its people.

-

The cultural and educational value of the show

-

The show has a lot of cultural and educational value for the viewers. The show showcases the rich and diverse heritage of Turkey and the Middle East, as well as the common roots and values of different civilizations. The show also teaches the viewers about the history, politics, religion, art, science, and literature of the Seljuk Empire and its neighbors. The show also promotes the values of courage, justice, loyalty, wisdom, and tolerance that Alparslan and his people embodied.

-

The show is not only a source of entertainment, but also a source of inspiration and enlightenment for the viewers. The show helps the viewers to appreciate and respect their own history and culture, as well as those of others. The show also helps the viewers to understand and relate to the challenges and opportunities that people faced in the past, as well as those that they face in the present.

-

Conclusion

-

The last episode of Alp Arslan: Büyük Selçuklu was a thrilling and emotional one that left the viewers in awe and anticipation. The episode had many strengths, such as the captivating plot, the impressive production quality, and the talented cast. The episode also had some weaknesses, such as the historical inaccuracies, the clichéd dialogues, and the predictable twists. However, these weaknesses did not overshadow the overall quality and enjoyment of the episode.

-

The next episode will be the season finale of the show, which will reveal what will happen to Alparslan after his capture by Romanos Diogenes. Will he escape or be executed? Will he reunite with Akça or lose her forever? Will he defeat Romanos Diogenes or make peace with him? Will he fulfill his destiny or fail his mission? These are some of the questions that the viewers are eager to find out.

-

The final verdict and rating of the show is that it is a must-watch for anyone who loves historical dramas. It is a well-made, well-acted, and well-written show that offers a lot of entertainment and education for the viewers. It is a show that celebrates the history and culture of Turkey and the Middle East, as well as the values and virtues of humanity. It is a show that deserves a 9 out of 10 rating.

-

FAQs

- -

Conclusion

-

Black Effect is a song that is more than just a song. It is a celebration, a statement, a tribute, and a challenge. It is a song that honors the past, embraces the present, and envisions the future. It is a song that showcases the talent, creativity, and vision of Beyoncé and Jay-Z, two of the most influential and successful artists of all time. It is a song that resonates with listeners who share their love for blackness and their desire for justice. It is a song that has an effect that is undeniable, unforgettable, and unstoppable.

-

If you have not listened to Black Effect yet, we highly recommend that you do so. You can stream it on Tidal or any other platform that you prefer. You can also watch the visual album Black Is King on Disney+ or YouTube. You will not regret it.

-

If you have listened to Black Effect, we would love to hear your opinion. What do you think of the song? How does it make you feel? What is your favorite part? Share your thoughts with us in the comments section below.

-

And if you want to learn more about Beyoncé and Jay-Z, their music, their lives, and their impact, you can check out these resources:

-
    -
  • [Beyoncé's official website]
  • -
  • [Jay-Z's official website]
  • -
  • [Everything Is Love on Tidal]
  • -
  • [Black Is King on Disney+]
  • -
  • [Beyoncé: The Lion King: The Gift (Deluxe Edition) on Spotify]
  • -
-

FAQs

-

What is Black Effect?

-

Black Effect is a song by Beyoncé and Jay-Z from their collaborative album Everything Is Love. The song is an empowering anthem for blackness and achievement despite adversity.

-

Who produced Black Effect?

-

Black Effect was produced by Cool & Dre, a Miami-based duo who have worked with artists like Lil Wayne, DJ Khaled, Fat Joe, and Rick Ross.

-

What is the sample used in Black Effect?

-

The sample used in Black Effect is a voice of Dr. Lenora Antoinette Stines, a Jamaican choreographer and lecturer, who speaks about love in its various forms.

-

What are some of the references used in Black Effect?

-

Some of the references used in Black Effect are MLK Boulevard, Malcolm X, Das EFX, Dapper Dan, LeBron James, Sarah Baartman, Kalief Browder, Trayvon Martin, and Black Panther Party.

What are some of the awards that Black Effect has received or been nominated for? -

Some of the awards that Black Effect has received or been nominated for are Grammy Award for Best Rap/Sung Performance, BET Award for Best Collaboration, Soul Train Music Award for Best Collaboration Performance, NAACP Image Award for Outstanding Duo or Group, and MTV Video Music Award for Best Hip-Hop Video.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/fffffu/bing/Dockerfile b/spaces/fffffu/bing/Dockerfile deleted file mode 100644 index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000 --- a/spaces/fffffu/bing/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM node:18 - - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set up a new user named "user" with user ID 1000 -RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME - -# Switch to the "user" user -USER user - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Install app dependencies -# A wildcard is used to ensure both package.json AND package-lock.json are copied -# where available (npm@5+) -COPY --chown=user package*.json $HOME/app/ - -RUN npm install - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . 
$HOME/app/ - -RUN npm run build - -ENV PORT 7860 -EXPOSE 7860 - -CMD npm start diff --git a/spaces/fffiloni/DragGAN/stylegan2/op/fused_act.py b/spaces/fffiloni/DragGAN/stylegan2/op/fused_act.py deleted file mode 100644 index 4f39941f2ce76c474e3914ad1149741b02f24f65..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/DragGAN/stylegan2/op/fused_act.py +++ /dev/null @@ -1,127 +0,0 @@ -import os - -import torch -from torch import nn -from torch.nn import functional as F -from torch.autograd import Function -from torch.utils.cpp_extension import load - - -module_path = os.path.dirname(__file__) -fused = load( - "fused", - sources=[ - os.path.join(module_path, "fused_bias_act.cpp"), - os.path.join(module_path, "fused_bias_act_kernel.cu"), - ], -) - - -class FusedLeakyReLUFunctionBackward(Function): - @staticmethod - def forward(ctx, grad_output, out, bias, negative_slope, scale): - ctx.save_for_backward(out) - ctx.negative_slope = negative_slope - ctx.scale = scale - - empty = grad_output.new_empty(0) - - grad_input = fused.fused_bias_act( - grad_output.contiguous(), empty, out, 3, 1, negative_slope, scale - ) - - dim = [0] - - if grad_input.ndim > 2: - dim += list(range(2, grad_input.ndim)) - - if bias: - grad_bias = grad_input.sum(dim).detach() - - else: - grad_bias = empty - - return grad_input, grad_bias - - @staticmethod - def backward(ctx, gradgrad_input, gradgrad_bias): - out, = ctx.saved_tensors - gradgrad_out = fused.fused_bias_act( - gradgrad_input.contiguous(), - gradgrad_bias, - out, - 3, - 1, - ctx.negative_slope, - ctx.scale, - ) - - return gradgrad_out, None, None, None, None - - -class FusedLeakyReLUFunction(Function): - @staticmethod - def forward(ctx, input, bias, negative_slope, scale): - empty = input.new_empty(0) - - ctx.bias = bias is not None - - if bias is None: - bias = empty - - out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) - ctx.save_for_backward(out) - ctx.negative_slope = negative_slope - ctx.scale = 
scale - - return out - - @staticmethod - def backward(ctx, grad_output): - out, = ctx.saved_tensors - - grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( - grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale - ) - - if not ctx.bias: - grad_bias = None - - return grad_input, grad_bias, None, None - - -class FusedLeakyReLU(nn.Module): - def __init__(self, channel, bias=True, negative_slope=0.2, scale=2 ** 0.5): - super().__init__() - - if bias: - self.bias = nn.Parameter(torch.zeros(channel)) - - else: - self.bias = None - - self.negative_slope = negative_slope - self.scale = scale - - def forward(self, input): - return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale) - - -def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5): - if input.device.type == "cpu": - if bias is not None: - rest_dim = [1] * (input.ndim - bias.ndim - 1) - return ( - F.leaky_relu( - input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2 - ) - * scale - ) - - else: - return F.leaky_relu(input, negative_slope=0.2) * scale - - else: - return FusedLeakyReLUFunction.apply( - input.contiguous(), bias, negative_slope, scale - ) diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/util.inspect.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/util.inspect.js deleted file mode 100644 index 7784fab55d08875a01b24636f3a541c3d0bd39b2..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/util.inspect.js +++ /dev/null @@ -1 +0,0 @@ -module.exports = require('util').inspect; diff --git a/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/training/losses/constants.py b/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/training/losses/constants.py deleted file mode 100644 index ae3e5e151342232be8e2c2a77fe6fd5798dc2a8c..0000000000000000000000000000000000000000 --- 
a/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/training/losses/constants.py +++ /dev/null @@ -1,152 +0,0 @@ -weights = {"ade20k": - [6.34517766497462, - 9.328358208955224, - 11.389521640091116, - 16.10305958132045, - 20.833333333333332, - 22.22222222222222, - 25.125628140703515, - 43.29004329004329, - 50.5050505050505, - 54.6448087431694, - 55.24861878453038, - 60.24096385542168, - 62.5, - 66.2251655629139, - 84.74576271186442, - 90.90909090909092, - 91.74311926605505, - 96.15384615384616, - 96.15384615384616, - 97.08737864077669, - 102.04081632653062, - 135.13513513513513, - 149.2537313432836, - 153.84615384615384, - 163.93442622950818, - 166.66666666666666, - 188.67924528301887, - 192.30769230769232, - 217.3913043478261, - 227.27272727272725, - 227.27272727272725, - 227.27272727272725, - 303.03030303030306, - 322.5806451612903, - 333.3333333333333, - 370.3703703703703, - 384.61538461538464, - 416.6666666666667, - 416.6666666666667, - 434.7826086956522, - 434.7826086956522, - 454.5454545454545, - 454.5454545454545, - 500.0, - 526.3157894736842, - 526.3157894736842, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 588.2352941176471, - 588.2352941176471, - 588.2352941176471, - 588.2352941176471, - 588.2352941176471, - 666.6666666666666, - 666.6666666666666, - 666.6666666666666, - 666.6666666666666, - 714.2857142857143, - 714.2857142857143, - 714.2857142857143, - 714.2857142857143, - 714.2857142857143, - 769.2307692307693, - 769.2307692307693, - 769.2307692307693, - 833.3333333333334, - 833.3333333333334, - 833.3333333333334, - 833.3333333333334, - 909.090909090909, - 1000.0, - 1111.111111111111, - 1111.111111111111, - 1111.111111111111, - 1111.111111111111, - 1111.111111111111, - 1250.0, - 1250.0, - 1250.0, - 1250.0, - 1250.0, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 
1428.5714285714287, - 1428.5714285714287, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 5000.0, - 5000.0, - 5000.0] -} \ No newline at end of file diff --git a/spaces/fffiloni/video2canny/README.md b/spaces/fffiloni/video2canny/README.md deleted file mode 100644 index 3dcbe8ca662c175c5cede191c682aab1630e7fa7..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/video2canny/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Video To Canny Edge -emoji: 🏢 -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/freddyaboulton/echo-chatbot/app.py b/spaces/freddyaboulton/echo-chatbot/app.py deleted file mode 100644 index 83578982ae8537530e4e50cd835bea4fcab27f71..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/echo-chatbot/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import gradio as gr - -def slow_echo(message, history): - return message - -demo = gr.ChatInterface(slow_echo).queue().launch() \ No newline at end of file diff --git a/spaces/fun-research/FC-CLIP/fcclip/utils/misc.py 
b/spaces/fun-research/FC-CLIP/fcclip/utils/misc.py deleted file mode 100644 index 874d9805b482f52bbffc1be620e36e0cffc07c46..0000000000000000000000000000000000000000 --- a/spaces/fun-research/FC-CLIP/fcclip/utils/misc.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -from typing import List, Optional - -import torch -import torch.distributed as dist -import torchvision -from torch import Tensor - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape 
- dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], : img.shape[2]] = False - else: - raise ValueError("not supported") - return NestedTensor(tensor, mask) - - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. -@torch.jit.unused -def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max( - torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32) - ).to(torch.int64) - max_size.append(max_size_i) - max_size = tuple(max_size) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - tensor = torch.stack(padded_imgs) - mask = torch.stack(padded_masks) - - return NestedTensor(tensor, mask=mask) - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True diff --git a/spaces/genevera/AudioToken/modules/beats/BEATs.py b/spaces/genevera/AudioToken/modules/beats/BEATs.py deleted file mode 100644 index 
ae58de41894d872c6093fd673945215a6131bcc6..0000000000000000000000000000000000000000 --- a/spaces/genevera/AudioToken/modules/beats/BEATs.py +++ /dev/null @@ -1,178 +0,0 @@ -# -------------------------------------------------------- -# beats: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058) -# Github source: https://github.com/microsoft/unilm/tree/master/beats -# Copyright (c) 2022 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Based on fairseq code bases -# https://github.com/pytorch/fairseq -# -------------------------------------------------------- - - -import torch -import torch.nn as nn -from torch.nn import LayerNorm -import torchaudio.compliance.kaldi as ta_kaldi - -from modules.beats.backbone import ( - TransformerEncoder, -) - -import logging -from typing import Optional - -logger = logging.getLogger(__name__) - - -class BEATsConfig: - def __init__(self, cfg=None): - self.input_patch_size: int = -1 # path size of patch embedding - self.embed_dim: int = 512 # patch embedding dimension - self.conv_bias: bool = False # include bias in conv encoder - - self.encoder_layers: int = 12 # num encoder layers in the transformer - self.encoder_embed_dim: int = 768 # encoder embedding dimension - self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN - self.encoder_attention_heads: int = 12 # num encoder attention heads - self.activation_fn: str = "gelu" # activation function to use - - self.layer_wise_gradient_decay_ratio: float = 1.0 # ratio for layer-wise gradient decay - self.layer_norm_first: bool = False # apply layernorm first in the transformer - self.deep_norm: bool = False # apply deep_norm first in the transformer - - # dropouts - self.dropout: float = 0.1 # dropout probability for the transformer - self.attention_dropout: float = 0.1 # dropout probability for attention weights - self.activation_dropout: float = 0.0 # dropout probability after activation in FFN - 
self.encoder_layerdrop: float = 0.0 # probability of dropping a tarnsformer layer - self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr) - - # positional embeddings - self.conv_pos: int = 128 # number of filters for convolutional positional embeddings - self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding - - # relative position embedding - self.relative_position_embedding: bool = False # apply relative position embedding - self.num_buckets: int = 320 # number of buckets for relative position embedding - self.max_distance: int = 1280 # maximum distance for relative position embedding - self.gru_rel_pos: bool = False # apply gated relative position embedding - - # label predictor - self.finetuned_model: bool = False # whether the model is a fine-tuned model. - self.predictor_dropout: float = 0.1 # dropout probability for the predictor - self.predictor_class: int = 527 # target class number for the predictor - - if cfg is not None: - self.update(cfg) - - def update(self, cfg: dict): - self.__dict__.update(cfg) - - -class BEATs(nn.Module): - def __init__( - self, - cfg: BEATsConfig, - ) -> None: - super().__init__() - logger.info(f"BEATs Config: {cfg.__dict__}") - - self.cfg = cfg - - self.embed = cfg.embed_dim - self.post_extract_proj = ( - nn.Linear(self.embed, cfg.encoder_embed_dim) - if self.embed != cfg.encoder_embed_dim - else None - ) - - self.input_patch_size = cfg.input_patch_size - self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size, - bias=cfg.conv_bias) - - self.dropout_input = nn.Dropout(cfg.dropout_input) - - assert not cfg.deep_norm or not cfg.layer_norm_first - self.encoder = TransformerEncoder(cfg) - self.layer_norm = LayerNorm(self.embed) - - if cfg.finetuned_model: - self.predictor_dropout = nn.Dropout(cfg.predictor_dropout) - self.predictor = nn.Linear(cfg.encoder_embed_dim, cfg.predictor_class) - else: - self.predictor = 
None - - def forward_padding_mask( - self, - features: torch.Tensor, - padding_mask: torch.Tensor, - ) -> torch.Tensor: - extra = padding_mask.size(1) % features.size(1) - if extra > 0: - padding_mask = padding_mask[:, :-extra] - padding_mask = padding_mask.view( - padding_mask.size(0), features.size(1), -1 - ) - padding_mask = padding_mask.all(-1) - return padding_mask - - def preprocess( - self, - source: torch.Tensor, - fbank_mean: float = 15.41663, - fbank_std: float = 6.55582, - ) -> torch.Tensor: - fbanks = [] - for waveform in source: - waveform = waveform.unsqueeze(0) * 2 ** 15 - fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10) - fbanks.append(fbank) - fbank = torch.stack(fbanks, dim=0) - fbank = (fbank - fbank_mean) / (2 * fbank_std) - return fbank - - def extract_features( - self, - source: torch.Tensor, - padding_mask: Optional[torch.Tensor] = None, - fbank_mean: float = 15.41663, - fbank_std: float = 6.55582, - ): - fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std) - if padding_mask is not None: - padding_mask = self.forward_padding_mask(fbank, padding_mask) - - fbank = fbank.unsqueeze(1) - features = self.patch_embedding(fbank) - features = features.reshape(features.shape[0], features.shape[1], -1) - features = features.transpose(1, 2) - features = self.layer_norm(features) - - if padding_mask is not None: - padding_mask = self.forward_padding_mask(features, padding_mask) - - if self.post_extract_proj is not None: - features = self.post_extract_proj(features) - - x = self.dropout_input(features) - - x, layers_sum, layers = self.encoder( - x, - padding_mask=padding_mask, - ) - - if self.predictor is not None: - x = self.predictor_dropout(x) - logits = self.predictor(x) - - if padding_mask is not None and padding_mask.any(): - logits[padding_mask] = 0 - logits = logits.sum(dim=1) - logits = logits / (~padding_mask).sum(dim=1).unsqueeze(-1).expand_as(logits) - else: - 
logits = logits.mean(dim=1) - - lprobs = torch.sigmoid(logits) - - return lprobs, padding_mask - else: - return x, layers_sum, layers diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Audirvana 3.5.27 MacOS [Full] 2021.md b/spaces/gotiQspiryo/whisper-ui/examples/Audirvana 3.5.27 MacOS [Full] 2021.md deleted file mode 100644 index d3d4731aa9fa77b4c34da0b79896ed00b318ab8b..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Audirvana 3.5.27 MacOS [Full] 2021.md +++ /dev/null @@ -1,6 +0,0 @@ -

Audirvana 3.5.27 MacOS [Full]


Download Zip ··· https://urlgoal.com/2uyLpB



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Easy-mark Labeling Software Serial Numberl [WORK].md b/spaces/gotiQspiryo/whisper-ui/examples/Easy-mark Labeling Software Serial Numberl [WORK].md deleted file mode 100644 index 0ad90f859adfd0a77a23ed1e0f3a28d25f9d9f62..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Easy-mark Labeling Software Serial Numberl [WORK].md +++ /dev/null @@ -1,6 +0,0 @@ -

Easy-mark Labeling Software Serial Numberl


DOWNLOAD ——— https://urlgoal.com/2uyMFK



-
- 1fdad05405
-
-
-

diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Free Pdf Reader Software For Nokia 5130.md b/spaces/gotiQspiryo/whisper-ui/examples/Free Pdf Reader Software For Nokia 5130.md deleted file mode 100644 index 4214da653dd89d30506d16c46aa20cd8fd53bc40..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Free Pdf Reader Software For Nokia 5130.md +++ /dev/null @@ -1,6 +0,0 @@ -
-

Phone Calls Recorder - 1. By downloading the software You agree to the terms of use which are laid out below. 2. You download, install and use RiAySofts Phone Call Recorder on your own risk. RiAySofts is not responsible or liable for any damage, consequential loss or failure that may arise from running the software on your system. You waive any claims against RiAySofts that may result from such incidents. 3. RiAySofts assume no liability or warrantee for any kind of damage that may result from defective software, incorrect or incomplete instructions, faulty links or other deficient content or functions. 4. RiAySofts is not liable for damages that result from any errors, in particular regarding the mentioned licences for freeware, shareware, demosoftware, etc. 5. RiAySofts is not liable for any damages that result from download and use of RiAySofts Phone Call Recorder.



-

free pdf reader software for nokia 5130


Download File »»» https://urlgoal.com/2uyMOr



-

Nokia PC Suite is a free management software for Nokia mobile phones developed for Microsoft Windows PC computers and laptops. You can manage your mobile content on a PC desktop screen with Nokia PC Suite. The freeware lets you transfer items between your mobile device and PC.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/gradio/base/theme_dropdown.py b/spaces/gradio/base/theme_dropdown.py deleted file mode 100644 index 6235388fd00549553df44028f3ccf03e946994ea..0000000000000000000000000000000000000000 --- a/spaces/gradio/base/theme_dropdown.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import pathlib - -from gradio.themes.utils import ThemeAsset - - -def create_theme_dropdown(): - import gradio as gr - - asset_path = pathlib.Path(__file__).parent / "themes" - themes = [] - for theme_asset in os.listdir(str(asset_path)): - themes.append( - (ThemeAsset(theme_asset), gr.Theme.load(str(asset_path / theme_asset))) - ) - - def make_else_if(theme_asset): - return f""" - else if (theme == '{str(theme_asset[0].version)}') {{ - var theme_css = `{theme_asset[1]._get_theme_css()}` - }}""" - - head, tail = themes[0], themes[1:] - if_statement = f""" - if (theme == "{str(head[0].version)}") {{ - var theme_css = `{head[1]._get_theme_css()}` - }} {" ".join(make_else_if(t) for t in tail)} - """ - - latest_to_oldest = sorted([t[0] for t in themes], key=lambda asset: asset.version)[ - ::-1 - ] - latest_to_oldest = [str(t.version) for t in latest_to_oldest] - - component = gr.Dropdown( - choices=latest_to_oldest, - value=latest_to_oldest[0], - render=False, - label="Select Version", - ).style(container=False) - - return ( - component, - f""" - (theme) => {{ - if (!document.querySelector('.theme-css')) {{ - var theme_elem = document.createElement('style'); - theme_elem.classList.add('theme-css'); - document.head.appendChild(theme_elem); - }} else {{ - var theme_elem = document.querySelector('.theme-css'); - }} - {if_statement} - theme_elem.innerHTML = theme_css; - }} - """, - ) diff --git a/spaces/gradio/chatbot_multimodal/README.md b/spaces/gradio/chatbot_multimodal/README.md deleted file mode 100644 index b85e98fcfd4cdd85a01fd61fdbcb6e3a9a392feb..0000000000000000000000000000000000000000 --- a/spaces/gradio/chatbot_multimodal/README.md +++ /dev/null @@ 
-1,12 +0,0 @@ - ---- -title: chatbot_multimodal -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 4.1.2 -app_file: run.py -pinned: false -hf_oauth: true ---- diff --git a/spaces/gronkomatic/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md b/spaces/gronkomatic/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md deleted file mode 100644 index 9af54dca9f1956d33877bf7df09b34c2d6ddeeaf..0000000000000000000000000000000000000000 --- a/spaces/gronkomatic/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Animation Using Thin Plate Spline Motion Model -emoji: 👁 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false -duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/gwang-kim/DATID-3D/eg3d/training/training_loop.py b/spaces/gwang-kim/DATID-3D/eg3d/training/training_loop.py deleted file mode 100644 index 1338bb7dd4359dd0f21679e059534aeed9780890..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/eg3d/training/training_loop.py +++ /dev/null @@ -1,475 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. 
- -"""Main training loop.""" - -import os -import time -import copy -import json -import pickle -import psutil -import PIL.Image -import numpy as np -import torch -import dnnlib -from torch_utils import misc -from torch_utils import training_stats -from torch_utils.ops import conv2d_gradfix -from torch_utils.ops import grid_sample_gradfix - -import legacy -from metrics import metric_main -from camera_utils import LookAtPoseSampler -from training.crosssection_utils import sample_cross_section - -#---------------------------------------------------------------------------- - -def setup_snapshot_image_grid(training_set, random_seed=0): - rnd = np.random.RandomState(random_seed) - gw = np.clip(7680 // training_set.image_shape[2], 7, 32) - gh = np.clip(4320 // training_set.image_shape[1], 4, 32) - - # No labels => show random subset of training samples. - if not training_set.has_labels: - all_indices = list(range(len(training_set))) - rnd.shuffle(all_indices) - grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)] - - else: - # Group training samples by label. - label_groups = dict() # label => [idx, ...] - for idx in range(len(training_set)): - label = tuple(training_set.get_details(idx).raw_label.flat[::-1]) - if label not in label_groups: - label_groups[label] = [] - label_groups[label].append(idx) - - # Reorder. - label_order = list(label_groups.keys()) - rnd.shuffle(label_order) - for label in label_order: - rnd.shuffle(label_groups[label]) - - # Organize into grid. - grid_indices = [] - for y in range(gh): - label = label_order[y % len(label_order)] - indices = label_groups[label] - grid_indices += [indices[x % len(indices)] for x in range(gw)] - label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))] - - # Load data. 
- images, labels = zip(*[training_set[i] for i in grid_indices]) - return (gw, gh), np.stack(images), np.stack(labels) - -#---------------------------------------------------------------------------- - -def save_image_grid(img, fname, drange, grid_size): - lo, hi = drange - img = np.asarray(img, dtype=np.float32) - img = (img - lo) * (255 / (hi - lo)) - img = np.rint(img).clip(0, 255).astype(np.uint8) - - gw, gh = grid_size - _N, C, H, W = img.shape - img = img.reshape([gh, gw, C, H, W]) - img = img.transpose(0, 3, 1, 4, 2) - img = img.reshape([gh * H, gw * W, C]) - - assert C in [1, 3] - if C == 1: - PIL.Image.fromarray(img[:, :, 0], 'L').save(fname) - if C == 3: - PIL.Image.fromarray(img, 'RGB').save(fname) - -#---------------------------------------------------------------------------- - -def training_loop( - run_dir = '.', # Output directory. - training_set_kwargs = {}, # Options for training set. - data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader. - G_kwargs = {}, # Options for generator network. - D_kwargs = {}, # Options for discriminator network. - G_opt_kwargs = {}, # Options for generator optimizer. - D_opt_kwargs = {}, # Options for discriminator optimizer. - augment_kwargs = None, # Options for augmentation pipeline. None = disable. - loss_kwargs = {}, # Options for loss function. - metrics = [], # Metrics to evaluate during training. - random_seed = 0, # Global random seed. - num_gpus = 1, # Number of GPUs participating in the training. - rank = 0, # Rank of the current process in [0, num_gpus[. - batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus. - batch_gpu = 4, # Number of samples processed at a time by one GPU. - ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights. - ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup. - G_reg_interval = None, # How often to perform regularization for G? None = disable lazy regularization. 
- D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization. - augment_p = 0, # Initial value of augmentation probability. - ada_target = None, # ADA target value. None = fixed p. - ada_interval = 4, # How often to perform ADA adjustment? - ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit. - total_kimg = 25000, # Total length of the training, measured in thousands of real images. - kimg_per_tick = 4, # Progress snapshot interval. - image_snapshot_ticks = 50, # How often to save image snapshots? None = disable. - network_snapshot_ticks = 50, # How often to save network snapshots? None = disable. - resume_pkl = None, # Network pickle to resume training from. - resume_kimg = 0, # First kimg to report when resuming training. - cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark? - abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks. - progress_fn = None, # Callback function for updating training progress. Called for all ranks. - freeze_dec_sr = False, -): - # Initialize. - start_time = time.time() - device = torch.device('cuda', rank) - np.random.seed(random_seed * num_gpus + rank) - torch.manual_seed(random_seed * num_gpus + rank) - torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed. - torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy. - torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy. - torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False # Improves numerical accuracy. - conv2d_gradfix.enabled = True # Improves training speed. # TODO: ENABLE - grid_sample_gradfix.enabled = False # Avoids errors with the augmentation pipe. - - # Load training set. 
- if rank == 0: - print('Loading training set...') - training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset - training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed) - training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs)) - if rank == 0: - print() - print('Num images: ', len(training_set)) - print('Image shape:', training_set.image_shape) - print('Label shape:', training_set.label_shape) - print() - - # Construct networks. - if rank == 0: - print('Constructing networks...') - common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels) - G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module - G.register_buffer('dataset_label_std', torch.tensor(training_set.get_label_std()).to(device)) - D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module - G_ema = copy.deepcopy(G).eval() - if freeze_dec_sr: - for p in G.renderer.parameters(): - p.requires_grad_(False) - for p in G.ray_sampler.parameters(): - p.requires_grad_(False) - for p in G.superresolution.parameters(): - p.requires_grad_(False) - for p in G.decoder.parameters(): - p.requires_grad_(False) - - - # Resume from existing pickle. - if (resume_pkl is not None) and (rank == 0): - print(f'Resuming from "{resume_pkl}"') - with dnnlib.util.open_url(resume_pkl) as f: - resume_data = legacy.load_network_pkl(f) - for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]: - misc.copy_params_and_buffers(resume_data[name], module, require_all=False) - - # Print network summary tables. 
- if rank == 0: - z = torch.empty([batch_gpu, G.z_dim], device=device) - c = torch.empty([batch_gpu, G.c_dim], device=device) - img = misc.print_module_summary(G, [z, c]) - misc.print_module_summary(D, [img, c]) - - # Setup augmentation. - if rank == 0: - print('Setting up augmentation...') - augment_pipe = None - ada_stats = None - if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None): - augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module - augment_pipe.p.copy_(torch.as_tensor(augment_p)) - if ada_target is not None: - ada_stats = training_stats.Collector(regex='Loss/signs/real') - - # Distribute across GPUs. - if rank == 0: - print(f'Distributing across {num_gpus} GPUs...') - for module in [G, D, G_ema, augment_pipe]: - if module is not None: - for param in misc.params_and_buffers(module): - if param.numel() > 0 and num_gpus > 1: - torch.distributed.broadcast(param, src=0) - - # Setup training phases. - if rank == 0: - print('Setting up training phases...') - loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe, **loss_kwargs) # subclass of training.loss.Loss - phases = [] - for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]: - if reg_interval is None: - opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer - phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)] - else: # Lazy regularization. 
- mb_ratio = reg_interval / (reg_interval + 1) - opt_kwargs = dnnlib.EasyDict(opt_kwargs) - opt_kwargs.lr = opt_kwargs.lr * mb_ratio - opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas] - opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer - phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)] - phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)] - for phase in phases: - phase.start_event = None - phase.end_event = None - if rank == 0: - phase.start_event = torch.cuda.Event(enable_timing=True) - phase.end_event = torch.cuda.Event(enable_timing=True) - - # Export sample images. - grid_size = None - grid_z = None - grid_c = None - if rank == 0: - print('Exporting sample images...') - grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set) - save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size) - grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu) - grid_c = torch.from_numpy(labels).to(device).split(batch_gpu) - - # Initialize logs. - if rank == 0: - print('Initializing logs...') - stats_collector = training_stats.Collector(regex='.*') - stats_metrics = dict() - stats_jsonl = None - stats_tfevents = None - if rank == 0: - stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt') - try: - import torch.utils.tensorboard as tensorboard - stats_tfevents = tensorboard.SummaryWriter(run_dir) - except ImportError as err: - print('Skipping tfevents export:', err) - - # Train. - if rank == 0: - print(f'Training for {total_kimg} kimg...') - print() - cur_nimg = resume_kimg * 1000 - cur_tick = 0 - tick_start_nimg = cur_nimg - tick_start_time = time.time() - maintenance_time = tick_start_time - start_time - batch_idx = 0 - if progress_fn is not None: - progress_fn(0, total_kimg) - while True: - - # Fetch training data. 
- with torch.autograd.profiler.record_function('data_fetch'): - phase_real_img, phase_real_c = next(training_set_iterator) - phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu) - phase_real_c = phase_real_c.to(device).split(batch_gpu) - all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device) - all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)] - all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)] - all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device) - all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)] - - # Execute training phases. - for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c): - if batch_idx % phase.interval != 0: - continue - if phase.start_event is not None: - phase.start_event.record(torch.cuda.current_stream(device)) - - # Accumulate gradients. - phase.opt.zero_grad(set_to_none=True) - phase.module.requires_grad_(True) - for real_img, real_c, gen_z, gen_c in zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c): - loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg) - phase.module.requires_grad_(False) - - # Update weights. - with torch.autograd.profiler.record_function(phase.name + '_opt'): - params = [param for param in phase.module.parameters() if param.numel() > 0 and param.grad is not None] - if len(params) > 0: - flat = torch.cat([param.grad.flatten() for param in params]) - if num_gpus > 1: - torch.distributed.all_reduce(flat) - flat /= num_gpus - misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat) - grads = flat.split([param.numel() for param in params]) - for param, grad in zip(params, grads): - param.grad = grad.reshape(param.shape) - phase.opt.step() - - # Phase done. 
- if phase.end_event is not None: - phase.end_event.record(torch.cuda.current_stream(device)) - - # Update G_ema. - with torch.autograd.profiler.record_function('Gema'): - ema_nimg = ema_kimg * 1000 - if ema_rampup is not None: - ema_nimg = min(ema_nimg, cur_nimg * ema_rampup) - ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8)) - for p_ema, p in zip(G_ema.parameters(), G.parameters()): - p_ema.copy_(p.lerp(p_ema, ema_beta)) - for b_ema, b in zip(G_ema.buffers(), G.buffers()): - b_ema.copy_(b) - G_ema.neural_rendering_resolution = G.neural_rendering_resolution - G_ema.rendering_kwargs = G.rendering_kwargs.copy() - - # Update state. - cur_nimg += batch_size - batch_idx += 1 - - # Execute ADA heuristic. - if (ada_stats is not None) and (batch_idx % ada_interval == 0): - ada_stats.update() - adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000) - augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device))) - - # Perform maintenance tasks once per tick. - done = (cur_nimg >= total_kimg * 1000) - if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000): - continue - - # Print status line, accumulating the same information in training_stats. 
- tick_end_time = time.time() - fields = [] - fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"] - fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"] - fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"] - fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"] - fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"] - fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"] - fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"] - fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"] - fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"] - torch.cuda.reset_peak_memory_stats() - fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"] - training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60)) - training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60)) - if rank == 0: - print(' '.join(fields)) - - # Check for abort. - if (not done) and (abort_fn is not None) and abort_fn(): - done = True - if rank == 0: - print() - print('Aborting...') - - # Save image snapshot. 
- if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0): - out = [G_ema(z=z, c=c, noise_mode='const') for z, c in zip(grid_z, grid_c)] - images = torch.cat([o['image'].cpu() for o in out]).numpy() - images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy() - images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy() - save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size) - save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_raw.png'), drange=[-1,1], grid_size=grid_size) - save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_depth.png'), drange=[images_depth.min(), images_depth.max()], grid_size=grid_size) - - #-------------------- - # # Log forward-conditioned images - - # forward_cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, torch.tensor([0, 0, 0.2], device=device), radius=2.7, device=device) - # intrinsics = torch.tensor([[4.2647, 0, 0.5], [0, 4.2647, 0.5], [0, 0, 1]], device=device) - # forward_label = torch.cat([forward_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) - - # grid_ws = [G_ema.mapping(z, forward_label.expand(z.shape[0], -1)) for z, c in zip(grid_z, grid_c)] - # out = [G_ema.synthesis(ws, c=c, noise_mode='const') for ws, c in zip(grid_ws, grid_c)] - - # images = torch.cat([o['image'].cpu() for o in out]).numpy() - # images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy() - # images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy() - # save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_f.png'), drange=[-1,1], grid_size=grid_size) - # save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_raw_f.png'), drange=[-1,1], grid_size=grid_size) - # save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_depth_f.png'), drange=[images_depth.min(), 
images_depth.max()], grid_size=grid_size) - - #-------------------- - # # Log Cross sections - - # grid_ws = [G_ema.mapping(z, c.expand(z.shape[0], -1)) for z, c in zip(grid_z, grid_c)] - # out = [sample_cross_section(G_ema, ws, w=G.rendering_kwargs['box_warp']) for ws, c in zip(grid_ws, grid_c)] - # crossections = torch.cat([o.cpu() for o in out]).numpy() - # save_image_grid(crossections, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_crossection.png'), drange=[-50,100], grid_size=grid_size) - - # Save network snapshot. - snapshot_pkl = None - snapshot_data = None - if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0): - snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs)) - for name, module in [('G', G), ('D', D), ('G_ema', G_ema), ('augment_pipe', augment_pipe)]: - if module is not None: - if num_gpus > 1: - misc.check_ddp_consistency(module, ignore_regex=r'.*\.[^.]+_(avg|ema)') - module = copy.deepcopy(module).eval().requires_grad_(False).cpu() - snapshot_data[name] = module - del module # conserve memory - snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl') - if rank == 0: - with open(snapshot_pkl, 'wb') as f: - pickle.dump(snapshot_data, f) - - # Evaluate metrics. - if (snapshot_data is not None) and (len(metrics) > 0): - if rank == 0: - print(run_dir) - print('Evaluating metrics...') - for metric in metrics: - result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'], - dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device) - if rank == 0: - metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl) - stats_metrics.update(result_dict.results) - del snapshot_data # conserve memory - - # Collect statistics. 
- for phase in phases: - value = [] - if (phase.start_event is not None) and (phase.end_event is not None): - phase.end_event.synchronize() - value = phase.start_event.elapsed_time(phase.end_event) - training_stats.report0('Timing/' + phase.name, value) - stats_collector.update() - stats_dict = stats_collector.as_dict() - - # Update logs. - timestamp = time.time() - if stats_jsonl is not None: - fields = dict(stats_dict, timestamp=timestamp) - stats_jsonl.write(json.dumps(fields) + '\n') - stats_jsonl.flush() - if stats_tfevents is not None: - global_step = int(cur_nimg / 1e3) - walltime = timestamp - start_time - for name, value in stats_dict.items(): - stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime) - for name, value in stats_metrics.items(): - stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime) - stats_tfevents.flush() - if progress_fn is not None: - progress_fn(cur_nimg // 1000, total_kimg) - - # Update state. - cur_tick += 1 - tick_start_nimg = cur_nimg - tick_start_time = time.time() - maintenance_time = tick_start_time - tick_end_time - if done: - break - - # Done. 
- if rank == 0: - print() - print('Exiting...') - -#---------------------------------------------------------------------------- diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/configs/global_config.py b/spaces/gyugnsu/DragGan-Inversion/PTI/configs/global_config.py deleted file mode 100644 index bf3a20e61b0baf5e85377570cdf0f235bade21bd..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/configs/global_config.py +++ /dev/null @@ -1,12 +0,0 @@ -# Device -cuda_visible_devices = '0' -device = 'cuda:0' - -# Logs -training_step = 1 -image_rec_result_log_snapshot = 100 -pivotal_training_steps = 0 -model_snapshot_interval = 400 - -# Run name to be updated during PTI -run_name = '' diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/utils/train_boundary.py b/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/utils/train_boundary.py deleted file mode 100644 index 710d062bc4b42913fcc5b12bd545e47af00c7123..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/utils/train_boundary.py +++ /dev/null @@ -1,158 +0,0 @@ - -import numpy as np -from sklearn import svm - - - - - -def train_boundary(latent_codes, - scores, - chosen_num_or_ratio=0.02, - split_ratio=0.7, - invalid_value=None, - logger=None, - logger_name='train_boundary'): - """Trains boundary in latent space with offline predicted attribute scores. - - Given a collection of latent codes and the attribute scores predicted from the - corresponding images, this function will train a linear SVM by treating it as - a bi-classification problem. Basically, the samples with highest attribute - scores are treated as positive samples, while those with lowest scores as - negative. For now, the latent code can ONLY be with 1 dimension. - - NOTE: The returned boundary is with shape (1, latent_space_dim), and also - normalized with unit norm. 
- - Args: - latent_codes: Input latent codes as training data. - scores: Input attribute scores used to generate training labels. - chosen_num_or_ratio: How many samples will be chosen as positive (negative) - samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio * - latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio, - 0.5 * latent_codes_num)` will be used. (default: 0.02) - split_ratio: Ratio to split training and validation sets. (default: 0.7) - invalid_value: This field is used to filter out data. (default: None) - logger: Logger for recording log messages. If set as `None`, a default - logger, which prints messages from all levels to screen, will be created. - (default: None) - - Returns: - A decision boundary with type `numpy.ndarray`. - - Raises: - ValueError: If the input `latent_codes` or `scores` are with invalid format. - """ -# if not logger: -# logger = setup_logger(work_dir='', logger_name=logger_name) - - if (not isinstance(latent_codes, np.ndarray) or - not len(latent_codes.shape) == 2): - raise ValueError(f'Input `latent_codes` should be with type' - f'`numpy.ndarray`, and shape [num_samples, ' - f'latent_space_dim]!') - num_samples = latent_codes.shape[0] - latent_space_dim = latent_codes.shape[1] - if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or - not scores.shape[0] == num_samples or not scores.shape[1] == 1): - raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and ' - f'shape [num_samples, 1], where `num_samples` should be ' - f'exactly same as that of input `latent_codes`!') - if chosen_num_or_ratio <= 0: - raise ValueError(f'Input `chosen_num_or_ratio` should be positive, ' - f'but {chosen_num_or_ratio} received!') - -# logger.info(f'Filtering training data.') - print('Filtering training data.') - if invalid_value is not None: - latent_codes = latent_codes[scores[:, 0] != invalid_value] - scores = scores[scores[:, 0] != invalid_value] - -# logger.info(f'Sorting 
scores to get positive and negative samples.') - print('Sorting scores to get positive and negative samples.') - - sorted_idx = np.argsort(scores, axis=0)[::-1, 0] - latent_codes = latent_codes[sorted_idx] - scores = scores[sorted_idx] - num_samples = latent_codes.shape[0] - if 0 < chosen_num_or_ratio <= 1: - chosen_num = int(num_samples * chosen_num_or_ratio) - else: - chosen_num = int(chosen_num_or_ratio) - chosen_num = min(chosen_num, num_samples // 2) - -# logger.info(f'Spliting training and validation sets:') - print('Filtering training data.') - - train_num = int(chosen_num * split_ratio) - val_num = chosen_num - train_num - # Positive samples. - positive_idx = np.arange(chosen_num) - np.random.shuffle(positive_idx) - positive_train = latent_codes[:chosen_num][positive_idx[:train_num]] - positive_val = latent_codes[:chosen_num][positive_idx[train_num:]] - # Negative samples. - negative_idx = np.arange(chosen_num) - np.random.shuffle(negative_idx) - negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]] - negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]] - # Training set. - train_data = np.concatenate([positive_train, negative_train], axis=0) - train_label = np.concatenate([np.ones(train_num, dtype=np.int), - np.zeros(train_num, dtype=np.int)], axis=0) -# logger.info(f' Training: {train_num} positive, {train_num} negative.') - print(f' Training: {train_num} positive, {train_num} negative.') - # Validation set. - val_data = np.concatenate([positive_val, negative_val], axis=0) - val_label = np.concatenate([np.ones(val_num, dtype=np.int), - np.zeros(val_num, dtype=np.int)], axis=0) -# logger.info(f' Validation: {val_num} positive, {val_num} negative.') - print(f' Validation: {val_num} positive, {val_num} negative.') - - # Remaining set. 
- remaining_num = num_samples - chosen_num * 2 - remaining_data = latent_codes[chosen_num:-chosen_num] - remaining_scores = scores[chosen_num:-chosen_num] - decision_value = (scores[0] + scores[-1]) / 2 - remaining_label = np.ones(remaining_num, dtype=np.int) - remaining_label[remaining_scores.ravel() < decision_value] = 0 - remaining_positive_num = np.sum(remaining_label == 1) - remaining_negative_num = np.sum(remaining_label == 0) -# logger.info(f' Remaining: {remaining_positive_num} positive, ' -# f'{remaining_negative_num} negative.') - print(f' Remaining: {remaining_positive_num} positive, ' - f'{remaining_negative_num} negative.') -# logger.info(f'Training boundary.') - print(f'Training boundary.') - - clf = svm.SVC(kernel='linear') - classifier = clf.fit(train_data, train_label) -# logger.info(f'Finish training.') - print(f'Finish training.') - - - if val_num: - val_prediction = classifier.predict(val_data) - correct_num = np.sum(val_label == val_prediction) -# logger.info(f'Accuracy for validation set: ' -# f'{correct_num} / {val_num * 2} = ' -# f'{correct_num / (val_num * 2):.6f}') - print(f'Accuracy for validation set: ' - f'{correct_num} / {val_num * 2} = ' - f'{correct_num / (val_num * 2):.6f}') - vacc=correct_num/len(val_label) - ''' - if remaining_num: - remaining_prediction = classifier.predict(remaining_data) - correct_num = np.sum(remaining_label == remaining_prediction) - logger.info(f'Accuracy for remaining set: ' - f'{correct_num} / {remaining_num} = ' - f'{correct_num / remaining_num:.6f}') - ''' - a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32) - return a / np.linalg.norm(a),vacc - - - - - diff --git a/spaces/h2oai/wave-tour/examples/hash_routing_parameters.py b/spaces/h2oai/wave-tour/examples/hash_routing_parameters.py deleted file mode 100644 index c2329d1522623d956e6f1480ce26ceed075255e9..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/hash_routing_parameters.py +++ /dev/null @@ -1,67 +0,0 
@@ -# Routing / Hash / Parameters -# Use the browser's [location hash](https://developer.mozilla.org/en-US/docs/Web/API/Location/hash) -# for #routing using URLs, with parameters. -# --- -from h2o_wave import main, app, Q, ui, on, handle_on - -air_passengers_fields = ['Year', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun'] -air_passengers_rows = [ - ['1949', '112', '118', '132', '129', '121', '135'], - ['1950', '115', '126', '141', '135', '125', '149'], - ['1951', '145', '150', '178', '163', '172', '178'], - ['1952', '171', '180', '193', '181', '183', '218'], - ['1953', '196', '196', '236', '235', '229', '243'], - ['1954', '204', '188', '235', '227', '234', '264'], - ['1955', '242', '233', '267', '269', '270', '315'], - ['1956', '284', '277', '317', '313', '318', '374'], - ['1957', '315', '301', '356', '348', '355', '422'], - ['1958', '340', '318', '362', '348', '363', '435'], - ['1959', '360', '342', '406', '396', '420', '472'], - ['1960', '417', '391', '419', '461', '472', '535'], -] - - -def make_markdown_row(values): - return f"| {' | '.join([str(x) for x in values])} |" - - -def make_markdown_table(fields, rows): - return '\n'.join([ - make_markdown_row(fields), - make_markdown_row('-' * len(fields)), - '\n'.join([make_markdown_row(row) for row in rows]), - ]) - - -def add_links_to_cells(rows): - return [[f'[{cell}](#row{i + 1}/col{j + 1})' for j, cell in enumerate(row)] for i, row in enumerate(rows)] - - -@on('#row{row:int}/col{col:int}') -async def print_clicked_cell(q: Q, row: int, col: int): - q.page['message'].content = f'You clicked on row {row}, column {col}!' 
- - -@app('/demo') -async def serve(q: Q): - if not q.client.initialized: - q.client.initialized = True - q.page['table'] = ui.form_card( - box='1 1 4 7', - items=[ - ui.text_l('Airline Passenger Counts'), - ui.text(make_markdown_table( - fields=air_passengers_fields, - rows=add_links_to_cells(air_passengers_rows), - )), - ], - ) - q.page['message'] = ui.markdown_card( - box='1 8 4 1', - title='', - content='Click on a cell in the table above!', - ) - - await handle_on(q) - - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/label.py b/spaces/h2oai/wave-tour/examples/label.py deleted file mode 100644 index 77e80ce46f3c1a54916e974b79dc0eed57270982..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/label.py +++ /dev/null @@ -1,17 +0,0 @@ -# Form / Label -# Use labels to give a name to a component or a group of components in a #form. -# #label -# --- -from h2o_wave import site, ui - -page = site['/demo'] - -page['example'] = ui.form_card( - box='1 1 4 7', - items=[ - ui.label(label='Standard Label'), - ui.label(label='Required Label', required=True), - ui.label(label='Disabled Label', disabled=True), - ] -) -page.save() diff --git a/spaces/h2oai/wave-tour/examples/meta_script.py b/spaces/h2oai/wave-tour/examples/meta_script.py deleted file mode 100644 index cde010d4925764da0d76ae5085d9396b059c4606..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/meta_script.py +++ /dev/null @@ -1,72 +0,0 @@ -# Meta / Script -# Load external Javascript libraries. -# --- - -from h2o_wave import site, ui - -# This example displays animated text using using anime.js (https://animejs.com/). -# Original example by Tobias Ahlin https://tobiasahlin.com/moving-letters/#2 - -page = site['/demo'] - -html = ''' - - -

Moving Letters!

-''' - -script = ''' -// Wrap every letter in a span -var textWrapper = document.querySelector('.anim'); -textWrapper.innerHTML = textWrapper.textContent.replace(/\S/g, "$&"); - -anime.timeline({loop: true}) - .add({ - targets: '.anim .letter', - scale: [4,1], - opacity: [0,1], - translateZ: 0, - easing: "easeOutExpo", - duration: 950, - delay: (el, i) => 70*i - }).add({ - targets: '.anim', - opacity: 0, - duration: 1000, - easing: "easeOutExpo", - delay: 1000 - }); -''' - -# Add a placeholder for the animation. -page['example'] = ui.markup_card( - box='1 1 6 8', - title='Animation', - content=html, -) - -# Add the script to the page. -page['meta'] = ui.meta_card( - box='', - # Load anime.js - scripts=[ui.script(path='https://cdnjs.cloudflare.com/ajax/libs/animejs/2.0.2/anime.min.js')], - script=ui.inline_script( - # The Javascript code for this script. - content=script, - # Execute this script only if the 'anime' library is available. - requires=['anime'], - # Execute this script only if the 'animation' element is available. 
- targets=['animation'], - )) - -page.save() diff --git a/spaces/haakohu/deep_privacy2/configs/datasets/utils.py b/spaces/haakohu/deep_privacy2/configs/datasets/utils.py deleted file mode 100644 index 6704b12a5e379b707b2c6d3dc9f78431ce01e61d..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/configs/datasets/utils.py +++ /dev/null @@ -1,21 +0,0 @@ -from dp2.metrics.ppl import calculate_ppl -from dp2.metrics.torch_metrics import compute_metrics_iteratively -from dp2.metrics.fid_clip import compute_fid_clip - - -def final_eval_fn(*args, **kwargs): - result = compute_metrics_iteratively(*args, **kwargs) - result2 = calculate_ppl(*args, **kwargs,) - result3 = compute_fid_clip(*args, **kwargs) - assert all(key not in result for key in result2) - result.update(result2) - result.update(result3) - return result - - -def train_eval_fn(*args, **kwargs): - result = compute_metrics_iteratively(*args, **kwargs) - result2 = compute_fid_clip(*args, **kwargs) - assert all(key not in result for key in result2) - result.update(result2) - return result \ No newline at end of file diff --git a/spaces/hadisalman/photoguard/app.py b/spaces/hadisalman/photoguard/app.py deleted file mode 100644 index c259121d814a5fe8a3a168bb8dfa272cbe960cb9..0000000000000000000000000000000000000000 --- a/spaces/hadisalman/photoguard/app.py +++ /dev/null @@ -1,192 +0,0 @@ -from io import BytesIO -import requests -import gradio as gr -import requests -import torch -from tqdm import tqdm -from PIL import Image, ImageOps -from diffusers import StableDiffusionInpaintPipeline -from torchvision.transforms import ToPILImage -from utils import preprocess, prepare_mask_and_masked_image, recover_image, resize_and_crop - -gr.close_all() -topil = ToPILImage() - -pipe_inpaint = StableDiffusionInpaintPipeline.from_pretrained( - "runwayml/stable-diffusion-inpainting", - revision="fp16", - torch_dtype=torch.float16, -) -pipe_inpaint = pipe_inpaint.to("cuda") - -## Good params for editing that we 
used all over the paper --> decent quality and speed -GUIDANCE_SCALE = 7.5 -NUM_INFERENCE_STEPS = 100 -DEFAULT_SEED = 1234 - -def pgd(X, targets, model, criterion, eps=0.1, step_size=0.015, iters=40, clamp_min=0, clamp_max=1, mask=None): - X_adv = X.clone().detach() + (torch.rand(*X.shape)*2*eps-eps).cuda() - pbar = tqdm(range(iters)) - for i in pbar: - actual_step_size = step_size - (step_size - step_size / 100) / iters * i - X_adv.requires_grad_(True) - - loss = (model(X_adv).latent_dist.mean - targets).norm() - pbar.set_description(f"Loss {loss.item():.5f} | step size: {actual_step_size:.4}") - - grad, = torch.autograd.grad(loss, [X_adv]) - - X_adv = X_adv - grad.detach().sign() * actual_step_size - X_adv = torch.minimum(torch.maximum(X_adv, X - eps), X + eps) - X_adv.data = torch.clamp(X_adv, min=clamp_min, max=clamp_max) - X_adv.grad = None - - if mask is not None: - X_adv.data *= mask - - return X_adv - -def get_target(): - target_url = 'https://www.rtings.com/images/test-materials/2015/204_Gray_Uniformity.png' - response = requests.get(target_url) - target_image = Image.open(BytesIO(response.content)).convert("RGB") - target_image = target_image.resize((512, 512)) - return target_image - -def immunize_fn(init_image, mask_image): - with torch.autocast('cuda'): - mask, X = prepare_mask_and_masked_image(init_image, mask_image) - X = X.half().cuda() - mask = mask.half().cuda() - - targets = pipe_inpaint.vae.encode(preprocess(get_target()).half().cuda()).latent_dist.mean - - adv_X = pgd(X, - targets = targets, - model=pipe_inpaint.vae.encode, - criterion=torch.nn.MSELoss(), - clamp_min=-1, - clamp_max=1, - eps=0.12, - step_size=0.01, - iters=200, - mask=1-mask - ) - - adv_X = (adv_X / 2 + 0.5).clamp(0, 1) - - adv_image = topil(adv_X[0]).convert("RGB") - adv_image = recover_image(adv_image, init_image, mask_image, background=True) - return adv_image - -def run(image, prompt, seed, guidance_scale, num_inference_steps, immunize=False): - if seed == '': - seed = 
DEFAULT_SEED - else: - seed = int(seed) - torch.manual_seed(seed) - - init_image = Image.fromarray(image['image']) - init_image = resize_and_crop(init_image, (512,512)) - mask_image = ImageOps.invert(Image.fromarray(image['mask']).convert('RGB')) - mask_image = resize_and_crop(mask_image, init_image.size) - - if immunize: - immunized_image = immunize_fn(init_image, mask_image) - - image_edited = pipe_inpaint(prompt=prompt, - image=init_image if not immunize else immunized_image, - mask_image=mask_image, - height = init_image.size[0], - width = init_image.size[1], - eta=1, - guidance_scale=guidance_scale, - num_inference_steps=num_inference_steps, - ).images[0] - - image_edited = recover_image(image_edited, init_image, mask_image) - - if immunize: - return [(immunized_image, 'Immunized Image'), (image_edited, 'Edited After Immunization')] - else: - return [(image_edited, 'Edited Image (Without Immunization)')] - - -description='''Demo of our paper:
-**Raising the Cost of Malicious AI-Powered Image Editing**
-*[Hadi Salman](https://twitter.com/hadisalmanX), [Alaa Khaddaj](https://twitter.com/Alaa_Khaddaj), [Guillaume Leclerc](https://twitter.com/gpoleclerc), [Andrew Ilyas](https://twitter.com/andrew_ilyas), [Aleksander Madry](https://twitter.com/aleks_madry)*
-MIT   [Paper](https://arxiv.org/abs/2302.06588) -  [Blog post](https://gradientscience.org/photoguard/) -  [![](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/MadryLab/photoguard) -
-Below you can test our (encoder attack) immunization method for making images resistant to manipulation by Stable Diffusion. This immunization process forces the model to perform unrealistic edits. - -**See Section 5 in our paper for a discussion of the intended use cases for (as well as limitations of) this tool.** -
-''' - -examples_list = [ - ['./images/hadi_and_trevor.jpg', 'man attending a wedding', '329357', GUIDANCE_SCALE, NUM_INFERENCE_STEPS], - ['./images/trevor_2.jpg', 'two men in prison', '329357', GUIDANCE_SCALE, NUM_INFERENCE_STEPS], - ['./images/elon_2.jpg', 'man in a metro station', '214213', GUIDANCE_SCALE, NUM_INFERENCE_STEPS], - ] - - -with gr.Blocks() as demo: - gr.HTML(value="""

- Interactive Demo: Raising the Cost of Malicious AI-Powered Image Editing

- """) - gr.Markdown(description) - with gr.Accordion(label='How to use (step by step):', open=False): - gr.Markdown(''' - *First, let's edit your image:* - + Upload an image (or select from the examples below) - + Use the brush to mask the parts of the image you want to keep unedited (e.g., faces of people) - + Add a prompt to guide the edit (see examples below) - + Play with the seed and click submit until you get a realistic edit that you are happy with (we provided good example seeds for you below) - - *Now, let's immunize your image and try again:* - + Click on the "Immunize" button, then submit. - + You will get an immunized version of the image (which should look essentially identical to the original one) as well as its edited version (which should now look rather unrealistic) - ''') - - with gr.Accordion(label='Example (video):', open=False): - gr.HTML(''' -
- -
- ''' - ) - - with gr.Row(): - with gr.Column(): - imgmask = gr.ImageMask(label='Drawing tool to mask regions you want to keep, e.g. faces') - prompt = gr.Textbox(label='Prompt', placeholder='A photo of a man in a wedding') - seed = gr.Textbox(label='Seed (change to get different edits)', placeholder=str(DEFAULT_SEED), visible=True) - with gr.Accordion("Advanced options (to improve quality of edits)", open=False): - scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=25.0, value=GUIDANCE_SCALE, step=0.1) - num_steps = gr.Slider(label="Number of inference steps (higher better, but slower)", minimum=10, maximum=250, value=NUM_INFERENCE_STEPS, step=5) - immunize = gr.Checkbox(label='Immunize', value=False) - b1 = gr.Button('Submit') - with gr.Column(): - genimages = gr.Gallery(label="Generated images", - show_label=False, - elem_id="gallery").style(grid=[1,2], height="auto") - duplicate = gr.HTML(""" -

For faster inference without waiting in queue, run this demo locally (instruction in our Github repo), or duplicate this space and upgrade to GPU in settings. -
- - - - Duplicate Space -

- """) - - b1.click(run, [imgmask, prompt, seed, scale, num_steps, immunize], [genimages]) - examples = gr.Examples(examples=examples_list,inputs = [imgmask, prompt, seed, scale, num_steps, immunize], outputs=[genimages], cache_examples=False, fn=run) - - -demo.launch() -# demo.launch(server_name='0.0.0.0', share=False, server_port=7860, inline=False) \ No newline at end of file diff --git a/spaces/haouarin/pdftotext/README.md b/spaces/haouarin/pdftotext/README.md deleted file mode 100644 index d01914abf9b4e0a92ac7e99c9a71ed37e1f739f6..0000000000000000000000000000000000000000 --- a/spaces/haouarin/pdftotext/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Pdftotext -emoji: 📊 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/cppipc/queue.h b/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/cppipc/queue.h deleted file mode 100644 index a21f3446e06b5826af7b554c8a7d9c5d80848b62..0000000000000000000000000000000000000000 --- a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/cppipc/queue.h +++ /dev/null @@ -1,216 +0,0 @@ -#pragma once - -#include -#include -#include // [[since C++14]]: std::exchange -#include -#include -#include -#include -#include -#include -#include // assert - -#include "libipc/def.h" -#include "libipc/shm.h" -#include "libipc/rw_lock.h" - -#include "libipc/utility/log.h" -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" - -namespace ipc { -namespace detail { - -class queue_conn { -protected: - circ::cc_t connected_ = 0; - shm::handle elems_h_; - - template - Elems* open(char const * name) { - if (name == nullptr || name[0] == '\0') { - ipc::error("fail open waiter: name is empty!\n"); - return nullptr; - } - if (!elems_h_.acquire(name, sizeof(Elems))) 
{ - return nullptr; - } - auto elems = static_cast(elems_h_.get()); - if (elems == nullptr) { - ipc::error("fail acquire elems: %s\n", name); - return nullptr; - } - elems->init(); - return elems; - } - - void close() { - elems_h_.release(); - } - -public: - queue_conn() = default; - queue_conn(const queue_conn&) = delete; - queue_conn& operator=(const queue_conn&) = delete; - - bool connected() const noexcept { - return connected_ != 0; - } - - circ::cc_t connected_id() const noexcept { - return connected_; - } - - template - auto connect(Elems* elems) noexcept - /*needs 'optional' here*/ - -> std::tuple().cursor())> { - if (elems == nullptr) return {}; - // if it's already connected, just return - if (connected()) return {connected(), false, 0}; - connected_ = elems->connect_receiver(); - return {connected(), true, elems->cursor()}; - } - - template - bool disconnect(Elems* elems) noexcept { - if (elems == nullptr) return false; - // if it's already disconnected, just return false - if (!connected()) return false; - elems->disconnect_receiver(std::exchange(connected_, 0)); - return true; - } -}; - -template -class queue_base : public queue_conn { - using base_t = queue_conn; - -public: - using elems_t = Elems; - using policy_t = typename elems_t::policy_t; - -protected: - elems_t * elems_ = nullptr; - decltype(std::declval().cursor()) cursor_ = 0; - bool sender_flag_ = false; - -public: - using base_t::base_t; - - queue_base() = default; - - explicit queue_base(char const * name) - : queue_base{} { - elems_ = open(name); - } - - explicit queue_base(elems_t * elems) noexcept - : queue_base{} { - assert(elems != nullptr); - elems_ = elems; - } - - /* not virtual */ ~queue_base() { - base_t::close(); - } - - elems_t * elems() noexcept { return elems_; } - elems_t const * elems() const noexcept { return elems_; } - - bool ready_sending() noexcept { - if (elems_ == nullptr) return false; - return sender_flag_ || (sender_flag_ = elems_->connect_sender()); - } - - void 
shut_sending() noexcept { - if (elems_ == nullptr) return; - if (!sender_flag_) return; - elems_->disconnect_sender(); - } - - bool connect() noexcept { - auto tp = base_t::connect(elems_); - if (std::get<0>(tp) && std::get<1>(tp)) { - cursor_ = std::get<2>(tp); - return true; - } - return std::get<0>(tp); - } - - bool disconnect() noexcept { - return base_t::disconnect(elems_); - } - - std::size_t conn_count() const noexcept { - return (elems_ == nullptr) ? static_cast(invalid_value) : elems_->conn_count(); - } - - bool valid() const noexcept { - return elems_ != nullptr; - } - - bool empty() const noexcept { - return !valid() || (cursor_ == elems_->cursor()); - } - - template - bool push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward

(params)...); - }); - } - - template - bool force_push(F&& prep, P&&... params) { - if (elems_ == nullptr) return false; - return elems_->force_push(this, [&](void* p) { - if (prep(p)) ::new (p) T(std::forward

(params)...); - }); - } - - template - bool pop(T& item, F&& out) { - if (elems_ == nullptr) { - return false; - } - return elems_->pop(this, &(this->cursor_), [&item](void* p) { - ::new (&item) T(std::move(*static_cast(p))); - }, std::forward(out)); - } -}; - -} // namespace detail - -template -class queue final : public detail::queue_base> { - using base_t = detail::queue_base>; - -public: - using value_t = T; - - using base_t::base_t; - - template - bool push(P&&... params) { - return base_t::template push(std::forward

(params)...); - } - - template - bool force_push(P&&... params) { - return base_t::template force_push(std::forward

(params)...); - } - - bool pop(T& item) { - return base_t::pop(item, [](bool) {}); - } - - template - bool pop(T& item, F&& out) { - return base_t::pop(item, std::forward(out)); - } -}; - -} // namespace ipc diff --git "a/spaces/hbestm/gpt-academic-play/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" "b/spaces/hbestm/gpt-academic-play/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" deleted file mode 100644 index 62f05d395bafab8638ed6963e2d24334d95ecf37..0000000000000000000000000000000000000000 --- "a/spaces/hbestm/gpt-academic-play/crazy_functions/\346\200\273\347\273\223\351\237\263\350\247\206\351\242\221.py" +++ /dev/null @@ -1,184 +0,0 @@ -from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file, get_conf -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - -def split_audio_file(filename, split_duration=1000): - """ - 根据给定的切割时长将音频文件切割成多个片段。 - - Args: - filename (str): 需要被切割的音频文件名。 - split_duration (int, optional): 每个切割音频片段的时长(以秒为单位)。默认值为1000。 - - Returns: - filelist (list): 一个包含所有切割音频片段文件路径的列表。 - - """ - from moviepy.editor import AudioFileClip - import os - os.makedirs('gpt_log/mp3/cut/', exist_ok=True) # 创建存储切割音频的文件夹 - - # 读取音频文件 - audio = AudioFileClip(filename) - - # 计算文件总时长和切割点 - total_duration = audio.duration - split_points = list(range(0, int(total_duration), split_duration)) - split_points.append(int(total_duration)) - filelist = [] - - # 切割音频文件 - for i in range(len(split_points) - 1): - start_time = split_points[i] - end_time = split_points[i + 1] - split_audio = audio.subclip(start_time, end_time) - split_audio.write_audiofile(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") - filelist.append(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") - - audio.close() - return filelist - -def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history): - import os, requests - from moviepy.editor import AudioFileClip - from 
request_llm.bridge_all import model_info - - # 设置OpenAI密钥和模型 - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - - whisper_endpoint = chat_endpoint.replace('chat/completions', 'audio/transcriptions') - url = whisper_endpoint - headers = { - 'Authorization': f"Bearer {api_key}" - } - - os.makedirs('gpt_log/mp3/', exist_ok=True) - for index, fp in enumerate(file_manifest): - audio_history = [] - # 提取文件扩展名 - ext = os.path.splitext(fp)[1] - # 提取视频中的音频 - if ext not in [".mp3", ".wav", ".m4a", ".mpga"]: - audio_clip = AudioFileClip(fp) - audio_clip.write_audiofile(f'gpt_log/mp3/output{index}.mp3') - fp = f'gpt_log/mp3/output{index}.mp3' - # 调用whisper模型音频转文字 - voice = split_audio_file(fp) - for j, i in enumerate(voice): - with open(i, 'rb') as f: - file_content = f.read() # 读取文件内容到内存 - files = { - 'file': (os.path.basename(i), file_content), - } - data = { - "model": "whisper-1", - "prompt": parse_prompt, - 'response_format': "text" - } - - chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - proxies, = get_conf('proxies') - response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text - - chatbot.append(["音频解析结果", response]) - history.extend(["音频解析结果", response]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```' - i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt=f"总结音频。音频文件名{fp}" - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.extend([i_say_show_user, gpt_say]) - audio_history.extend([i_say_show_user, gpt_say]) - - # 已经对该文章的所有片段总结完毕,如果文章被切分了 - result = "".join(audio_history) - 
if len(audio_history) > 1: - i_say = f"根据以上的对话,使用中文总结音频“{result}”的主要内容。" - i_say_show_user = f'第{index + 1}段音频的主要内容:' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=audio_history, - sys_prompt="总结文章。" - ) - - history.extend([i_say, gpt_say]) - audio_history.extend([i_say, gpt_say]) - - res = write_results_to_file(history) - chatbot.append((f"第{index + 1}段音频完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 删除中间文件夹 - import shutil - shutil.rmtree('gpt_log/mp3') - res = write_results_to_file(history) - chatbot.append(("所有音频都总结完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) - - -@CatchException -def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - try: - from moviepy.editor import AudioFileClip - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - extensions = ['.mp4', '.m4a', '.wav', '.mpga', '.mpeg', '.mp3', '.avi', '.mkv', '.flac', '.aac'] - - if txt.endswith(tuple(extensions)): - file_manifest = [txt] - else: - file_manifest = [] - for extension in extensions: - file_manifest.extend(glob.glob(f'{project_folder}/**/*{extension}', recursive=True)) - - # 如果没找到任何文件 - if len(file_manifest) == 0: - 
report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - parse_prompt = plugin_kwargs.get("advanced_arg", '将音频解析为简体中文') - yield from AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history) - - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/spaces/hekbobo/bingo/src/components/user-menu.tsx b/spaces/hekbobo/bingo/src/components/user-menu.tsx deleted file mode 100644 index 172194aaed1322ce8cf833610f9d2d8b55b23081..0000000000000000000000000000000000000000 --- a/spaces/hekbobo/bingo/src/components/user-menu.tsx +++ /dev/null @@ -1,76 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - return ( -

- - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - 设置用户 - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - 语音设置 - - - - -
版本信息 {pkg.version}
-
- - -
站点域名
-
copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
-
-
-
-
- ) -} diff --git a/spaces/hersia/youtube-video-transcription-with-whisper/app.py b/spaces/hersia/youtube-video-transcription-with-whisper/app.py deleted file mode 100644 index 680e53a7e09596ff97e073fa0510cf254cf21a0b..0000000000000000000000000000000000000000 --- a/spaces/hersia/youtube-video-transcription-with-whisper/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import whisper -from pytube import YouTube -from transformers import pipeline -import gradio as gr -import os - -model = whisper.load_model("base") -summarizer = pipeline("summarization") - -def get_audio(url): - yt = YouTube(url) - video = yt.streams.filter(only_audio=True).first() - out_file=video.download(output_path=".") - base, ext = os.path.splitext(out_file) - new_file = base+'.mp3' - os.rename(out_file, new_file) - a = new_file - return a - -def get_text(url): - result = model.transcribe(get_audio(url)) - return result['text'] - -def get_summary(url): - article = get_text(url) - b = summarizer(article) - b = b[0]['summary_text'] - return b - -with gr.Blocks() as demo: - gr.Markdown("

Youtube video transcription with OpenAI's Whisper

") - gr.Markdown("
Enter the link of any youtube video to get the transcription of the video and a summary of the video in the form of text.
") - with gr.Tab('Get the transcription of any Youtube video'): - with gr.Row(): - input_text_1 = gr.Textbox(placeholder='Enter the Youtube video URL', label='URL') - output_text_1 = gr.Textbox(placeholder='Transcription of the video', label='Transcription') - result_button_1 = gr.Button('Get Transcription') - with gr.Tab('Summary of Youtube video'): - with gr.Row(): - input_text = gr.Textbox(placeholder='Enter the Youtube video URL', label='URL') - output_text = gr.Textbox(placeholder='Summary text of the Youtube Video', label='Summary') - result_button = gr.Button('Get Summary') - - result_button.click(get_summary, inputs = input_text, outputs = output_text) - result_button_1.click(get_text, inputs = input_text_1, outputs = output_text_1) -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/huggingface/HuggingDiscussions/README.md b/spaces/huggingface/HuggingDiscussions/README.md deleted file mode 100644 index 831363a4e73f16b266e810afeb4a3fa787245edf..0000000000000000000000000000000000000000 --- a/spaces/huggingface/HuggingDiscussions/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: HuggingDiscussions -emoji: 🏢 -colorFrom: green -colorTo: green -sdk: static -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/hushell/pmf_with_gis/models/vision_transformer.py b/spaces/hushell/pmf_with_gis/models/vision_transformer.py deleted file mode 100644 index 90fda9263f034f4f318b8cdd2bc5ebd07175cd27..0000000000000000000000000000000000000000 --- a/spaces/hushell/pmf_with_gis/models/vision_transformer.py +++ /dev/null @@ -1,246 +0,0 @@ -import torch -import torch.nn as nn - -import math -from functools import partial -from .utils import trunc_normal_ - - -def drop_path(x, drop_prob: float = 0., training: bool = False): - if drop_prob == 0. 
or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - random_tensor.floor_() # binarize - output = x.div(keep_prob) * random_tensor - return output - - -class DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - """ - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - self.drop_prob = drop_prob - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training) - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return 
x, attn - - -class Block(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x, return_attention=False): - y, attn = self.attn(self.norm1(x)) - if return_attention: - return attn - x = x + self.drop_path(y) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): - super().__init__() - num_patches = (img_size // patch_size) * (img_size // patch_size) - self.img_size = img_size - self.patch_size = patch_size - self.num_patches = num_patches - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - - def forward(self, x): - B, C, H, W = x.shape - x = self.proj(x).flatten(2).transpose(1, 2) - return x - - -class VisionTransformer(nn.Module): - """ Vision Transformer """ - def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12, - num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., - drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs): - super().__init__() - self.num_features = self.embed_dim = embed_dim - - self.patch_embed = PatchEmbed( - img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) - num_patches = self.patch_embed.num_patches - - self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - 
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.blocks = nn.ModuleList([ - Block( - dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(depth)]) - self.norm = norm_layer(embed_dim) - - # Classifier head - self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() - - trunc_normal_(self.pos_embed, std=.02) - trunc_normal_(self.cls_token, std=.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - def interpolate_pos_encoding(self, x, w, h): - npatch = x.shape[1] - 1 - N = self.pos_embed.shape[1] - 1 - if npatch == N and w == h: - return self.pos_embed - class_pos_embed = self.pos_embed[:, 0] - patch_pos_embed = self.pos_embed[:, 1:] - dim = x.shape[-1] - w0 = w // self.patch_embed.patch_size - h0 = h // self.patch_embed.patch_size - # we add a small number to avoid floating point error in the interpolation - # see discussion at https://github.com/facebookresearch/dino/issues/8 - w0, h0 = w0 + 0.1, h0 + 0.1 - patch_pos_embed = nn.functional.interpolate( - patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), - scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), - mode='bicubic', - align_corners=False, - recompute_scale_factor=False - ) - assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] - patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) - return 
torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) - - def prepare_tokens(self, x, ada_token=None): - B, nc, w, h = x.shape - x = self.patch_embed(x) # patch linear embedding - - # add the [CLS] token to the embed patch tokens - cls_tokens = self.cls_token.expand(B, -1, -1) - x = torch.cat((cls_tokens, x), dim=1) - - # add positional encoding to each token - x = x + self.interpolate_pos_encoding(x, w, h) - - if ada_token is not None: - ada_tokens = ada_token.expand(B, -1, -1) # B, p, d - x = torch.cat((x, ada_tokens), dim=1) - - return self.pos_drop(x) - - def forward(self, x, ada_token=None, use_patches=False): - x = self.prepare_tokens(x, ada_token) - for blk in self.blocks: - x = blk(x) - x = self.norm(x) - - if use_patches: - return x[:, 1:] - else: - return x[:, 0] - - def get_last_selfattention(self, x): - x = self.prepare_tokens(x) - for i, blk in enumerate(self.blocks): - if i < len(self.blocks) - 1: - x = blk(x) - else: - # return attention of the last block - return blk(x, return_attention=True) - - def get_intermediate_layers(self, x, n=1): - x = self.prepare_tokens(x) - # we return the output tokens from the `n` last blocks - output = [] - for i, blk in enumerate(self.blocks): - x = blk(x) - if len(self.blocks) - i <= n: - output.append(self.norm(x)) - return output - - -def vit_tiny(patch_size=16, **kwargs): - model = VisionTransformer( - patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, - qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) - return model - - -def vit_small(patch_size=16, **kwargs): - model = VisionTransformer( - patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, - qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) - return model - - -def vit_base(patch_size=16, **kwargs): - model = VisionTransformer( - patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, - qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), 
**kwargs) - return model diff --git a/spaces/hussain-shk/IndiSent/model_configs/__init__.py b/spaces/hussain-shk/IndiSent/model_configs/__init__.py deleted file mode 100644 index 2ec41f7daeb7930e9df766abdd790c4c5b09b6d9..0000000000000000000000000000000000000000 --- a/spaces/hussain-shk/IndiSent/model_configs/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . import custom_transformer \ No newline at end of file diff --git a/spaces/iamstolas/STOLAS/src/pages/api/image.ts b/spaces/iamstolas/STOLAS/src/pages/api/image.ts deleted file mode 100644 index 4b894bea86050c0f3888cc56f60c0cb7f8b57cfc..0000000000000000000000000000000000000000 --- a/spaces/iamstolas/STOLAS/src/pages/api/image.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' -import { createImage } from '@/lib/bots/bing/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const { prompt, id } = req.query - if (!prompt) { - return res.json({ - result: { - value: 'Image', - message: 'No Prompt' - } - }) - } - try { - const headers = createHeaders(req.cookies, { - IMAGE_BING_COOKIE: process.env.IMAGE_BING_COOKIE - }) - - debug('headers', headers) - const response = await createImage(String(prompt), String(id), { - ...headers, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - }) - res.writeHead(200, { - 'Content-Type': 'text/plain; charset=UTF-8', - }) - return res.end(response) - } catch (e) { - return res.json({ - result: { - value: 'Error', - message: `${e}` - } - }) - } -} diff --git a/spaces/ilmhona/api/main.py b/spaces/ilmhona/api/main.py deleted file mode 100644 index 9296392aa5295b36e4b72bed1198b459d2dde767..0000000000000000000000000000000000000000 --- a/spaces/ilmhona/api/main.py +++ /dev/null @@ -1,218 +0,0 @@ -from fastapi import FastAPI -from pydantic import BaseModel, constr 
-import os -from fastapi.responses import JSONResponse -from openai.error import OpenAIError -from typing import List -from ml import DocumentLoader, AIEducator -from enum import Enum -import sqlite3 -from cachetools import cached, TTLCache -from fastapi.middleware.cors import CORSMiddleware -import json - -app = FastAPI() - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], # Allows all origins - allow_credentials=False, - allow_methods=["*"], # Allows all methods - allow_headers=["*"], # Allows all headers -) - -# Create a cache with a Time-To-Live (TTL) of 600 seconds (10 minutes) and a maximum of 1000 cached items -cache = TTLCache(maxsize=1000, ttl=600) - - -def create_database(db_path: str): - conn = sqlite3.connect(db_path) - cur = conn.cursor() - cur.execute( - """CREATE TABLE IF NOT EXISTS chat_histories ( - student_id INTEGER PRIMARY KEY, - chat_history TEXT NOT NULL - )""" - ) - conn.commit() - conn.close() - - -# Specify the path to your database -DB_PATH = "chat_histories.db" - -# Create the database and table at the start of your application -create_database(DB_PATH) - -openai_key = os.environ.get("OPENAI_API_KEY") -# csv_file_directory_path = os.environ.get("CSV_FILE_DIRECTORY_PATH") -csv_file_directory_path = "./data.csv" - -# Initialize DocumentLoader -document_loader = DocumentLoader(openai_key, csv_file_directory_path) - -# Initialize AIEducator -educator = AIEducator(document_loader, DB_PATH) - - -class SupportedLanguages(str, Enum): - """An enumeration of the supported languages for the API.""" - - arabic = "ar" - russian = "ru" - tajik = "tg" - turkish = "tr" - - -class ResponseModel(BaseModel): - """A model representing the response structure of the API.""" - - original_response: str - translated_response: str - - -class StudentProfile(BaseModel): - """A model representing the student's profile which includes their name, age, and learning preferences.""" - - name: constr(min_length=2) = "Qurbon" - age: int = 18 - education_level: 
str = "Highschool education" - learning_style: str = "Active learning style" - metadata: dict = {} - - -class EducatorRequest(BaseModel): - """ - A model representing the entire request structure which includes - the student's profile, the message to be processed, the conversation and the target language for the response. - """ - - student_id: int - student_profile: StudentProfile = StudentProfile( - metadata={"favorite_subject": "Math", "hobbies": ["Coding", "Reading"]} - ) - message: str = "What is HTML?" - target_language: SupportedLanguages = SupportedLanguages.tajik - - -@app.exception_handler(ValueError) -async def value_error_handler(request, exc: ValueError): - return JSONResponse(status_code=400, content={"message": str(exc)}) - - -@cached(cache) -@app.post("/educator_response", response_model=ResponseModel) -async def get_educator_response(request: EducatorRequest): - """ - This endpoint takes in a request with the student's profile, message, and target language, - and returns a response generated by the AI educator. - - - **student_profile**: The profile of the student including their name, age, and learning preferences. - - **message**: The message or question from the student. - - **target_language**: The language in which the response should be generated. - - Response - - **response**: The generated response from the AI educator. 
- - """ - try: - response = educator.predict( - student_id=request.student_id, - message=request.message, - student_profile=request.student_profile, - target_language=request.target_language, - ) - return ResponseModel( - original_response=response["original_response"], - translated_response=response["translated_response"], - ) - except OpenAIError as e: - return JSONResponse( - status_code=502, content={"message": "OpenAI API error: " + str(e)} - ) - except Exception as e: - return JSONResponse( - status_code=500, content={"message": "An unknown error occurred: " + str(e)} - ) - - -@app.get("/chat_history_stats/{student_id}") -async def chat_history_stats(student_id: int): - try: - conn = sqlite3.connect(DB_PATH) - cur = conn.cursor() - - # Get the chat history for the specified student ID - cur.execute( - "SELECT chat_history FROM chat_histories WHERE student_id = ?", - (student_id,), - ) - row = cur.fetchone() - - conn.close() - - if row: - chat_history = json.loads(row[0]) - chat_count = len(chat_history) - user_messages = sum(1 for msg in chat_history if msg["role"] == "user") - assistant_messages = sum( - 1 for msg in chat_history if msg["role"] == "assistant" - ) - return { - "chat_count": chat_count, - "user_messages": user_messages, - "assistant_messages": assistant_messages, - } - else: - return JSONResponse( - status_code=404, - content={"status": "error", "detail": "Student ID not found"}, - ) - except Exception as e: - return JSONResponse( - status_code=500, content={"status": "error", "detail": str(e)} - ) - - -@app.get("/") -async def root(): - return { - "message": "Welcome to ilmhona ML API", - "version": "0.0.1", - "documentation": "/docs", - "available_endpoints": [ - { - "path": "/educator_response", - "method": "POST", - "description": "Get a response from the AI educator", - }, - { - "path": "/health", - "method": "GET", - "description": "Check the API health status", - }, - { - "path": "/chat_history_stats/{student_id}", - "method": "GET", - 
"description": "Get chat history statistics for a specific student ID", - }, - ], - } - - -@app.get("/health") -async def health_check(): - try: - # Check database connectivity - conn = sqlite3.connect(DB_PATH) - cur = conn.cursor() - cur.execute("SELECT 1") - conn.close() - - # Add other health checks here as needed - - return {"status": "healthy"} - except Exception as e: - return JSONResponse( - status_code=500, content={"status": "unhealthy", "detail": str(e)} - ) diff --git a/spaces/ilumine-AI/Retro-to-3D/README.md b/spaces/ilumine-AI/Retro-to-3D/README.md deleted file mode 100644 index d1cc055a79b0035c60e7fc0082c8fdc8a4cc16c6..0000000000000000000000000000000000000000 --- a/spaces/ilumine-AI/Retro-to-3D/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Retro To 3D -emoji: 👾👾👾 -colorFrom: yellow -colorTo: red -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/innnky/soft-vits-singingvc/preprocess.py b/spaces/innnky/soft-vits-singingvc/preprocess.py deleted file mode 100644 index aaedbf076c30114b3ac6c27dfb42fd54ac81a71c..0000000000000000000000000000000000000000 --- a/spaces/innnky/soft-vits-singingvc/preprocess.py +++ /dev/null @@ -1,25 +0,0 @@ -import argparse -import text -from utils import load_filepaths_and_text - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--out_extension", default="cleaned") - parser.add_argument("--text_index", default=1, type=int) - parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"]) - parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"]) - - args = parser.parse_args() - - - for filelist in args.filelists: - print("START:", filelist) - filepaths_and_text = load_filepaths_and_text(filelist) - for i in range(len(filepaths_and_text)): - 
original_text = filepaths_and_text[i][args.text_index] - cleaned_text = text._clean_text(original_text, args.text_cleaners) - filepaths_and_text[i][args.text_index] = cleaned_text - - new_filelist = filelist + "." + args.out_extension - with open(new_filelist, "w", encoding="utf-8") as f: - f.writelines(["|".join(x) + "\n" for x in filepaths_and_text]) diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Download PS3 Emulator 1.9.6.rar 3.49 MB.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Download PS3 Emulator 1.9.6.rar 3.49 MB.md deleted file mode 100644 index 49ba0ad9217e2c7879b79a788404ad14ea4d7a78..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Download PS3 Emulator 1.9.6.rar 3.49 MB.md +++ /dev/null @@ -1,6 +0,0 @@ -

Download PS3 Emulator 1.9.6.rar 3.49 MB


Download File · https://urlin.us/2uEx37



-
-Download PS3 Emulator 1.9.6.rar 3.49 MB-adds >> tlniurl.com/1ny3s6. 293 People Used More Information ››. Visit Site › ... 1fdad05405
-
-
-

diff --git a/spaces/inreVtussa/clothingai/Examples/Asamardhuni Jeeva Yatra Pdf !!LINK!! Download.md b/spaces/inreVtussa/clothingai/Examples/Asamardhuni Jeeva Yatra Pdf !!LINK!! Download.md deleted file mode 100644 index d795a87798d1dcbf4d350c2139e27488717cd7b2..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Asamardhuni Jeeva Yatra Pdf !!LINK!! Download.md +++ /dev/null @@ -1,14 +0,0 @@ - -

Asamardhuni Jeeva Yatra Pdf Download

-

Asamardhuni Jeeva Yatra is a classic Telugu novel written by Tripuraneni Gopichand in 1947. It is considered as the first psychological novel in Telugu literature, using stream of consciousness technique to explore the inner world of the protagonist, a frustrated and unsuccessful man who struggles with his identity and purpose in life.

-

Asamardhuni Jeeva Yatra Pdf Download


Download Ziphttps://tiurll.com/2uClHZ



-

The novel is divided into three parts: the first part depicts the childhood and youth of the protagonist, Ramaraju; the second part shows his married life and his affair with another woman; and the third part reveals his mental breakdown and suicide attempt. The novel is a critique of the social and moral values of the time, as well as a reflection of the author's own existential crisis.

-

If you want to read this novel online or download it as a pdf file, you can find it on various websites such as Internet Archive[^1^], Google Drive[^2^], or Goodreads[^3^]. However, you may need to sign in or create an account to access some of these sites. Alternatively, you can also buy a printed copy of the novel from online or offline bookstores.

Asamardhuni Jeeva Yatra is not only a novel, but also a literary masterpiece that influenced many Telugu writers and readers. It is regarded as one of the best works of Tripuraneni Gopichand, who was a lawyer, journalist, poet, and novelist. He was also a rationalist and a social reformer who advocated for women's rights, education, and scientific temper.

-

The novel has been translated into several languages such as English, Hindi, Tamil, and Kannada. It has also been adapted into a movie in 1963, directed by V. Madhusudan Rao and starring Akkineni Nageswara Rao as Ramaraju. The movie was well received by the critics and the audience alike.

-

If you are interested in reading Asamardhuni Jeeva Yatra, you can find it online or offline in various formats. However, you may also want to read some reviews or analyses of the novel to understand its themes and significance better. You can find some of them on websites such as Telugu Sahityam, Telugu Basha, or Telugu One.

-

Asamardhuni Jeeva Yatra is a novel that explores the human condition and the meaning of life. It is a novel that challenges the conventional notions of success and happiness, and exposes the hypocrisy and corruption of the society. It is a novel that portrays the psychological turmoil and the existential angst of a man who feels trapped and hopeless in his own life.

-

Asamardhuni Jeeva Yatra is a novel that you should read if you want to experience a different kind of Telugu literature. It is a novel that will make you think and feel deeply about yourself and the world around you. It is a novel that will leave a lasting impression on your mind and heart.

In conclusion, Asamardhuni Jeeva Yatra is a novel that deserves to be read and appreciated by anyone who loves Telugu literature. It is a novel that showcases the talent and vision of Tripuraneni Gopichand, who was a pioneer and a legend in Telugu literature. It is a novel that transcends time and space, and speaks to the universal human dilemmas and aspirations.

-

If you want to download Asamardhuni Jeeva Yatra as a pdf file, you can use the links provided in this article. However, you may also want to support the author and the publisher by buying a physical copy of the novel from a bookstore or an online platform. You can also share your thoughts and opinions about the novel with other readers on social media or online forums.

-

Thank you for reading this article. I hope you enjoyed it and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Happy reading!

d5da3c52bf
-
-
\ No newline at end of file diff --git a/spaces/iqovocn/ChuanhuChatGPT/modules/models/modeling_moss.py b/spaces/iqovocn/ChuanhuChatGPT/modules/models/modeling_moss.py deleted file mode 100644 index b7adea5bca857f7fdd6399dde7ce359f8f8cecfe..0000000000000000000000000000000000000000 --- a/spaces/iqovocn/ChuanhuChatGPT/modules/models/modeling_moss.py +++ /dev/null @@ -1,711 +0,0 @@ -""" PyTorch Moss model.""" - -from typing import Optional, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn -from torch.nn import CrossEntropyLoss - -from transformers.activations import ACT2FN -from transformers.modeling_utils import PreTrainedModel -from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast -from transformers.utils import ( - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging -) - -from .configuration_moss import MossConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "fnlp/moss-moon-003-base" -_CONFIG_FOR_DOC = "MossConfig" - - -MOSS_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "fnlp/moss-moon-003-base", - "fnlp/moss-moon-003-sft", - "fnlp/moss-moon-003-sft-plugin", -] - - -# Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions -def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor: - inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim)) - sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float() - return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1) - - -# Copied from transformers.models.gptj.modeling_gptj.rotate_every_two -def rotate_every_two(x: torch.Tensor) -> torch.Tensor: - x1 = x[:, :, :, ::2] - x2 = x[:, :, :, 1::2] - x = torch.stack((-x2, x1), dim=-1) - return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... 
(d j)') - - -# Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb -def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor: - sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3) - cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3) - return (tensor * cos) + (rotate_every_two(tensor) * sin) - - -class MossAttention(nn.Module): - def __init__(self, config): - super().__init__() - - max_positions = config.max_position_embeddings - self.register_buffer( - "causal_mask", - torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( - 1, 1, max_positions, max_positions - ), - ) - - self.attn_dropout = nn.Dropout(config.attn_pdrop) - self.resid_dropout = nn.Dropout(config.resid_pdrop) - - self.embed_dim = config.hidden_size - self.num_attention_heads = config.num_attention_heads - self.head_dim = self.embed_dim // self.num_attention_heads - if self.head_dim * self.num_attention_heads != self.embed_dim: - raise ValueError( - f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and" - f" `num_attention_heads`: {self.num_attention_heads})." 
- ) - self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) - self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False) - - self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) - self.rotary_dim = config.rotary_dim - pos_embd_dim = self.rotary_dim or self.embed_dim - self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim) - - def _split_heads(self, x, n_head, dim_head, mp_num): - reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head)) - reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:]) - return reshaped - - def _merge_heads(self, tensor, num_attention_heads, attn_head_size): - """ - Merges attn_head_size dim and num_attn_heads dim into n_ctx - """ - if len(tensor.shape) == 5: - tensor = tensor.permute(0, 1, 3, 2, 4).contiguous() - elif len(tensor.shape) == 4: - tensor = tensor.permute(0, 2, 1, 3).contiguous() - else: - raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}") - new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,) - return tensor.view(new_shape) - - def _attn( - self, - query, - key, - value, - attention_mask=None, - head_mask=None, - ): - # compute causal mask from causal mask buffer - query_length, key_length = query.size(-2), key.size(-2) - causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length] - - # Keep the attention weights computation in fp32 to avoid overflow issues - query = query.to(torch.float32) - key = key.to(torch.float32) - - attn_weights = torch.matmul(query, key.transpose(-1, -2)) - - attn_weights = attn_weights / self.scale_attn - mask_value = torch.finfo(attn_weights.dtype).min - # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
- # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` - mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) - attn_weights = torch.where(causal_mask, attn_weights, mask_value) - - if attention_mask is not None: - # Apply the attention mask - attn_weights = attn_weights + attention_mask - - attn_weights = nn.Softmax(dim=-1)(attn_weights) - attn_weights = attn_weights.to(value.dtype) - attn_weights = self.attn_dropout(attn_weights) - - # Mask heads if we want to - if head_mask is not None: - attn_weights = attn_weights * head_mask - - attn_output = torch.matmul(attn_weights, value) - - return attn_output, attn_weights - - def forward( - self, - hidden_states: Optional[torch.FloatTensor], - layer_past: Optional[Tuple[torch.Tensor]] = None, - attention_mask: Optional[torch.FloatTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = False, - output_attentions: Optional[bool] = False, - ) -> Union[ - Tuple[torch.Tensor, Tuple[torch.Tensor]], - Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]], - ]: - qkv = self.qkv_proj(hidden_states) - # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic - mp_num = 4 - qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1)) - - local_dim = self.head_dim * self.num_attention_heads // mp_num - query, value, key = torch.split(qkv_split, local_dim, dim=-1) - query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num) - key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num) - - value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num) - value = value.permute(0, 2, 1, 3) - - embed_positions = self.embed_positions - if embed_positions.device != position_ids.device: - embed_positions = embed_positions.to(position_ids.device) 
- self.embed_positions = embed_positions - - sincos = embed_positions[position_ids] - sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) - - if self.rotary_dim is not None: - k_rot = key[:, :, :, : self.rotary_dim] - k_pass = key[:, :, :, self.rotary_dim :] - - q_rot = query[:, :, :, : self.rotary_dim] - q_pass = query[:, :, :, self.rotary_dim :] - - k_rot = apply_rotary_pos_emb(k_rot, sin, cos) - q_rot = apply_rotary_pos_emb(q_rot, sin, cos) - - key = torch.cat([k_rot, k_pass], dim=-1) - query = torch.cat([q_rot, q_pass], dim=-1) - else: - key = apply_rotary_pos_emb(key, sin, cos) - query = apply_rotary_pos_emb(query, sin, cos) - - key = key.permute(0, 2, 1, 3) - query = query.permute(0, 2, 1, 3) - - if layer_past is not None: - past_key = layer_past[0] - past_value = layer_past[1] - key = torch.cat((past_key, key), dim=-2) - value = torch.cat((past_value, value), dim=-2) - - if use_cache is True: - present = (key, value) - else: - present = None - - # compute self-attention: V x Softmax(QK^T) - attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) - - attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim) - attn_output = self.out_proj(attn_output) - attn_output = self.resid_dropout(attn_output) - - outputs = (attn_output, present) - if output_attentions: - outputs += (attn_weights,) - - return outputs # a, present, (attentions) - - -# Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->Moss -class MossMLP(nn.Module): - def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim - super().__init__() - embed_dim = config.n_embd - - self.fc_in = nn.Linear(embed_dim, intermediate_size) - self.fc_out = nn.Linear(intermediate_size, embed_dim) - - self.act = ACT2FN[config.activation_function] - self.dropout = nn.Dropout(config.resid_pdrop) - - def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor: - hidden_states = 
self.fc_in(hidden_states) - hidden_states = self.act(hidden_states) - hidden_states = self.fc_out(hidden_states) - hidden_states = self.dropout(hidden_states) - return hidden_states - - -# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->Moss -class MossBlock(nn.Module): - def __init__(self, config): - super().__init__() - inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd - self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) - self.attn = MossAttention(config) - self.mlp = MossMLP(inner_dim, config) - - def forward( - self, - hidden_states: Optional[torch.FloatTensor], - layer_past: Optional[Tuple[torch.Tensor]] = None, - attention_mask: Optional[torch.FloatTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = False, - output_attentions: Optional[bool] = False, - ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: - residual = hidden_states - hidden_states = self.ln_1(hidden_states) - attn_outputs = self.attn( - hidden_states=hidden_states, - layer_past=layer_past, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask, - use_cache=use_cache, - output_attentions=output_attentions, - ) - attn_output = attn_outputs[0] # output_attn: a, present, (attentions) - outputs = attn_outputs[1:] - - feed_forward_hidden_states = self.mlp(hidden_states) - hidden_states = attn_output + feed_forward_hidden_states + residual - - if use_cache: - outputs = (hidden_states,) + outputs - else: - outputs = (hidden_states,) + outputs[1:] - - return outputs # hidden_states, present, (attentions) - - -class MossPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
- """ - - config_class = MossConfig - base_model_prefix = "transformer" - supports_gradient_checkpointing = True - _no_split_modules = ["MossBlock"] - - def __init__(self, *inputs, **kwargs): - super().__init__(*inputs, **kwargs) - - def _init_weights(self, module): - """Initialize the weights.""" - if isinstance(module, (nn.Linear,)): - # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, MossModel): - module.gradient_checkpointing = value - - -MOSS_START_DOCSTRING = r""" - This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use - it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and - behavior. - - Parameters: - config ([`MossConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -MOSS_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoProcenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. 
- - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.n_positions - 1]`. - - [What are position IDs?](../glossary#position-ids) - head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert *input_ids* indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. 
- return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare Moss Model transformer outputting raw hidden-states without any specific head on top.", - MOSS_START_DOCSTRING, -) -class MossModel(MossPreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.embed_dim = config.n_embd - self.vocab_size = config.vocab_size - self.wte = nn.Embedding(config.vocab_size, self.embed_dim) - self.drop = nn.Dropout(config.embd_pdrop) - self.h = nn.ModuleList([MossBlock(config) for _ in range(config.n_layer)]) - self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) - self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads) - - self.gradient_checkpointing = False - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.wte - - def set_input_embeddings(self, new_embeddings): - self.wte = new_embeddings - - @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutputWithPast, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPast]: - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - 
output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - batch_size = input_ids.shape[0] - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - batch_size = inputs_embeds.shape[0] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - device = input_ids.device if input_ids is not None else inputs_embeds.device - - if token_type_ids is not None: - token_type_ids = token_type_ids.view(-1, input_shape[-1]) - - if position_ids is not None: - position_ids = position_ids.view(-1, input_shape[-1]).long() - - if past_key_values is None: - past_length = 0 - past_key_values = tuple([None] * len(self.h)) - else: - past_length = past_key_values[0][0].size(-2) - - if position_ids is None: - position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) - position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) - - # Attention mask. - if attention_mask is not None: - if batch_size <= 0: - raise ValueError("batch_size has to be defined and > 0") - attention_mask = attention_mask.view(batch_size, -1) - # We create a 3D attention mask from a 2D tensor mask. - # Sizes are [batch_size, 1, 1, to_seq_length] - # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] - # this attention mask is more simple than the triangular masking of causal attention - # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
- attention_mask = attention_mask[:, None, None, :] - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and the dtype's smallest value for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility - attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x num_attention_heads x N x N - # head_mask has shape n_layer x batch x num_attention_heads x N x N - head_mask = self.get_head_mask(head_mask, self.config.n_layer) - - if inputs_embeds is None: - inputs_embeds = self.wte(input_ids) - - hidden_states = inputs_embeds - - if token_type_ids is not None: - token_type_embeds = self.wte(token_type_ids) - hidden_states = hidden_states + token_type_embeds - - hidden_states = self.drop(hidden_states) - - output_shape = input_shape + (hidden_states.size(-1),) - - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " - "`use_cache=False`..." 
- ) - use_cache = False - - presents = () if use_cache else None - all_self_attentions = () if output_attentions else None - all_hidden_states = () if output_hidden_states else None - for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - # None for past_key_value - return module(*inputs, use_cache, output_attentions) - - return custom_forward - - outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(block), - hidden_states, - None, - attention_mask, - position_ids, - head_mask[i], - ) - else: - outputs = block( - hidden_states=hidden_states, - layer_past=layer_past, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask[i], - use_cache=use_cache, - output_attentions=output_attentions, - ) - - hidden_states = outputs[0] - if use_cache is True: - presents = presents + (outputs[1],) - - if output_attentions: - all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) - - hidden_states = self.ln_f(hidden_states) - - hidden_states = hidden_states.view(output_shape) - # Add last hidden state - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) - - return BaseModelOutputWithPast( - last_hidden_state=hidden_states, - past_key_values=presents, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) - - -@add_start_docstrings( - """ - The Moss Model transformer with a language modeling head on top. 
- """, - MOSS_START_DOCSTRING, -) -class MossForCausalLM(MossPreTrainedModel): - _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.causal_mask"] - - def __init__(self, config): - super().__init__(config) - self.transformer = MossModel(config) - self.lm_head = nn.Linear(config.n_embd, config.vocab_size) - - # Initialize weights and apply final processing - self.post_init() - - def get_output_embeddings(self): - return self.lm_head - - def set_output_embeddings(self, new_embeddings): - self.lm_head = new_embeddings - - def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): - token_type_ids = kwargs.get("token_type_ids", None) - # only last token for inputs_ids if past is defined in kwargs - if past_key_values: - input_ids = input_ids[:, -1].unsqueeze(-1) - if token_type_ids is not None: - token_type_ids = token_type_ids[:, -1].unsqueeze(-1) - - attention_mask = kwargs.get("attention_mask", None) - position_ids = kwargs.get("position_ids", None) - - if attention_mask is not None and position_ids is None: - # create position_ids on the fly for batch generation - position_ids = attention_mask.long().cumsum(-1) - 1 - position_ids.masked_fill_(attention_mask == 0, 1) - if past_key_values: - position_ids = position_ids[:, -1].unsqueeze(-1) - - return { - "input_ids": input_ids, - "past_key_values": past_key_values, - "use_cache": kwargs.get("use_cache"), - "position_ids": position_ids, - "attention_mask": attention_mask, - "token_type_ids": token_type_ids, - } - - @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=CausalLMOutputWithPast, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, - attention_mask: Optional[torch.FloatTensor] = None, - token_type_ids: Optional[torch.LongTensor] = None, - 
position_ids: Optional[torch.LongTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - labels: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, CausalLMOutputWithPast]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set - `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` - are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - transformer_outputs = self.transformer( - input_ids, - past_key_values=past_key_values, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - hidden_states = transformer_outputs[0] - - # make sure sampling in fp16 works correctly and - # compute loss in fp32 to match with mesh-tf version - # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179 - lm_logits = self.lm_head(hidden_states).to(torch.float32) - - loss = None - if labels is not None: - # Shift so that tokens < n predict n - shift_logits = lm_logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - loss_fct = CrossEntropyLoss() - loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) - - loss = loss.to(hidden_states.dtype) - - if not return_dict: - output = 
(lm_logits,) + transformer_outputs[1:] - return ((loss,) + output) if loss is not None else output - - return CausalLMOutputWithPast( - loss=loss, - logits=lm_logits, - past_key_values=transformer_outputs.past_key_values, - hidden_states=transformer_outputs.hidden_states, - attentions=transformer_outputs.attentions, - ) - - @staticmethod - def _reorder_cache( - past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor - ) -> Tuple[Tuple[torch.Tensor]]: - """ - This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or - [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct - beam_idx at every generation step. - """ - return tuple( - tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) - for layer_past in past_key_values - ) diff --git a/spaces/ismot/1702t1/utils/__init__.py b/spaces/ismot/1702t1/utils/__init__.py deleted file mode 100644 index 02f1ee32e3c69bcf40722de4d5fb831ede759aae..0000000000000000000000000000000000000000 --- a/spaces/ismot/1702t1/utils/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -@date: 2021/06/19 -@description: -""" \ No newline at end of file diff --git a/spaces/jackcao2023/THUDM-WebGLM/README.md b/spaces/jackcao2023/THUDM-WebGLM/README.md deleted file mode 100644 index 6422e528add6c9302fa0f9e96e7aa9492f822af4..0000000000000000000000000000000000000000 --- a/spaces/jackcao2023/THUDM-WebGLM/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: THUDM WebGLM -emoji: 🚀 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jbilcke-hf/VideoChain-UI/next.config.js b/spaces/jbilcke-hf/VideoChain-UI/next.config.js deleted file mode 100644 index 
b699464f86c30db1e6786ce8f42e54a208ebad5a..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/VideoChain-UI/next.config.js +++ /dev/null @@ -1,10 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - output: 'standalone', - - experimental: { - serverActions: true, - }, -} - -module.exports = nextConfig diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/app/server/utils/isRateLimitError.ts b/spaces/jbilcke-hf/ai-clip-factory/src/app/server/utils/isRateLimitError.ts deleted file mode 100644 index 48e1e42e059260ad5b99d761700055048358af9e..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/app/server/utils/isRateLimitError.ts +++ /dev/null @@ -1,4 +0,0 @@ -export function isRateLimitError(something: unknown) { - // yeah this is a very crude implementation - return `${something || ""}`.includes("Rate Limit Reached") -} \ No newline at end of file diff --git a/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/batchnorm.py b/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/batchnorm.py deleted file mode 100644 index 18318965335b37cc671004a6aceda3229dc7b477..0000000000000000000000000000000000000000 --- a/spaces/jgurzoni/image_background_swapper/models/ade20k/segm_lib/nn/modules/batchnorm.py +++ /dev/null @@ -1,329 +0,0 @@ -# -*- coding: utf-8 -*- -# File : batchnorm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. 
- -import collections - -import torch -import torch.nn.functional as F - -from torch.nn.modules.batchnorm import _BatchNorm -from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast - -from .comm import SyncMaster - -__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d'] - - -def _sum_ft(tensor): - """sum over the first and last dimention""" - return tensor.sum(dim=0).sum(dim=-1) - - -def _unsqueeze_ft(tensor): - """add new dementions at the front and the tail""" - return tensor.unsqueeze(0).unsqueeze(-1) - - -_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size']) -_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std']) - - -class _SynchronizedBatchNorm(_BatchNorm): - def __init__(self, num_features, eps=1e-5, momentum=0.001, affine=True): - super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine) - - self._sync_master = SyncMaster(self._data_parallel_master) - - self._is_parallel = False - self._parallel_id = None - self._slave_pipe = None - - # customed batch norm statistics - self._moving_average_fraction = 1. - momentum - self.register_buffer('_tmp_running_mean', torch.zeros(self.num_features)) - self.register_buffer('_tmp_running_var', torch.ones(self.num_features)) - self.register_buffer('_running_iter', torch.ones(1)) - self._tmp_running_mean = self.running_mean.clone() * self._running_iter - self._tmp_running_var = self.running_var.clone() * self._running_iter - - def forward(self, input): - # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation. - if not (self._is_parallel and self.training): - return F.batch_norm( - input, self.running_mean, self.running_var, self.weight, self.bias, - self.training, self.momentum, self.eps) - - # Resize the input to (B, C, -1). 
- input_shape = input.size() - input = input.view(input.size(0), self.num_features, -1) - - # Compute the sum and square-sum. - sum_size = input.size(0) * input.size(2) - input_sum = _sum_ft(input) - input_ssum = _sum_ft(input ** 2) - - # Reduce-and-broadcast the statistics. - if self._parallel_id == 0: - mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size)) - else: - mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size)) - - # Compute the output. - if self.affine: - # MJY:: Fuse the multiplication for speed. - output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias) - else: - output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std) - - # Reshape it. - return output.view(input_shape) - - def __data_parallel_replicate__(self, ctx, copy_id): - self._is_parallel = True - self._parallel_id = copy_id - - # parallel_id == 0 means master device. - if self._parallel_id == 0: - ctx.sync_master = self._sync_master - else: - self._slave_pipe = ctx.sync_master.register_slave(copy_id) - - def _data_parallel_master(self, intermediates): - """Reduce the sum and square-sum, compute the statistics, and broadcast it.""" - intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device()) - - to_reduce = [i[1][:2] for i in intermediates] - to_reduce = [j for i in to_reduce for j in i] # flatten - target_gpus = [i[1].sum.get_device() for i in intermediates] - - sum_size = sum([i[1].sum_size for i in intermediates]) - sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce) - - mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size) - - broadcasted = Broadcast.apply(target_gpus, mean, inv_std) - - outputs = [] - for i, rec in enumerate(intermediates): - outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2]))) - - return outputs - - def _add_weighted(self, dest, delta, alpha=1, beta=1, bias=0): - """return *dest* by `dest := 
dest*alpha + delta*beta + bias`""" - return dest * alpha + delta * beta + bias - - def _compute_mean_std(self, sum_, ssum, size): - """Compute the mean and standard-deviation with sum and square-sum. This method - also maintains the moving average on the master device.""" - assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.' - mean = sum_ / size - sumvar = ssum - sum_ * mean - unbias_var = sumvar / (size - 1) - bias_var = sumvar / size - - self._tmp_running_mean = self._add_weighted(self._tmp_running_mean, mean.data, alpha=self._moving_average_fraction) - self._tmp_running_var = self._add_weighted(self._tmp_running_var, unbias_var.data, alpha=self._moving_average_fraction) - self._running_iter = self._add_weighted(self._running_iter, 1, alpha=self._moving_average_fraction) - - self.running_mean = self._tmp_running_mean / self._running_iter - self.running_var = self._tmp_running_var / self._running_iter - - return mean, bias_var.clamp(self.eps) ** -0.5 - - -class SynchronizedBatchNorm1d(_SynchronizedBatchNorm): - r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a - mini-batch. - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta - - This module differs from the built-in PyTorch BatchNorm1d as the mean and - standard-deviation are reduced across all devices during training. - - For example, when one uses `nn.DataParallel` to wrap the network during - training, PyTorch's implementation normalize the tensor on each device using - the statistics only on that device, which accelerated the computation and - is also easy to implement, but the statistics might be inaccurate. - Instead, in this synchronized version, the statistics will be computed - over all training samples distributed on multiple devices. - - Note that, for one-GPU or CPU-only case, this module behaves exactly same - as the built-in PyTorch implementation. 
- - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size C (where C is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Because the BatchNorm is done over the `C` dimension, computing statistics - on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm - - Args: - num_features: num_features from an expected input of size - `batch_size x num_features [x width]` - eps: a value added to the denominator for numerical stability. - Default: 1e-5 - momentum: the value used for the running_mean and running_var - computation. Default: 0.1 - affine: a boolean value that when set to ``True``, gives the layer learnable - affine parameters. Default: ``True`` - - Shape: - - Input: :math:`(N, C)` or :math:`(N, C, L)` - - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = SynchronizedBatchNorm1d(100) - >>> # Without Learnable Parameters - >>> m = SynchronizedBatchNorm1d(100, affine=False) - >>> input = torch.autograd.Variable(torch.randn(20, 100)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 2 and input.dim() != 3: - raise ValueError('expected 2D or 3D input (got {}D input)' - .format(input.dim())) - super(SynchronizedBatchNorm1d, self)._check_input_dim(input) - - -class SynchronizedBatchNorm2d(_SynchronizedBatchNorm): - r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch - of 3d inputs - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta - - This module differs from the built-in PyTorch BatchNorm2d as the mean and - standard-deviation are reduced across all devices during training. 
- - For example, when one uses `nn.DataParallel` to wrap the network during - training, PyTorch's implementation normalize the tensor on each device using - the statistics only on that device, which accelerated the computation and - is also easy to implement, but the statistics might be inaccurate. - Instead, in this synchronized version, the statistics will be computed - over all training samples distributed on multiple devices. - - Note that, for one-GPU or CPU-only case, this module behaves exactly same - as the built-in PyTorch implementation. - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size C (where C is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Because the BatchNorm is done over the `C` dimension, computing statistics - on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm - - Args: - num_features: num_features from an expected input of - size batch_size x num_features x height x width - eps: a value added to the denominator for numerical stability. - Default: 1e-5 - momentum: the value used for the running_mean and running_var - computation. Default: 0.1 - affine: a boolean value that when set to ``True``, gives the layer learnable - affine parameters. 
Default: ``True`` - - Shape: - - Input: :math:`(N, C, H, W)` - - Output: :math:`(N, C, H, W)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = SynchronizedBatchNorm2d(100) - >>> # Without Learnable Parameters - >>> m = SynchronizedBatchNorm2d(100, affine=False) - >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 4: - raise ValueError('expected 4D input (got {}D input)' - .format(input.dim())) - super(SynchronizedBatchNorm2d, self)._check_input_dim(input) - - -class SynchronizedBatchNorm3d(_SynchronizedBatchNorm): - r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch - of 4d inputs - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta - - This module differs from the built-in PyTorch BatchNorm3d as the mean and - standard-deviation are reduced across all devices during training. - - For example, when one uses `nn.DataParallel` to wrap the network during - training, PyTorch's implementation normalize the tensor on each device using - the statistics only on that device, which accelerated the computation and - is also easy to implement, but the statistics might be inaccurate. - Instead, in this synchronized version, the statistics will be computed - over all training samples distributed on multiple devices. - - Note that, for one-GPU or CPU-only case, this module behaves exactly same - as the built-in PyTorch implementation. - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size C (where C is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. 
- - Because the BatchNorm is done over the `C` dimension, computing statistics - on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm - or Spatio-temporal BatchNorm - - Args: - num_features: num_features from an expected input of - size batch_size x num_features x depth x height x width - eps: a value added to the denominator for numerical stability. - Default: 1e-5 - momentum: the value used for the running_mean and running_var - computation. Default: 0.1 - affine: a boolean value that when set to ``True``, gives the layer learnable - affine parameters. Default: ``True`` - - Shape: - - Input: :math:`(N, C, D, H, W)` - - Output: :math:`(N, C, D, H, W)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = SynchronizedBatchNorm3d(100) - >>> # Without Learnable Parameters - >>> m = SynchronizedBatchNorm3d(100, affine=False) - >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 5: - raise ValueError('expected 5D input (got {}D input)' - .format(input.dim())) - super(SynchronizedBatchNorm3d, self)._check_input_dim(input) diff --git a/spaces/jkang/demo-painttransformer/render_serial.py b/spaces/jkang/demo-painttransformer/render_serial.py deleted file mode 100644 index 49accd0e558b40df0752aba45907a24dc6ffbb01..0000000000000000000000000000000000000000 --- a/spaces/jkang/demo-painttransformer/render_serial.py +++ /dev/null @@ -1,283 +0,0 @@ -# !/usr/bin/env python3 -""" -codes for oilpainting style transfer. 
-""" -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import numpy as np -from PIL import Image -import math -# import cv2 -import render_utils -import time - - -def get_single_layer_lists(param, decision, ori_img, render_size_x, render_size_y, h, w, meta_brushes, dilation, erosion, stroke_num): - """ - get_single_layer_lists - """ - valid_foregrounds = render_utils.param2stroke(param[:, :], render_size_y, render_size_x, meta_brushes) - - valid_alphas = (valid_foregrounds > 0).astype('float32') - valid_foregrounds = valid_foregrounds.reshape([-1, stroke_num, 1, render_size_y, render_size_x]) - valid_alphas = valid_alphas.reshape([-1, stroke_num, 1, render_size_y, render_size_x]) - - temp = [dilation(valid_foregrounds[:, i, :, :, :]) for i in range(stroke_num)] - valid_foregrounds = paddle.stack(temp, axis=1) - valid_foregrounds = valid_foregrounds.reshape([-1, 1, render_size_y, render_size_x]) - - temp = [erosion(valid_alphas[:, i, :, :, :]) for i in range(stroke_num)] - valid_alphas = paddle.stack(temp, axis=1) - valid_alphas = valid_alphas.reshape([-1, 1, render_size_y, render_size_x]) - - patch_y = 4 * render_size_y // 5 - patch_x = 4 * render_size_x // 5 - - img_patch = ori_img.reshape([1, 3, h, ori_img.shape[2]//h, w, ori_img.shape[3]//w]) - img_patch = img_patch.transpose([0, 2, 4, 1, 3, 5])[0] - - xid_list = [] - yid_list = [] - error_list = [] - - for flag_idx, flag in enumerate(decision.cpu().numpy()): - if flag: - flag_idx = flag_idx // stroke_num - x_id = flag_idx % w - flag_idx = flag_idx // w - y_id = flag_idx % h - xid_list.append(x_id) - yid_list.append(y_id) - - inner_fores = valid_foregrounds[:, :, render_size_y // 10:9 * render_size_y // 10, - render_size_x // 10:9 * render_size_x // 10] - inner_alpha = valid_alphas[:, :, render_size_y // 10:9 * render_size_y // 10, - render_size_x // 10:9 * render_size_x // 10] - inner_fores = inner_fores.reshape([h * w, stroke_num, 1, patch_y, patch_x]) - inner_alpha = 
inner_alpha.reshape([h * w, stroke_num, 1, patch_y, patch_x]) - inner_real = img_patch.reshape([h * w, 3, patch_y, patch_x]).unsqueeze(1) - - R = param[:, 5] - G = param[:, 6] - B = param[:, 7]#, G, B = param[5:] - R = R.reshape([-1, stroke_num]).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) - G = G.reshape([-1, stroke_num]).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) - B = B.reshape([-1, stroke_num]).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) - error_R = R * inner_fores - inner_real[:, :, 0:1, :, :] - error_G = G * inner_fores - inner_real[:, :, 1:2, :, :] - error_B = B * inner_fores - inner_real[:, :, 2:3, :, :] - error = paddle.abs(error_R) + paddle.abs(error_G)+ paddle.abs(error_B) - - error = error * inner_alpha - error = paddle.sum(error, axis=(2, 3, 4)) / paddle.sum(inner_alpha, axis=(2, 3, 4)) - error_list = error.reshape([-1]).numpy()[decision.numpy()] - error_list = list(error_list) - - valid_foregrounds = paddle.to_tensor(valid_foregrounds.numpy()[decision.numpy()]) - valid_alphas = paddle.to_tensor(valid_alphas.numpy()[decision.numpy()]) - - selected_param = paddle.to_tensor(param.numpy()[decision.numpy()]) - return xid_list, yid_list, valid_foregrounds, valid_alphas, error_list, selected_param - - -def get_single_stroke_on_full_image_A(x_id, y_id, valid_foregrounds, valid_alphas, param, original_img, - render_size_x, render_size_y, patch_x, patch_y): - """ - get_single_stroke_on_full_image_A - """ - tmp_foreground = paddle.zeros_like(original_img) - - patch_y_num = original_img.shape[2] // patch_y - patch_x_num = original_img.shape[3] // patch_x - - brush = valid_foregrounds.unsqueeze(0) - color_map = param[5:] - brush = brush.tile([1, 3, 1, 1]) - color_map = color_map.unsqueeze(-1).unsqueeze(-1).unsqueeze(0)#.repeat(1, 1, H, W) - brush = brush * color_map - - pad_l = x_id * patch_x - pad_r = (patch_x_num - x_id - 1) * patch_x - pad_t = y_id * patch_y - pad_b = (patch_y_num - y_id - 1) * patch_y - tmp_foreground = nn.functional.pad(brush, [pad_l, pad_r, 
pad_t, pad_b]) - tmp_foreground = tmp_foreground[:, :, render_size_y // 10:-render_size_y // 10, - render_size_x // 10:-render_size_x // 10] - - tmp_alpha = nn.functional.pad(valid_alphas.unsqueeze(0), [pad_l, pad_r, pad_t, pad_b]) - tmp_alpha = tmp_alpha[:, :, render_size_y // 10:-render_size_y // 10, render_size_x // 10:-render_size_x // 10] - return tmp_foreground, tmp_alpha - -def get_single_stroke_on_full_image_B(x_id, y_id, valid_foregrounds, valid_alphas, param, - original_img, render_size_x, render_size_y, patch_x, patch_y): - """ - get_single_stroke_on_full_image_B - """ - x_expand = patch_x // 2 + render_size_x // 10 - y_expand = patch_y // 2 + render_size_y // 10 - - pad_l = x_id * patch_x - pad_r = original_img.shape[3] + 2 * x_expand - (x_id * patch_x + render_size_x) - pad_t = y_id * patch_y - pad_b = original_img.shape[2] + 2 * y_expand - (y_id * patch_y + render_size_y) - - brush = valid_foregrounds.unsqueeze(0) - color_map = param[5:] - brush = brush.tile([1, 3, 1, 1]) - color_map = color_map.unsqueeze(-1).unsqueeze(-1).unsqueeze(0)#.repeat(1, 1, H, W) - brush = brush * color_map - - tmp_foreground = nn.functional.pad(brush, [pad_l, pad_r, pad_t, pad_b]) - - tmp_foreground = tmp_foreground[:, :, y_expand:- y_expand, x_expand:-x_expand] - tmp_alpha = nn.functional.pad(valid_alphas.unsqueeze(0), [pad_l, pad_r, pad_t, pad_b]) - tmp_alpha = tmp_alpha[:, :, y_expand:- y_expand, x_expand:-x_expand] - return tmp_foreground, tmp_alpha - -def stroke_net_predict(img_patch, result_patch, patch_size, net_g, stroke_num): - """ - stroke_net_predict - """ - img_patch = img_patch.transpose([0, 2, 1]).reshape([-1, 3, patch_size, patch_size]) - result_patch = result_patch.transpose([0, 2, 1]).reshape([-1, 3, patch_size, patch_size]) - #*----- Stroke Predictor -----*# - shape_param, stroke_decision = net_g(img_patch, result_patch) - stroke_decision = (stroke_decision > 0).astype('float32') - #*----- sampling color -----*# - grid = shape_param[:, :, 
:2].reshape([img_patch.shape[0] * stroke_num, 1, 1, 2]) - img_temp = img_patch.unsqueeze(1).tile([1, stroke_num, 1, 1, 1]).reshape([ - img_patch.shape[0] * stroke_num, 3, patch_size, patch_size]) - color = nn.functional.grid_sample(img_temp, 2 * grid - 1, align_corners=False).reshape([ - img_patch.shape[0], stroke_num, 3]) - stroke_param = paddle.concat([shape_param, color], axis=-1) - - param = stroke_param.reshape([-1, 8]) - decision = stroke_decision.reshape([-1]).astype('bool') - param[:, :2] = param[:, :2] / 1.25 + 0.1 - param[:, 2:4] = param[:, 2:4] / 1.25 - return param, decision - - -def sort_strokes(params, decision, scores): - """ - sort_strokes - """ - sorted_scores, sorted_index = paddle.sort(scores, axis=1, descending=False) - sorted_params = [] - for idx in range(8): - tmp_pick_params = paddle.gather(params[:, :, idx], axis=1, index=sorted_index) - sorted_params.append(tmp_pick_params) - sorted_params = paddle.stack(sorted_params, axis=2) - sorted_decison = paddle.gather(decision.squeeze(2), axis=1, index=sorted_index) - return sorted_params, sorted_decison - - -def render_serial(original_img, net_g, meta_brushes): - - patch_size = 32 - stroke_num = 8 - H, W = original_img.shape[-2:] - K = max(math.ceil(math.log2(max(H, W) / patch_size)), 0) - - dilation = render_utils.Dilation2d(m=1) - erosion = render_utils.Erosion2d(m=1) - frames_per_layer = [20, 20, 30, 40, 60] - final_frame_list = [] - - with paddle.no_grad(): - #* ----- read in image and init canvas ----- *# - final_result = paddle.zeros_like(original_img) - - for layer in range(0, K + 1): - t0 = time.time() - layer_size = patch_size * (2 ** layer) - - img = nn.functional.interpolate(original_img, (layer_size, layer_size)) - result = nn.functional.interpolate(final_result, (layer_size, layer_size)) - img_patch = nn.functional.unfold(img, [patch_size, patch_size], - strides=[patch_size, patch_size]) - result_patch = nn.functional.unfold(result, [patch_size, patch_size], - strides=[patch_size, 
patch_size]) - h = (img.shape[2] - patch_size) // patch_size + 1 - w = (img.shape[3] - patch_size) // patch_size + 1 - render_size_y = int(1.25 * H // h) - render_size_x = int(1.25 * W // w) - - #* -------------------------------------------------------------*# - #* -------------generate strokes on window type A---------------*# - #* -------------------------------------------------------------*# - param, decision = stroke_net_predict(img_patch, result_patch, patch_size, net_g, stroke_num) - expand_img = original_img - wA_xid_list, wA_yid_list, wA_fore_list, wA_alpha_list, wA_error_list, wA_params = \ - get_single_layer_lists(param, decision, original_img, render_size_x, render_size_y, h, w, - meta_brushes, dilation, erosion, stroke_num) - - #* -------------------------------------------------------------*# - #* -------------generate strokes on window type B---------------*# - #* -------------------------------------------------------------*# - #*----- generate input canvas and target patches -----*# - wB_error_list = [] - - img = nn.functional.pad(img, [patch_size // 2, patch_size // 2, - patch_size // 2, patch_size // 2]) - result = nn.functional.pad(result, [patch_size // 2, patch_size // 2, - patch_size // 2, patch_size // 2]) - img_patch = nn.functional.unfold(img, [patch_size, patch_size], - strides=[patch_size, patch_size]) - result_patch = nn.functional.unfold(result, [patch_size, patch_size], - strides=[patch_size, patch_size]) - h += 1 - w += 1 - - param, decision = stroke_net_predict(img_patch, result_patch, patch_size, net_g, stroke_num) - - patch_y = 4 * render_size_y // 5 - patch_x = 4 * render_size_x // 5 - expand_img = nn.functional.pad(original_img, [patch_x // 2, patch_x // 2, - patch_y // 2, patch_y // 2]) - wB_xid_list, wB_yid_list, wB_fore_list, wB_alpha_list, wB_error_list, wB_params = \ - get_single_layer_lists(param, decision, expand_img, render_size_x, render_size_y, h, w, - meta_brushes, dilation, erosion, stroke_num) - #* 
-------------------------------------------------------------*# - #* -------------rank strokes and plot stroke one by one---------*# - #* -------------------------------------------------------------*# - numA = len(wA_error_list) - numB = len(wB_error_list) - total_error_list = wA_error_list + wB_error_list - sort_list = list(np.argsort(total_error_list)) - - sample = 0 - samples = np.linspace(0, len(sort_list) - 2, frames_per_layer[layer]).astype(int) - for ii in sort_list: - ii = int(ii) - if ii < numA: - x_id = wA_xid_list[ii] - y_id = wA_yid_list[ii] - valid_foregrounds = wA_fore_list[ii] - valid_alphas = wA_alpha_list[ii] - sparam = wA_params[ii] - tmp_foreground, tmp_alpha = get_single_stroke_on_full_image_A(x_id, y_id, - valid_foregrounds, valid_alphas, sparam, original_img, render_size_x, render_size_y, patch_x, patch_y) - else: - x_id = wB_xid_list[ii - numA] - y_id = wB_yid_list[ii - numA] - valid_foregrounds = wB_fore_list[ii - numA] - valid_alphas = wB_alpha_list[ii - numA] - sparam = wB_params[ii - numA] - tmp_foreground, tmp_alpha = get_single_stroke_on_full_image_B(x_id, y_id, - valid_foregrounds, valid_alphas, sparam, original_img, render_size_x, render_size_y, patch_x, patch_y) - - final_result = tmp_foreground * tmp_alpha + (1 - tmp_alpha) * final_result - if sample in samples: - saveframe = (final_result.numpy().squeeze().transpose([1,2,0])[:,:,::-1] * 255).astype(np.uint8) - final_frame_list.append(saveframe) - #saveframe = cv2.resize(saveframe, (ow, oh)) - - sample += 1 - print("layer %d cost: %.02f" %(layer, time.time() - t0)) - - - saveframe = (final_result.numpy().squeeze().transpose([1,2,0])[:,:,::-1] * 255).astype(np.uint8) - final_frame_list.append(saveframe) - return final_frame_list \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/IO/test_PBES.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/IO/test_PBES.py deleted file 
mode 100644 index b2a4f94acb7ec467334936b6621d3abe39c0f4cb..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/IO/test_PBES.py +++ /dev/null @@ -1,93 +0,0 @@ -# -# SelfTest/IO/test_PBES.py: Self-test for the _PBES module -# -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
-# =================================================================== - -"""Self-tests for Crypto.IO._PBES module""" - -import unittest -from Crypto.Util.py3compat import * - -from Crypto.IO._PBES import PBES2 - - -class TestPBES2(unittest.TestCase): - - def setUp(self): - self.ref = b("Test data") - self.passphrase = b("Passphrase") - - def test1(self): - ct = PBES2.encrypt(self.ref, self.passphrase, - 'PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC') - pt = PBES2.decrypt(ct, self.passphrase) - self.assertEqual(self.ref, pt) - - def test2(self): - ct = PBES2.encrypt(self.ref, self.passphrase, - 'PBKDF2WithHMAC-SHA1AndAES128-CBC') - pt = PBES2.decrypt(ct, self.passphrase) - self.assertEqual(self.ref, pt) - - def test3(self): - ct = PBES2.encrypt(self.ref, self.passphrase, - 'PBKDF2WithHMAC-SHA1AndAES192-CBC') - pt = PBES2.decrypt(ct, self.passphrase) - self.assertEqual(self.ref, pt) - - def test4(self): - ct = PBES2.encrypt(self.ref, self.passphrase, - 'scryptAndAES128-CBC') - pt = PBES2.decrypt(ct, self.passphrase) - self.assertEqual(self.ref, pt) - - def test5(self): - ct = PBES2.encrypt(self.ref, self.passphrase, - 'scryptAndAES192-CBC') - pt = PBES2.decrypt(ct, self.passphrase) - self.assertEqual(self.ref, pt) - - def test6(self): - ct = PBES2.encrypt(self.ref, self.passphrase, - 'scryptAndAES256-CBC') - pt = PBES2.decrypt(ct, self.passphrase) - self.assertEqual(self.ref, pt) - - -def get_tests(config={}): - from Crypto.SelfTest.st_common import list_test_cases - listTests = [] - listTests += list_test_cases(TestPBES2) - return listTests - -if __name__ == '__main__': - suite = lambda: unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/time64_config.h b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/time64_config.h deleted file mode 100644 index 9d4c111c95cd378f3314d722d4b3efb216eeb7b4..0000000000000000000000000000000000000000 --- 
a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/time64_config.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Configuration - ------------- - Define as appropriate for your system. - Sensible defaults provided. -*/ - - -#ifndef TIME64_CONFIG_H -# define TIME64_CONFIG_H - -/* Debugging - TIME_64_DEBUG - Define if you want debugging messages -*/ -/* #define TIME_64_DEBUG */ - - -/* INT_64_T - A 64 bit integer type to use to store time and others. - Must be defined. -*/ -#define INT_64_T long long - - -/* USE_TM64 - Should we use a 64 bit safe replacement for tm? This will - let you go past year 2 billion but the struct will be incompatible - with tm. Conversion functions will be provided. -*/ -/* #define USE_TM64 */ - - -/* Availability of system functions. - - HAS_GMTIME_R - Define if your system has gmtime_r() - - HAS_LOCALTIME_R - Define if your system has localtime_r() - - HAS_TIMEGM - Define if your system has timegm(), a GNU extension. -*/ -#if !defined(WIN32) && !defined(_MSC_VER) -#define HAS_GMTIME_R -#define HAS_LOCALTIME_R -#endif -/* #define HAS_TIMEGM */ - - -/* Details of non-standard tm struct elements. - - HAS_TM_TM_GMTOFF - True if your tm struct has a "tm_gmtoff" element. - A BSD extension. - - HAS_TM_TM_ZONE - True if your tm struct has a "tm_zone" element. - A BSD extension. -*/ -/* #define HAS_TM_TM_GMTOFF */ -/* #define HAS_TM_TM_ZONE */ - - -/* USE_SYSTEM_LOCALTIME - USE_SYSTEM_GMTIME - USE_SYSTEM_MKTIME - USE_SYSTEM_TIMEGM - Should we use the system functions if the time is inside their range? - Your system localtime() is probably more accurate, but our gmtime() is - fast and safe. 
-*/ -#define USE_SYSTEM_LOCALTIME -/* #define USE_SYSTEM_GMTIME */ -#define USE_SYSTEM_MKTIME -/* #define USE_SYSTEM_TIMEGM */ - -#endif /* TIME64_CONFIG_H */ diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/DS.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/DS.py deleted file mode 100644 index 097ecfa0e1a3a375765aba427fd288448b692c44..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/rdtypes/ANY/DS.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -import dns.immutable -import dns.rdtypes.dsbase - - -@dns.immutable.immutable -class DS(dns.rdtypes.dsbase.DSBase): - - """DS record""" diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/version.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/version.py deleted file mode 100644 index 1f1fbf2d5789ae0126dabc430e1a6dcbee2300f8..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/version.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- -"""dnspython release version information.""" - -#: MAJOR -MAJOR = 2 -#: MINOR -MINOR = 4 -#: MICRO -MICRO = 2 -#: RELEASELEVEL -RELEASELEVEL = 0x0F -#: SERIAL -SERIAL = 0 - -if RELEASELEVEL == 0x0F: # pragma: no cover lgtm[py/unreachable-statement] - #: version - version = "%d.%d.%d" % (MAJOR, MINOR, MICRO) # lgtm[py/unreachable-statement] -elif RELEASELEVEL == 0x00: # pragma: no cover lgtm[py/unreachable-statement] - version = "%d.%d.%ddev%d" % ( - MAJOR, - MINOR, - MICRO, - SERIAL, - ) # lgtm[py/unreachable-statement] -elif RELEASELEVEL == 0x0C: # pragma: no cover lgtm[py/unreachable-statement] - version = "%d.%d.%drc%d" % ( - MAJOR, - MINOR, - MICRO, - SERIAL, - ) # lgtm[py/unreachable-statement] -else: # pragma: no cover lgtm[py/unreachable-statement] - version = "%d.%d.%d%x%d" % ( - MAJOR, - MINOR, - MICRO, - RELEASELEVEL, - SERIAL, - ) # lgtm[py/unreachable-statement] - -#: hexversion -hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | SERIAL diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/O_S_2f_2.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/O_S_2f_2.py deleted file mode 100644 index 7b403026aa4eabe03c7484f51f14db63ed2ebc5c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/O_S_2f_2.py +++ /dev/null @@ -1,617 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.roundTools import otRound -from fontTools.misc.textTools import safeEval, num2binary, binary2num -from fontTools.ttLib.tables import DefaultTable -import bisect -import logging - - -log = logging.getLogger(__name__) - -# panose classification - -panoseFormat = """ - bFamilyType: B - bSerifStyle: B - bWeight: B - bProportion: B - bContrast: B - bStrokeVariation: B - bArmStyle: B - bLetterForm: B - bMidline: B - bXHeight: B -""" - - -class Panose(object): - def 
__init__(self, **kwargs): - _, names, _ = sstruct.getformat(panoseFormat) - for name in names: - setattr(self, name, kwargs.pop(name, 0)) - for k in kwargs: - raise TypeError(f"Panose() got an unexpected keyword argument {k!r}") - - def toXML(self, writer, ttFont): - formatstring, names, fixes = sstruct.getformat(panoseFormat) - for name in names: - writer.simpletag(name, value=getattr(self, name)) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - setattr(self, name, safeEval(attrs["value"])) - - -# 'sfnt' OS/2 and Windows Metrics table - 'OS/2' - -OS2_format_0 = """ - > # big endian - version: H # version - xAvgCharWidth: h # average character width - usWeightClass: H # degree of thickness of strokes - usWidthClass: H # aspect ratio - fsType: H # type flags - ySubscriptXSize: h # subscript horizontal font size - ySubscriptYSize: h # subscript vertical font size - ySubscriptXOffset: h # subscript x offset - ySubscriptYOffset: h # subscript y offset - ySuperscriptXSize: h # superscript horizontal font size - ySuperscriptYSize: h # superscript vertical font size - ySuperscriptXOffset: h # superscript x offset - ySuperscriptYOffset: h # superscript y offset - yStrikeoutSize: h # strikeout size - yStrikeoutPosition: h # strikeout position - sFamilyClass: h # font family class and subclass - panose: 10s # panose classification number - ulUnicodeRange1: L # character range - ulUnicodeRange2: L # character range - ulUnicodeRange3: L # character range - ulUnicodeRange4: L # character range - achVendID: 4s # font vendor identification - fsSelection: H # font selection flags - usFirstCharIndex: H # first unicode character index - usLastCharIndex: H # last unicode character index - sTypoAscender: h # typographic ascender - sTypoDescender: h # typographic descender - sTypoLineGap: h # typographic line gap - usWinAscent: H # Windows ascender - usWinDescent: H # Windows descender -""" - -OS2_format_1_addition = """ - ulCodePageRange1: L - 
ulCodePageRange2: L -""" - -OS2_format_2_addition = ( - OS2_format_1_addition - + """ - sxHeight: h - sCapHeight: h - usDefaultChar: H - usBreakChar: H - usMaxContext: H -""" -) - -OS2_format_5_addition = ( - OS2_format_2_addition - + """ - usLowerOpticalPointSize: H - usUpperOpticalPointSize: H -""" -) - -bigendian = " > # big endian\n" - -OS2_format_1 = OS2_format_0 + OS2_format_1_addition -OS2_format_2 = OS2_format_0 + OS2_format_2_addition -OS2_format_5 = OS2_format_0 + OS2_format_5_addition -OS2_format_1_addition = bigendian + OS2_format_1_addition -OS2_format_2_addition = bigendian + OS2_format_2_addition -OS2_format_5_addition = bigendian + OS2_format_5_addition - - -class table_O_S_2f_2(DefaultTable.DefaultTable): - - """the OS/2 table""" - - dependencies = ["head"] - - def decompile(self, data, ttFont): - dummy, data = sstruct.unpack2(OS2_format_0, data, self) - - if self.version == 1: - dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self) - elif self.version in (2, 3, 4): - dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self) - elif self.version == 5: - dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self) - self.usLowerOpticalPointSize /= 20 - self.usUpperOpticalPointSize /= 20 - elif self.version != 0: - from fontTools import ttLib - - raise ttLib.TTLibError( - "unknown format for OS/2 table: version %s" % self.version - ) - if len(data): - log.warning("too much 'OS/2' table data") - - self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) - - def compile(self, ttFont): - self.updateFirstAndLastCharIndex(ttFont) - panose = self.panose - head = ttFont["head"] - if (self.fsSelection & 1) and not (head.macStyle & 1 << 1): - log.warning( - "fsSelection bit 0 (italic) and " - "head table macStyle bit 1 (italic) should match" - ) - if (self.fsSelection & 1 << 5) and not (head.macStyle & 1): - log.warning( - "fsSelection bit 5 (bold) and " - "head table macStyle bit 0 (bold) should match" - ) - if (self.fsSelection 
& 1 << 6) and (self.fsSelection & 1 + (1 << 5)): - log.warning( - "fsSelection bit 6 (regular) is set, " - "bits 0 (italic) and 5 (bold) must be clear" - ) - if self.version < 4 and self.fsSelection & 0b1110000000: - log.warning( - "fsSelection bits 7, 8 and 9 are only defined in " - "OS/2 table version 4 and up: version %s", - self.version, - ) - self.panose = sstruct.pack(panoseFormat, self.panose) - if self.version == 0: - data = sstruct.pack(OS2_format_0, self) - elif self.version == 1: - data = sstruct.pack(OS2_format_1, self) - elif self.version in (2, 3, 4): - data = sstruct.pack(OS2_format_2, self) - elif self.version == 5: - d = self.__dict__.copy() - d["usLowerOpticalPointSize"] = round(self.usLowerOpticalPointSize * 20) - d["usUpperOpticalPointSize"] = round(self.usUpperOpticalPointSize * 20) - data = sstruct.pack(OS2_format_5, d) - else: - from fontTools import ttLib - - raise ttLib.TTLibError( - "unknown format for OS/2 table: version %s" % self.version - ) - self.panose = panose - return data - - def toXML(self, writer, ttFont): - writer.comment( - "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n" - "will be recalculated by the compiler" - ) - writer.newline() - if self.version == 1: - format = OS2_format_1 - elif self.version in (2, 3, 4): - format = OS2_format_2 - elif self.version == 5: - format = OS2_format_5 - else: - format = OS2_format_0 - formatstring, names, fixes = sstruct.getformat(format) - for name in names: - value = getattr(self, name) - if name == "panose": - writer.begintag("panose") - writer.newline() - value.toXML(writer, ttFont) - writer.endtag("panose") - elif name in ( - "ulUnicodeRange1", - "ulUnicodeRange2", - "ulUnicodeRange3", - "ulUnicodeRange4", - "ulCodePageRange1", - "ulCodePageRange2", - ): - writer.simpletag(name, value=num2binary(value)) - elif name in ("fsType", "fsSelection"): - writer.simpletag(name, value=num2binary(value, 16)) - elif name == "achVendID": - writer.simpletag(name, value=repr(value)[1:-1]) - 
else: - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "panose": - self.panose = panose = Panose() - for element in content: - if isinstance(element, tuple): - name, attrs, content = element - panose.fromXML(name, attrs, content, ttFont) - elif name in ( - "ulUnicodeRange1", - "ulUnicodeRange2", - "ulUnicodeRange3", - "ulUnicodeRange4", - "ulCodePageRange1", - "ulCodePageRange2", - "fsType", - "fsSelection", - ): - setattr(self, name, binary2num(attrs["value"])) - elif name == "achVendID": - setattr(self, name, safeEval("'''" + attrs["value"] + "'''")) - else: - setattr(self, name, safeEval(attrs["value"])) - - def updateFirstAndLastCharIndex(self, ttFont): - if "cmap" not in ttFont: - return - codes = set() - for table in getattr(ttFont["cmap"], "tables", []): - if table.isUnicode(): - codes.update(table.cmap.keys()) - if codes: - minCode = min(codes) - maxCode = max(codes) - # USHORT cannot hold codepoints greater than 0xFFFF - self.usFirstCharIndex = min(0xFFFF, minCode) - self.usLastCharIndex = min(0xFFFF, maxCode) - - # misspelled attributes kept for legacy reasons - - @property - def usMaxContex(self): - return self.usMaxContext - - @usMaxContex.setter - def usMaxContex(self, value): - self.usMaxContext = value - - @property - def fsFirstCharIndex(self): - return self.usFirstCharIndex - - @fsFirstCharIndex.setter - def fsFirstCharIndex(self, value): - self.usFirstCharIndex = value - - @property - def fsLastCharIndex(self): - return self.usLastCharIndex - - @fsLastCharIndex.setter - def fsLastCharIndex(self, value): - self.usLastCharIndex = value - - def getUnicodeRanges(self): - """Return the set of 'ulUnicodeRange*' bits currently enabled.""" - bits = set() - ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2 - ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4 - for i in range(32): - if ul1 & (1 << i): - bits.add(i) - if ul2 & (1 << i): - bits.add(i + 32) - if ul3 & (1 << i): - 
bits.add(i + 64) - if ul4 & (1 << i): - bits.add(i + 96) - return bits - - def setUnicodeRanges(self, bits): - """Set the 'ulUnicodeRange*' fields to the specified 'bits'.""" - ul1, ul2, ul3, ul4 = 0, 0, 0, 0 - for bit in bits: - if 0 <= bit < 32: - ul1 |= 1 << bit - elif 32 <= bit < 64: - ul2 |= 1 << (bit - 32) - elif 64 <= bit < 96: - ul3 |= 1 << (bit - 64) - elif 96 <= bit < 123: - ul4 |= 1 << (bit - 96) - else: - raise ValueError("expected 0 <= int <= 122, found: %r" % bit) - self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2 - self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4 - - def recalcUnicodeRanges(self, ttFont, pruneOnly=False): - """Intersect the codepoints in the font's Unicode cmap subtables with - the Unicode block ranges defined in the OpenType specification (v1.7), - and set the respective 'ulUnicodeRange*' bits if there is at least ONE - intersection. - If 'pruneOnly' is True, only clear unused bits with NO intersection. - """ - unicodes = set() - for table in ttFont["cmap"].tables: - if table.isUnicode(): - unicodes.update(table.cmap.keys()) - if pruneOnly: - empty = intersectUnicodeRanges(unicodes, inverse=True) - bits = self.getUnicodeRanges() - empty - else: - bits = intersectUnicodeRanges(unicodes) - self.setUnicodeRanges(bits) - return bits - - def recalcAvgCharWidth(self, ttFont): - """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table. - - Set it to 0 if the unlikely event 'hmtx' table is not found. 
- """ - avg_width = 0 - hmtx = ttFont.get("hmtx") - if hmtx is not None: - widths = [width for width, _ in hmtx.metrics.values() if width > 0] - if widths: - avg_width = otRound(sum(widths) / len(widths)) - self.xAvgCharWidth = avg_width - return avg_width - - -# Unicode ranges data from the OpenType OS/2 table specification v1.7 - -OS2_UNICODE_RANGES = ( - (("Basic Latin", (0x0000, 0x007F)),), - (("Latin-1 Supplement", (0x0080, 0x00FF)),), - (("Latin Extended-A", (0x0100, 0x017F)),), - (("Latin Extended-B", (0x0180, 0x024F)),), - ( - ("IPA Extensions", (0x0250, 0x02AF)), - ("Phonetic Extensions", (0x1D00, 0x1D7F)), - ("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)), - ), - ( - ("Spacing Modifier Letters", (0x02B0, 0x02FF)), - ("Modifier Tone Letters", (0xA700, 0xA71F)), - ), - ( - ("Combining Diacritical Marks", (0x0300, 0x036F)), - ("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)), - ), - (("Greek and Coptic", (0x0370, 0x03FF)),), - (("Coptic", (0x2C80, 0x2CFF)),), - ( - ("Cyrillic", (0x0400, 0x04FF)), - ("Cyrillic Supplement", (0x0500, 0x052F)), - ("Cyrillic Extended-A", (0x2DE0, 0x2DFF)), - ("Cyrillic Extended-B", (0xA640, 0xA69F)), - ), - (("Armenian", (0x0530, 0x058F)),), - (("Hebrew", (0x0590, 0x05FF)),), - (("Vai", (0xA500, 0xA63F)),), - (("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))), - (("NKo", (0x07C0, 0x07FF)),), - (("Devanagari", (0x0900, 0x097F)),), - (("Bengali", (0x0980, 0x09FF)),), - (("Gurmukhi", (0x0A00, 0x0A7F)),), - (("Gujarati", (0x0A80, 0x0AFF)),), - (("Oriya", (0x0B00, 0x0B7F)),), - (("Tamil", (0x0B80, 0x0BFF)),), - (("Telugu", (0x0C00, 0x0C7F)),), - (("Kannada", (0x0C80, 0x0CFF)),), - (("Malayalam", (0x0D00, 0x0D7F)),), - (("Thai", (0x0E00, 0x0E7F)),), - (("Lao", (0x0E80, 0x0EFF)),), - (("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))), - (("Balinese", (0x1B00, 0x1B7F)),), - (("Hangul Jamo", (0x1100, 0x11FF)),), - ( - ("Latin Extended Additional", (0x1E00, 0x1EFF)), - ("Latin 
Extended-C", (0x2C60, 0x2C7F)), - ("Latin Extended-D", (0xA720, 0xA7FF)), - ), - (("Greek Extended", (0x1F00, 0x1FFF)),), - ( - ("General Punctuation", (0x2000, 0x206F)), - ("Supplemental Punctuation", (0x2E00, 0x2E7F)), - ), - (("Superscripts And Subscripts", (0x2070, 0x209F)),), - (("Currency Symbols", (0x20A0, 0x20CF)),), - (("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),), - (("Letterlike Symbols", (0x2100, 0x214F)),), - (("Number Forms", (0x2150, 0x218F)),), - ( - ("Arrows", (0x2190, 0x21FF)), - ("Supplemental Arrows-A", (0x27F0, 0x27FF)), - ("Supplemental Arrows-B", (0x2900, 0x297F)), - ("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)), - ), - ( - ("Mathematical Operators", (0x2200, 0x22FF)), - ("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)), - ("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)), - ("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)), - ), - (("Miscellaneous Technical", (0x2300, 0x23FF)),), - (("Control Pictures", (0x2400, 0x243F)),), - (("Optical Character Recognition", (0x2440, 0x245F)),), - (("Enclosed Alphanumerics", (0x2460, 0x24FF)),), - (("Box Drawing", (0x2500, 0x257F)),), - (("Block Elements", (0x2580, 0x259F)),), - (("Geometric Shapes", (0x25A0, 0x25FF)),), - (("Miscellaneous Symbols", (0x2600, 0x26FF)),), - (("Dingbats", (0x2700, 0x27BF)),), - (("CJK Symbols And Punctuation", (0x3000, 0x303F)),), - (("Hiragana", (0x3040, 0x309F)),), - ( - ("Katakana", (0x30A0, 0x30FF)), - ("Katakana Phonetic Extensions", (0x31F0, 0x31FF)), - ), - (("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))), - (("Hangul Compatibility Jamo", (0x3130, 0x318F)),), - (("Phags-pa", (0xA840, 0xA87F)),), - (("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),), - (("CJK Compatibility", (0x3300, 0x33FF)),), - (("Hangul Syllables", (0xAC00, 0xD7AF)),), - (("Non-Plane 0 *", (0xD800, 0xDFFF)),), - (("Phoenician", (0x10900, 0x1091F)),), - ( - ("CJK Unified Ideographs", (0x4E00, 0x9FFF)), - ("CJK Radicals 
Supplement", (0x2E80, 0x2EFF)), - ("Kangxi Radicals", (0x2F00, 0x2FDF)), - ("Ideographic Description Characters", (0x2FF0, 0x2FFF)), - ("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)), - ("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)), - ("Kanbun", (0x3190, 0x319F)), - ), - (("Private Use Area (plane 0)", (0xE000, 0xF8FF)),), - ( - ("CJK Strokes", (0x31C0, 0x31EF)), - ("CJK Compatibility Ideographs", (0xF900, 0xFAFF)), - ("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)), - ), - (("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),), - (("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),), - (("Combining Half Marks", (0xFE20, 0xFE2F)),), - ( - ("Vertical Forms", (0xFE10, 0xFE1F)), - ("CJK Compatibility Forms", (0xFE30, 0xFE4F)), - ), - (("Small Form Variants", (0xFE50, 0xFE6F)),), - (("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),), - (("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),), - (("Specials", (0xFFF0, 0xFFFF)),), - (("Tibetan", (0x0F00, 0x0FFF)),), - (("Syriac", (0x0700, 0x074F)),), - (("Thaana", (0x0780, 0x07BF)),), - (("Sinhala", (0x0D80, 0x0DFF)),), - (("Myanmar", (0x1000, 0x109F)),), - ( - ("Ethiopic", (0x1200, 0x137F)), - ("Ethiopic Supplement", (0x1380, 0x139F)), - ("Ethiopic Extended", (0x2D80, 0x2DDF)), - ), - (("Cherokee", (0x13A0, 0x13FF)),), - (("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),), - (("Ogham", (0x1680, 0x169F)),), - (("Runic", (0x16A0, 0x16FF)),), - (("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))), - (("Mongolian", (0x1800, 0x18AF)),), - (("Braille Patterns", (0x2800, 0x28FF)),), - (("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))), - ( - ("Tagalog", (0x1700, 0x171F)), - ("Hanunoo", (0x1720, 0x173F)), - ("Buhid", (0x1740, 0x175F)), - ("Tagbanwa", (0x1760, 0x177F)), - ), - (("Old Italic", (0x10300, 0x1032F)),), - (("Gothic", (0x10330, 0x1034F)),), - (("Deseret", (0x10400, 0x1044F)),), - ( - ("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)), - 
("Musical Symbols", (0x1D100, 0x1D1FF)), - ("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)), - ), - (("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),), - ( - ("Private Use (plane 15)", (0xF0000, 0xFFFFD)), - ("Private Use (plane 16)", (0x100000, 0x10FFFD)), - ), - ( - ("Variation Selectors", (0xFE00, 0xFE0F)), - ("Variation Selectors Supplement", (0xE0100, 0xE01EF)), - ), - (("Tags", (0xE0000, 0xE007F)),), - (("Limbu", (0x1900, 0x194F)),), - (("Tai Le", (0x1950, 0x197F)),), - (("New Tai Lue", (0x1980, 0x19DF)),), - (("Buginese", (0x1A00, 0x1A1F)),), - (("Glagolitic", (0x2C00, 0x2C5F)),), - (("Tifinagh", (0x2D30, 0x2D7F)),), - (("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),), - (("Syloti Nagri", (0xA800, 0xA82F)),), - ( - ("Linear B Syllabary", (0x10000, 0x1007F)), - ("Linear B Ideograms", (0x10080, 0x100FF)), - ("Aegean Numbers", (0x10100, 0x1013F)), - ), - (("Ancient Greek Numbers", (0x10140, 0x1018F)),), - (("Ugaritic", (0x10380, 0x1039F)),), - (("Old Persian", (0x103A0, 0x103DF)),), - (("Shavian", (0x10450, 0x1047F)),), - (("Osmanya", (0x10480, 0x104AF)),), - (("Cypriot Syllabary", (0x10800, 0x1083F)),), - (("Kharoshthi", (0x10A00, 0x10A5F)),), - (("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),), - ( - ("Cuneiform", (0x12000, 0x123FF)), - ("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)), - ), - (("Counting Rod Numerals", (0x1D360, 0x1D37F)),), - (("Sundanese", (0x1B80, 0x1BBF)),), - (("Lepcha", (0x1C00, 0x1C4F)),), - (("Ol Chiki", (0x1C50, 0x1C7F)),), - (("Saurashtra", (0xA880, 0xA8DF)),), - (("Kayah Li", (0xA900, 0xA92F)),), - (("Rejang", (0xA930, 0xA95F)),), - (("Cham", (0xAA00, 0xAA5F)),), - (("Ancient Symbols", (0x10190, 0x101CF)),), - (("Phaistos Disc", (0x101D0, 0x101FF)),), - ( - ("Carian", (0x102A0, 0x102DF)), - ("Lycian", (0x10280, 0x1029F)), - ("Lydian", (0x10920, 0x1093F)), - ), - (("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))), -) - - -_unicodeStarts = [] -_unicodeValues = [None] - - -def 
_getUnicodeRanges(): - # build the ranges of codepoints for each unicode range bit, and cache result - if not _unicodeStarts: - unicodeRanges = [ - (start, (stop, bit)) - for bit, blocks in enumerate(OS2_UNICODE_RANGES) - for _, (start, stop) in blocks - ] - for start, (stop, bit) in sorted(unicodeRanges): - _unicodeStarts.append(start) - _unicodeValues.append((stop, bit)) - return _unicodeStarts, _unicodeValues - - -def intersectUnicodeRanges(unicodes, inverse=False): - """Intersect a sequence of (int) Unicode codepoints with the Unicode block - ranges defined in the OpenType specification v1.7, and return the set of - 'ulUnicodeRanges' bits for which there is at least ONE intersection. - If 'inverse' is True, return the the bits for which there is NO intersection. - - >>> intersectUnicodeRanges([0x0410]) == {9} - True - >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122} - True - >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == ( - ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122}) - True - """ - unicodes = set(unicodes) - unicodestarts, unicodevalues = _getUnicodeRanges() - bits = set() - for code in unicodes: - stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)] - if code <= stop: - bits.add(bit) - # The spec says that bit 57 ("Non Plane 0") implies that there's - # at least one codepoint beyond the BMP; so I also include all - # the non-BMP codepoints here - if any(0x10000 <= code < 0x110000 for code in unicodes): - bits.add(57) - return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits - - -if __name__ == "__main__": - import doctest, sys - - sys.exit(doctest.testmod().failed) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_v_m_t_x.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_v_m_t_x.py deleted file mode 100644 index c965c94ee50904e57f7bca86b3b602c00520a9cc..0000000000000000000000000000000000000000 
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_v_m_t_x.py +++ /dev/null @@ -1,11 +0,0 @@ -from fontTools import ttLib - -superclass = ttLib.getTableClass("hmtx") - - -class table__v_m_t_x(superclass): - - headerTag = "vhea" - advanceName = "height" - sideBearingName = "tsb" - numberOfMetricsName = "numberOfVMetrics" diff --git a/spaces/jordonpeter01/MusicGen/audiocraft/data/__init__.py b/spaces/jordonpeter01/MusicGen/audiocraft/data/__init__.py deleted file mode 100644 index 708a3dcead8dda89374a021177481dacae9f7fe9..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen/audiocraft/data/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from . import audio, audio_dataset diff --git a/spaces/keneonyeachonam/Visualization-Plotly-Sunbursts-Treemaps-and-WebGL-020823/app.py b/spaces/keneonyeachonam/Visualization-Plotly-Sunbursts-Treemaps-and-WebGL-020823/app.py deleted file mode 100644 index 7e82b33b6fcf4e043710475b5be4d99624c99459..0000000000000000000000000000000000000000 --- a/spaces/keneonyeachonam/Visualization-Plotly-Sunbursts-Treemaps-and-WebGL-020823/app.py +++ /dev/null @@ -1,230 +0,0 @@ -import streamlit as st -import numpy as np -import plotly.express as px -import pandas as pd -import plotly.graph_objects as go - -st.set_page_config(page_title="Plotly Graphing Libraries",layout='wide') - -import streamlit as st - -uploaded_files = st.file_uploader("Choose a CSV file", accept_multiple_files=True) -for uploaded_file in uploaded_files: - bytes_data = uploaded_file.read() - st.write("filename:", uploaded_file.name) - st.write(bytes_data) - - if st.checkbox("FileDetails"): - - filevalue = uploaded_file.getvalue() - st.write(filevalue) - st.write(uploaded_file.name) - 
st.write(uploaded_file.type) - st.write(uploaded_file.size) - #st.write(uploaded_file.last_modified) - #st.write(uploaded_file.charset) - st.write(uploaded_file.getbuffer()) - st.write(uploaded_file.getbuffer().nbytes) - st.write(uploaded_file.getbuffer().tobytes()) - st.write(uploaded_file.getbuffer().tolist()) - st.write(uploaded_file.getbuffer().itemsize) - st.write(uploaded_file.getbuffer().ndim) - st.write(uploaded_file.getbuffer().shape) - st.write(uploaded_file.getbuffer().strides) - st.write(uploaded_file.getbuffer().suboffsets) - st.write(uploaded_file.getbuffer().readonly) - st.write(uploaded_file.getbuffer().c_contiguous) - st.write(uploaded_file.getbuffer().f_contiguous) - st.write(uploaded_file.getbuffer().contiguous) - st.write(uploaded_file.getbuffer().itemsize) - st.write(uploaded_file.getbuffer().nbytes) - st.write(uploaded_file.getbuffer().ndim) - st.write(uploaded_file.getbuffer().shape) - st.write(uploaded_file.getbuffer().strides) - st.write(uploaded_file.getbuffer().suboffsets) - st.write(uploaded_file.getbuffer().readonly) - st.write(uploaded_file.getbuffer().c_contiguous) - st.write(uploaded_file.getbuffer().f_contiguous) - st.write(uploaded_file.getbuffer().contiguous) - st.write(uploaded_file.getbuffer().itemsize) - st.write(uploaded_file.getbuffer().nbytes) - st.write(uploaded_file.getbuffer().ndim) - st.write(uploaded_file.getbuffer().shape) - st.write(uploaded_file.getbuffer().strides) - st.write(uploaded_file.getbuffer().suboffsets) - st.write(uploaded_file.getbuffer().readonly) - st.write(uploaded_file.getbuffer().c_contiguous) - st.write(uploaded_file.getbuffer().f_contiguous) - myDF = pd.DataFrame(uploaded_file.getbuffer().tolist()) - - - st.markdown("# Treemaps from upload data file: https://plotly.com/python/treemaps/") - #df = myDF.query("year == 2007") - df = myDF - fig = px.treemap(df, path=[px.Constant("time"), 'message', 'name'], values='content', - color='lifeExp', hover_data=['iso_alpha'], - color_continuous_scale='RdBu', - 
color_continuous_midpoint=np.average(df['name'], weights=df['content'])) # todo - debug this and get it working with the data - fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) - #fig.show() - st.plotly_chart(fig, use_container_width=True) - - - - -#show replace - if st.checkbox("replace"): - mydf = st.dataframe(df) - columns = st.selectbox("Select column", df.columns) - old_values = st.multiselect("Current Values",list(df[columns].unique()),list(df[columns].unique())) - with st.form(key='my_form'): - col1,col2 = st.beta_columns(2) - st_input = st.number_input if is_numeric_dtype(df[columns]) else st.text_input - with col1: - old_val = st_input("old value") - with col2: - new_val = st_input("new value") - if st.form_submit_button("Replace"): - df[columns]=df[columns].replace(old_val,new_val) - st.success("{} replace with {} successfully ".format(old_val,new_val)) - excel = df.to_excel(r"F:\book2.xlsx", index = False, header=True,encoding="utf-8") - df =pd.read_excel(r"F:\book2.xlsx") - mydf.add_rows(df) - -st.markdown("WebGL Rendering with 1,000,000 Points") -import plotly.graph_objects as go -import numpy as np -N = 1000000 -fig = go.Figure() -fig.add_trace( - go.Scattergl( - x = np.random.randn(N), - y = np.random.randn(N), - mode = 'markers', - marker = dict( - line = dict( - width = 1, - color = 'DarkSlateGrey') - ) - ) -) -#fig.show() -st.plotly_chart(fig, use_container_width=True) - - - -st.markdown("# WebGL Graph - ScatterGL") -fig = go.Figure() -trace_num = 10 -point_num = 5000 -for i in range(trace_num): - fig.add_trace( - go.Scattergl( - x = np.linspace(0, 1, point_num), - y = np.random.randn(point_num)+(i*5) - ) - ) -fig.update_layout(showlegend=False) -#fig.show() -st.plotly_chart(fig, use_container_width=True) - - -st.markdown("# Treemaps: https://plotly.com/python/treemaps/") -df = px.data.gapminder().query("year == 2007") -fig = px.treemap(df, path=[px.Constant("world"), 'continent', 'country'], values='pop', - color='lifeExp', 
hover_data=['iso_alpha'], - color_continuous_scale='RdBu', - color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop'])) -fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) -#fig.show() -st.plotly_chart(fig, use_container_width=True) - - -st.markdown("# Sunburst: https://plotly.com/python/sunburst-charts/") - - -st.markdown("# Life Expectancy Sunburst") -df = px.data.gapminder().query("year == 2007") -fig = px.sunburst(df, path=['continent', 'country'], values='pop', - color='lifeExp', hover_data=['iso_alpha'], - color_continuous_scale='RdBu', - color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop'])) -st.plotly_chart(fig, use_container_width=True) - - -st.markdown("# Coffee Aromas and Tastes Sunburst") -df1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/sunburst-coffee-flavors-complete.csv') -df2 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/coffee-flavors.csv') -fig = go.Figure() -fig.add_trace(go.Sunburst( - ids=df1.ids, - labels=df1.labels, - parents=df1.parents, - domain=dict(column=0) -)) -fig.add_trace(go.Sunburst( - ids=df2.ids, - labels=df2.labels, - parents=df2.parents, - domain=dict(column=1), - maxdepth=2 -)) -fig.update_layout( - grid= dict(columns=2, rows=1), - margin = dict(t=0, l=0, r=0, b=0) -) -st.plotly_chart(fig, use_container_width=True) - - - - - -# Sunburst -#data = dict( -# character=["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"], -# parent=["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve" ], -# value=[10, 14, 12, 10, 2, 6, 6, 4, 4]) -#fig = px.sunburst( -# data, -# names='character', -# parents='parent', -# values='value', -#) -#fig.show() -#st.plotly_chart(fig, use_container_width=True) - - -df = px.data.tips() -fig = px.treemap(df, path=[px.Constant("all"), 'sex', 'day', 'time'], - values='total_bill', color='time', - 
color_discrete_map={'(?)':'lightgrey', 'Lunch':'gold', 'Dinner':'darkblue'}) -fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) -#fig.show() -fig.update_traces(marker=dict(cornerradius=5)) - -st.plotly_chart(fig, use_container_width=True) - - -df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/96c0bd/sunburst-coffee-flavors-complete.csv') -fig = go.Figure(go.Treemap( - ids = df.ids, - labels = df.labels, - parents = df.parents, - pathbar_textfont_size=15, - root_color="lightgrey" -)) -fig.update_layout( - uniformtext=dict(minsize=10, mode='hide'), - margin = dict(t=50, l=25, r=25, b=25) -) -#fig.show() -st.plotly_chart(fig, use_container_width=True) - - -df = pd.read_pickle('bloom_dataset.pkl') -fig = px.treemap(df, path=[px.Constant("ROOTS"), 'Macroarea', 'Family', 'Genus', 'Language', 'dataset_name'], - values='num_bytes', maxdepth=4) -fig.update_traces(root_color="pink") -fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) - -st.plotly_chart(fig, use_container_width=True) \ No newline at end of file diff --git a/spaces/keras-io/timeseries-classification-from-scratch/README.md b/spaces/keras-io/timeseries-classification-from-scratch/README.md deleted file mode 100644 index c059c505e844365d21807a9db4831ceb3197eda9..0000000000000000000000000000000000000000 --- a/spaces/keras-io/timeseries-classification-from-scratch/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Timeseries Classification From Scratch -emoji: 🌍 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/Bark-with-Voice-Cloning/setup.py b/spaces/kevinwang676/Bark-with-Voice-Cloning/setup.py deleted file mode 100644 index 606849326a4002007fd42060b51e69a19c18675c..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bark-with-Voice-Cloning/setup.py +++ /dev/null @@ -1,3 +0,0 @@ 
-from setuptools import setup - -setup() diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/audio2pose_models/networks.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/audio2pose_models/networks.py deleted file mode 100644 index 8aa0b1390e7b4bb0e16057ac94d2fe84f48421af..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/audio2pose_models/networks.py +++ /dev/null @@ -1,140 +0,0 @@ -import torch.nn as nn -import torch - - -class ResidualConv(nn.Module): - def __init__(self, input_dim, output_dim, stride, padding): - super(ResidualConv, self).__init__() - - self.conv_block = nn.Sequential( - nn.BatchNorm2d(input_dim), - nn.ReLU(), - nn.Conv2d( - input_dim, output_dim, kernel_size=3, stride=stride, padding=padding - ), - nn.BatchNorm2d(output_dim), - nn.ReLU(), - nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1), - ) - self.conv_skip = nn.Sequential( - nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=1), - nn.BatchNorm2d(output_dim), - ) - - def forward(self, x): - - return self.conv_block(x) + self.conv_skip(x) - - -class Upsample(nn.Module): - def __init__(self, input_dim, output_dim, kernel, stride): - super(Upsample, self).__init__() - - self.upsample = nn.ConvTranspose2d( - input_dim, output_dim, kernel_size=kernel, stride=stride - ) - - def forward(self, x): - return self.upsample(x) - - -class Squeeze_Excite_Block(nn.Module): - def __init__(self, channel, reduction=16): - super(Squeeze_Excite_Block, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction, bias=False), - nn.ReLU(inplace=True), - nn.Linear(channel // reduction, channel, bias=False), - nn.Sigmoid(), - ) - - def forward(self, x): - b, c, _, _ = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) - return x * y.expand_as(x) - - -class ASPP(nn.Module): - def __init__(self, in_dims, out_dims, rate=[6, 12, 18]): - 
super(ASPP, self).__init__() - - self.aspp_block1 = nn.Sequential( - nn.Conv2d( - in_dims, out_dims, 3, stride=1, padding=rate[0], dilation=rate[0] - ), - nn.ReLU(inplace=True), - nn.BatchNorm2d(out_dims), - ) - self.aspp_block2 = nn.Sequential( - nn.Conv2d( - in_dims, out_dims, 3, stride=1, padding=rate[1], dilation=rate[1] - ), - nn.ReLU(inplace=True), - nn.BatchNorm2d(out_dims), - ) - self.aspp_block3 = nn.Sequential( - nn.Conv2d( - in_dims, out_dims, 3, stride=1, padding=rate[2], dilation=rate[2] - ), - nn.ReLU(inplace=True), - nn.BatchNorm2d(out_dims), - ) - - self.output = nn.Conv2d(len(rate) * out_dims, out_dims, 1) - self._init_weights() - - def forward(self, x): - x1 = self.aspp_block1(x) - x2 = self.aspp_block2(x) - x3 = self.aspp_block3(x) - out = torch.cat([x1, x2, x3], dim=1) - return self.output(out) - - def _init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - -class Upsample_(nn.Module): - def __init__(self, scale=2): - super(Upsample_, self).__init__() - - self.upsample = nn.Upsample(mode="bilinear", scale_factor=scale) - - def forward(self, x): - return self.upsample(x) - - -class AttentionBlock(nn.Module): - def __init__(self, input_encoder, input_decoder, output_dim): - super(AttentionBlock, self).__init__() - - self.conv_encoder = nn.Sequential( - nn.BatchNorm2d(input_encoder), - nn.ReLU(), - nn.Conv2d(input_encoder, output_dim, 3, padding=1), - nn.MaxPool2d(2, 2), - ) - - self.conv_decoder = nn.Sequential( - nn.BatchNorm2d(input_decoder), - nn.ReLU(), - nn.Conv2d(input_decoder, output_dim, 3, padding=1), - ) - - self.conv_attn = nn.Sequential( - nn.BatchNorm2d(output_dim), - nn.ReLU(), - nn.Conv2d(output_dim, 1, 1), - ) - - def forward(self, x1, x2): - out = self.conv_encoder(x1) + self.conv_decoder(x2) - out = self.conv_attn(out) - return out * x2 \ No newline at end of file diff --git 
a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/extract_kp_videos_safe.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/extract_kp_videos_safe.py deleted file mode 100644 index ba3830b84bee98e02a7d0681803cc4b1719787c2..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/face3d/extract_kp_videos_safe.py +++ /dev/null @@ -1,151 +0,0 @@ -import os -import cv2 -import time -import glob -import argparse -import numpy as np -from PIL import Image -import torch -from tqdm import tqdm -from itertools import cycle -from torch.multiprocessing import Pool, Process, set_start_method - -from facexlib.alignment import landmark_98_to_68 -from facexlib.detection import init_detection_model - -from facexlib.utils import load_file_from_url -from facexlib.alignment.awing_arch import FAN - -def init_alignment_model(model_name, half=False, device='cuda', model_rootpath=None): - if model_name == 'awing_fan': - model = FAN(num_modules=4, num_landmarks=98, device=device) - model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/alignment_WFLW_4HG.pth' - else: - raise NotImplementedError(f'{model_name} is not implemented.') - - model_path = load_file_from_url( - url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath) - model.load_state_dict(torch.load(model_path, map_location=device)['state_dict'], strict=True) - model.eval() - model = model.to(device) - return model - - -class KeypointExtractor(): - def __init__(self, device='cuda'): - - ### gfpgan/weights - try: - import webui # in webui - root_path = 'extensions/SadTalker/gfpgan/weights' - - except: - root_path = 'gfpgan/weights' - - self.detector = init_alignment_model('awing_fan',device=device, model_rootpath=root_path) - self.det_net = init_detection_model('retinaface_resnet50', half=False,device=device, model_rootpath=root_path) - - def extract_keypoint(self, images, name=None, info=True): - if isinstance(images, 
list): - keypoints = [] - if info: - i_range = tqdm(images,desc='landmark Det:') - else: - i_range = images - - for image in i_range: - current_kp = self.extract_keypoint(image) - # current_kp = self.detector.get_landmarks(np.array(image)) - if np.mean(current_kp) == -1 and keypoints: - keypoints.append(keypoints[-1]) - else: - keypoints.append(current_kp[None]) - - keypoints = np.concatenate(keypoints, 0) - np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1)) - return keypoints - else: - while True: - try: - with torch.no_grad(): - # face detection -> face alignment. - img = np.array(images) - bboxes = self.det_net.detect_faces(images, 0.97) - - bboxes = bboxes[0] - img = img[int(bboxes[1]):int(bboxes[3]), int(bboxes[0]):int(bboxes[2]), :] - - keypoints = landmark_98_to_68(self.detector.get_landmarks(img)) # [0] - - #### keypoints to the original location - keypoints[:,0] += int(bboxes[0]) - keypoints[:,1] += int(bboxes[1]) - - break - except RuntimeError as e: - if str(e).startswith('CUDA'): - print("Warning: out of memory, sleep for 1s") - time.sleep(1) - else: - print(e) - break - except TypeError: - print('No face detected in this image') - shape = [68, 2] - keypoints = -1. 
* np.ones(shape) - break - if name is not None: - np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1)) - return keypoints - -def read_video(filename): - frames = [] - cap = cv2.VideoCapture(filename) - while cap.isOpened(): - ret, frame = cap.read() - if ret: - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame = Image.fromarray(frame) - frames.append(frame) - else: - break - cap.release() - return frames - -def run(data): - filename, opt, device = data - os.environ['CUDA_VISIBLE_DEVICES'] = device - kp_extractor = KeypointExtractor() - images = read_video(filename) - name = filename.split('/')[-2:] - os.makedirs(os.path.join(opt.output_dir, name[-2]), exist_ok=True) - kp_extractor.extract_keypoint( - images, - name=os.path.join(opt.output_dir, name[-2], name[-1]) - ) - -if __name__ == '__main__': - set_start_method('spawn') - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--input_dir', type=str, help='the folder of the input files') - parser.add_argument('--output_dir', type=str, help='the folder of the output files') - parser.add_argument('--device_ids', type=str, default='0,1') - parser.add_argument('--workers', type=int, default=4) - - opt = parser.parse_args() - filenames = list() - VIDEO_EXTENSIONS_LOWERCASE = {'mp4'} - VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE}) - extensions = VIDEO_EXTENSIONS - - for ext in extensions: - os.listdir(f'{opt.input_dir}') - print(f'{opt.input_dir}/*.{ext}') - filenames = sorted(glob.glob(f'{opt.input_dir}/*.{ext}')) - print('Total number of videos:', len(filenames)) - pool = Pool(opt.workers) - args_list = cycle([opt]) - device_ids = opt.device_ids.split(",") - device_ids = cycle(device_ids) - for data in tqdm(pool.imap_unordered(run, zip(filenames, args_list, device_ids))): - None diff --git a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py 
b/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py deleted file mode 100644 index 55bd4c5d1889a1a998b52eb56793bbc1eef1b691..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/backbones/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200 -from .mobilefacenet import get_mbf - - -def get_model(name, **kwargs): - # resnet - if name == "r18": - return iresnet18(False, **kwargs) - elif name == "r34": - return iresnet34(False, **kwargs) - elif name == "r50": - return iresnet50(False, **kwargs) - elif name == "r100": - return iresnet100(False, **kwargs) - elif name == "r200": - return iresnet200(False, **kwargs) - elif name == "r2060": - from .iresnet2060 import iresnet2060 - return iresnet2060(False, **kwargs) - elif name == "mbf": - fp16 = kwargs.get("fp16", False) - num_features = kwargs.get("num_features", 512) - return get_mbf(fp16=fp16, num_features=num_features) - else: - raise ValueError() \ No newline at end of file diff --git a/spaces/kevinwang676/VoiceChanger/src/facerender/modules/generator.py b/spaces/kevinwang676/VoiceChanger/src/facerender/modules/generator.py deleted file mode 100644 index 5a9edcb3b328d3afc99072b2461d7ca69919f813..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/src/facerender/modules/generator.py +++ /dev/null @@ -1,255 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F -from src.facerender.modules.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, ResBlock3d, SPADEResnetBlock -from src.facerender.modules.dense_motion import DenseMotionNetwork - - -class OcclusionAwareGenerator(nn.Module): - """ - Generator follows NVIDIA architecture. 
- """ - - def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth, - num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False): - super(OcclusionAwareGenerator, self).__init__() - - if dense_motion_params is not None: - self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel, - estimate_occlusion_map=estimate_occlusion_map, - **dense_motion_params) - else: - self.dense_motion_network = None - - self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3)) - - down_blocks = [] - for i in range(num_down_blocks): - in_features = min(max_features, block_expansion * (2 ** i)) - out_features = min(max_features, block_expansion * (2 ** (i + 1))) - down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) - self.down_blocks = nn.ModuleList(down_blocks) - - self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1) - - self.reshape_channel = reshape_channel - self.reshape_depth = reshape_depth - - self.resblocks_3d = torch.nn.Sequential() - for i in range(num_resblocks): - self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1)) - - out_features = block_expansion * (2 ** (num_down_blocks)) - self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True) - self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1) - - self.resblocks_2d = torch.nn.Sequential() - for i in range(num_resblocks): - self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1)) - - up_blocks = [] - for i in range(num_down_blocks): - in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i))) - out_features = max(block_expansion, block_expansion * (2 ** 
(num_down_blocks - i - 1))) - up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) - self.up_blocks = nn.ModuleList(up_blocks) - - self.final = nn.Conv2d(block_expansion, image_channel, kernel_size=(7, 7), padding=(3, 3)) - self.estimate_occlusion_map = estimate_occlusion_map - self.image_channel = image_channel - - def deform_input(self, inp, deformation): - _, d_old, h_old, w_old, _ = deformation.shape - _, _, d, h, w = inp.shape - if d_old != d or h_old != h or w_old != w: - deformation = deformation.permute(0, 4, 1, 2, 3) - deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear') - deformation = deformation.permute(0, 2, 3, 4, 1) - return F.grid_sample(inp, deformation) - - def forward(self, source_image, kp_driving, kp_source): - # Encoding (downsampling) part - out = self.first(source_image) - for i in range(len(self.down_blocks)): - out = self.down_blocks[i](out) - out = self.second(out) - bs, c, h, w = out.shape - # print(out.shape) - feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) - feature_3d = self.resblocks_3d(feature_3d) - - # Transforming feature representation according to deformation and occlusion - output_dict = {} - if self.dense_motion_network is not None: - dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving, - kp_source=kp_source) - output_dict['mask'] = dense_motion['mask'] - - if 'occlusion_map' in dense_motion: - occlusion_map = dense_motion['occlusion_map'] - output_dict['occlusion_map'] = occlusion_map - else: - occlusion_map = None - deformation = dense_motion['deformation'] - out = self.deform_input(feature_3d, deformation) - - bs, c, d, h, w = out.shape - out = out.view(bs, c*d, h, w) - out = self.third(out) - out = self.fourth(out) - - if occlusion_map is not None: - if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]: - occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], 
mode='bilinear') - out = out * occlusion_map - - # output_dict["deformed"] = self.deform_input(source_image, deformation) # 3d deformation cannot deform 2d image - - # Decoding part - out = self.resblocks_2d(out) - for i in range(len(self.up_blocks)): - out = self.up_blocks[i](out) - out = self.final(out) - out = F.sigmoid(out) - - output_dict["prediction"] = out - - return output_dict - - -class SPADEDecoder(nn.Module): - def __init__(self): - super().__init__() - ic = 256 - oc = 64 - norm_G = 'spadespectralinstance' - label_nc = 256 - - self.fc = nn.Conv2d(ic, 2 * ic, 3, padding=1) - self.G_middle_0 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_1 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_2 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_3 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_4 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_5 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.up_0 = SPADEResnetBlock(2 * ic, ic, norm_G, label_nc) - self.up_1 = SPADEResnetBlock(ic, oc, norm_G, label_nc) - self.conv_img = nn.Conv2d(oc, 3, 3, padding=1) - self.up = nn.Upsample(scale_factor=2) - - def forward(self, feature): - seg = feature - x = self.fc(feature) - x = self.G_middle_0(x, seg) - x = self.G_middle_1(x, seg) - x = self.G_middle_2(x, seg) - x = self.G_middle_3(x, seg) - x = self.G_middle_4(x, seg) - x = self.G_middle_5(x, seg) - x = self.up(x) - x = self.up_0(x, seg) # 256, 128, 128 - x = self.up(x) - x = self.up_1(x, seg) # 64, 256, 256 - - x = self.conv_img(F.leaky_relu(x, 2e-1)) - # x = torch.tanh(x) - x = F.sigmoid(x) - - return x - - -class OcclusionAwareSPADEGenerator(nn.Module): - - def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth, - num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False): - 
super(OcclusionAwareSPADEGenerator, self).__init__() - - if dense_motion_params is not None: - self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel, - estimate_occlusion_map=estimate_occlusion_map, - **dense_motion_params) - else: - self.dense_motion_network = None - - self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1)) - - down_blocks = [] - for i in range(num_down_blocks): - in_features = min(max_features, block_expansion * (2 ** i)) - out_features = min(max_features, block_expansion * (2 ** (i + 1))) - down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) - self.down_blocks = nn.ModuleList(down_blocks) - - self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1) - - self.reshape_channel = reshape_channel - self.reshape_depth = reshape_depth - - self.resblocks_3d = torch.nn.Sequential() - for i in range(num_resblocks): - self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1)) - - out_features = block_expansion * (2 ** (num_down_blocks)) - self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True) - self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1) - - self.estimate_occlusion_map = estimate_occlusion_map - self.image_channel = image_channel - - self.decoder = SPADEDecoder() - - def deform_input(self, inp, deformation): - _, d_old, h_old, w_old, _ = deformation.shape - _, _, d, h, w = inp.shape - if d_old != d or h_old != h or w_old != w: - deformation = deformation.permute(0, 4, 1, 2, 3) - deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear') - deformation = deformation.permute(0, 2, 3, 4, 1) - return F.grid_sample(inp, deformation) - - def forward(self, source_image, kp_driving, kp_source): - # Encoding (downsampling) part - out = 
self.first(source_image) - for i in range(len(self.down_blocks)): - out = self.down_blocks[i](out) - out = self.second(out) - bs, c, h, w = out.shape - # print(out.shape) - feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) - feature_3d = self.resblocks_3d(feature_3d) - - # Transforming feature representation according to deformation and occlusion - output_dict = {} - if self.dense_motion_network is not None: - dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving, - kp_source=kp_source) - output_dict['mask'] = dense_motion['mask'] - - # import pdb; pdb.set_trace() - - if 'occlusion_map' in dense_motion: - occlusion_map = dense_motion['occlusion_map'] - output_dict['occlusion_map'] = occlusion_map - else: - occlusion_map = None - deformation = dense_motion['deformation'] - out = self.deform_input(feature_3d, deformation) - - bs, c, d, h, w = out.shape - out = out.view(bs, c*d, h, w) - out = self.third(out) - out = self.fourth(out) - - # occlusion_map = torch.where(occlusion_map < 0.95, 0, occlusion_map) - - if occlusion_map is not None: - if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]: - occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear') - out = out * occlusion_map - - # Decoding part - out = self.decoder(out) - - output_dict["prediction"] = out - - return output_dict \ No newline at end of file diff --git a/spaces/kingabzpro/Loan_Classifier/README.md b/spaces/kingabzpro/Loan_Classifier/README.md deleted file mode 100644 index 063b47e4405217a6509398e674a7e56b1a86491f..0000000000000000000000000000000000000000 --- a/spaces/kingabzpro/Loan_Classifier/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Loan Classifier -emoji: 📚 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference 
diff --git a/spaces/kira4424/VITS-fast-fine-tuning/text/sanskrit.py b/spaces/kira4424/VITS-fast-fine-tuning/text/sanskrit.py deleted file mode 100644 index 0223aaac384a2f850f5bc20651fc18eb964607d0..0000000000000000000000000000000000000000 --- a/spaces/kira4424/VITS-fast-fine-tuning/text/sanskrit.py +++ /dev/null @@ -1,62 +0,0 @@ -import re -from indic_transliteration import sanscript - - -# List of (iast, ipa) pairs: -_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('a', 'ə'), - ('ā', 'aː'), - ('ī', 'iː'), - ('ū', 'uː'), - ('ṛ', 'ɹ`'), - ('ṝ', 'ɹ`ː'), - ('ḷ', 'l`'), - ('ḹ', 'l`ː'), - ('e', 'eː'), - ('o', 'oː'), - ('k', 'k⁼'), - ('k⁼h', 'kʰ'), - ('g', 'g⁼'), - ('g⁼h', 'gʰ'), - ('ṅ', 'ŋ'), - ('c', 'ʧ⁼'), - ('ʧ⁼h', 'ʧʰ'), - ('j', 'ʥ⁼'), - ('ʥ⁼h', 'ʥʰ'), - ('ñ', 'n^'), - ('ṭ', 't`⁼'), - ('t`⁼h', 't`ʰ'), - ('ḍ', 'd`⁼'), - ('d`⁼h', 'd`ʰ'), - ('ṇ', 'n`'), - ('t', 't⁼'), - ('t⁼h', 'tʰ'), - ('d', 'd⁼'), - ('d⁼h', 'dʰ'), - ('p', 'p⁼'), - ('p⁼h', 'pʰ'), - ('b', 'b⁼'), - ('b⁼h', 'bʰ'), - ('y', 'j'), - ('ś', 'ʃ'), - ('ṣ', 's`'), - ('r', 'ɾ'), - ('l̤', 'l`'), - ('h', 'ɦ'), - ("'", ''), - ('~', '^'), - ('ṃ', '^') -]] - - -def devanagari_to_ipa(text): - text = text.replace('ॐ', 'ओम्') - text = re.sub(r'\s*।\s*$', '.', text) - text = re.sub(r'\s*।\s*', ', ', text) - text = re.sub(r'\s*॥', '.', text) - text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST) - for regex, replacement in _iast_to_ipa: - text = re.sub(regex, replacement, text) - text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0) - [:-1]+'h'+x.group(1)+'*', text) - return text diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py deleted file mode 100644 index 336c7b254fe392b4703039fec86a83acdbd2e1a5..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py +++ 
/dev/null @@ -1,35 +0,0 @@ -_base_ = './cityscapes.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (769, 769) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py deleted file mode 100644 index 5674a39854cafd1f2e363bac99c58ccae62f24da..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py +++ /dev/null @@ -1,46 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - 
contract_dilation=True), - decode_head=dict( - type='NLHead', - in_channels=2048, - in_index=3, - channels=512, - dropout_ratio=0.1, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/color.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/color.py deleted file mode 100644 index 9041e0e6b7581c3356795d6a3c5e84667c88f025..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/visualization/color.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from enum import Enum - -import numpy as np - -from annotator.uniformer.mmcv.utils import is_str - - -class Color(Enum): - """An enum that defines common colors. - - Contains red, green, blue, cyan, yellow, magenta, white and black. - """ - red = (0, 0, 255) - green = (0, 255, 0) - blue = (255, 0, 0) - cyan = (255, 255, 0) - yellow = (0, 255, 255) - magenta = (255, 0, 255) - white = (255, 255, 255) - black = (0, 0, 0) - - -def color_val(color): - """Convert various input to color tuples. - - Args: - color (:obj:`Color`/str/tuple/int/ndarray): Color inputs - - Returns: - tuple[int]: A tuple of 3 integers indicating BGR channels. 
- """ - if is_str(color): - return Color[color].value - elif isinstance(color, Color): - return color.value - elif isinstance(color, tuple): - assert len(color) == 3 - for channel in color: - assert 0 <= channel <= 255 - return color - elif isinstance(color, int): - assert 0 <= color <= 255 - return color, color, color - elif isinstance(color, np.ndarray): - assert color.ndim == 1 and color.size == 3 - assert np.all((color >= 0) & (color <= 255)) - color = color.astype(np.uint8) - return tuple(color) - else: - raise TypeError(f'Invalid type for color: {type(color)}') diff --git a/spaces/kobayashi123/bingo/Dockerfile b/spaces/kobayashi123/bingo/Dockerfile deleted file mode 100644 index c677b05b75f7e4b2beee8c97fb47957a0861a83e..0000000000000000000000000000000000000000 --- a/spaces/kobayashi123/bingo/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM weaigc/bingo:latest - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -CMD npm start diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/macCreatorType.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/macCreatorType.py deleted file mode 100644 index 36b15aca51c564c7a9c05ebfcff8f17925ec1630..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/macCreatorType.py +++ /dev/null @@ -1,56 +0,0 @@ -from fontTools.misc.textTools import Tag, bytesjoin, strjoin - -try: - import xattr -except ImportError: - xattr = None - - -def _reverseString(s): - s = list(s) - s.reverse() - return strjoin(s) - - -def getMacCreatorAndType(path): - """Returns file creator and file type codes for a path. - - Args: - path (str): A file path. - - Returns: - A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first - representing the file creator and the second representing the - file type. 
- """ - if xattr is not None: - try: - finderInfo = xattr.getxattr(path, "com.apple.FinderInfo") - except (KeyError, IOError): - pass - else: - fileType = Tag(finderInfo[:4]) - fileCreator = Tag(finderInfo[4:8]) - return fileCreator, fileType - return None, None - - -def setMacCreatorAndType(path, fileCreator, fileType): - """Set file creator and file type codes for a path. - - Note that if the ``xattr`` module is not installed, no action is - taken but no error is raised. - - Args: - path (str): A file path. - fileCreator: A four-character file creator tag. - fileType: A four-character file type tag. - - """ - if xattr is not None: - from fontTools.misc.textTools import pad - - if not all(len(s) == 4 for s in (fileCreator, fileType)): - raise TypeError("arg must be string of 4 chars") - finderInfo = pad(bytesjoin([fileType, fileCreator]), 32) - xattr.setxattr(path, "com.apple.FinderInfo", finderInfo) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/zip.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/zip.py deleted file mode 100644 index 96bc71eccaf32585922254a5dd0fcfbdb941d3cf..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/implementations/zip.py +++ /dev/null @@ -1,127 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import zipfile - -import fsspec -from fsspec.archive import AbstractArchiveFileSystem - - -class ZipFileSystem(AbstractArchiveFileSystem): - """Read/Write contents of ZIP archive as a file-system - - Keeps file object open while instance lives. 
- - This class is pickleable, but not necessarily thread-safe - """ - - root_marker = "" - protocol = "zip" - cachable = False - - def __init__( - self, - fo="", - mode="r", - target_protocol=None, - target_options=None, - compression=zipfile.ZIP_STORED, - allowZip64=True, - compresslevel=None, - **kwargs, - ): - """ - Parameters - ---------- - fo: str or file-like - Contains ZIP, and must exist. If a str, will fetch file using - :meth:`~fsspec.open_files`, which must return one file exactly. - mode: str - Accept: "r", "w", "a" - target_protocol: str (optional) - If ``fo`` is a string, this value can be used to override the - FS protocol inferred from a URL - target_options: dict (optional) - Kwargs passed when instantiating the target FS, if ``fo`` is - a string. - compression, allowZip64, compresslevel: passed to ZipFile - Only relevant when creating a ZIP - """ - super().__init__(self, **kwargs) - if mode not in set("rwa"): - raise ValueError(f"mode '{mode}' no understood") - self.mode = mode - if isinstance(fo, str): - fo = fsspec.open( - fo, mode=mode + "b", protocol=target_protocol, **(target_options or {}) - ) - self.of = fo - self.fo = fo.__enter__() # the whole instance is a context - self.zip = zipfile.ZipFile( - self.fo, - mode=mode, - compression=compression, - allowZip64=allowZip64, - compresslevel=compresslevel, - ) - self.dir_cache = None - - @classmethod - def _strip_protocol(cls, path): - # zip file paths are always relative to the archive root - return super()._strip_protocol(path).lstrip("/") - - def __del__(self): - if hasattr(self, "zip"): - self.close() - del self.zip - - def close(self): - """Commits any write changes to the file. Done on ``del`` too.""" - self.zip.close() - - def _get_dirs(self): - if self.dir_cache is None or self.mode in set("wa"): - # when writing, dir_cache is always in the ZipFile's attributes, - # not read from the file. 
- files = self.zip.infolist() - self.dir_cache = { - dirname + "/": {"name": dirname + "/", "size": 0, "type": "directory"} - for dirname in self._all_dirnames(self.zip.namelist()) - } - for z in files: - f = {s: getattr(z, s, None) for s in zipfile.ZipInfo.__slots__} - f.update( - { - "name": z.filename, - "size": z.file_size, - "type": ("directory" if z.is_dir() else "file"), - } - ) - self.dir_cache[f["name"]] = f - - def pipe_file(self, path, value, **kwargs): - # override upstream, because we know the exact file size in this case - self.zip.writestr(path, value, **kwargs) - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - cache_options=None, - **kwargs, - ): - path = self._strip_protocol(path) - if "r" in mode and self.mode in set("wa"): - if self.exists(path): - raise IOError("ZipFS can only be open for reading or writing, not both") - raise FileNotFoundError(path) - if "r" in self.mode and "w" in mode: - raise IOError("ZipFS can only be open for reading or writing, not both") - out = self.zip.open(path, mode.strip("b")) - if "r" in mode: - info = self.info(path) - out.size = info["size"] - out.name = info["name"] - return out diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/tests/_helpers.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/tests/_helpers.py deleted file mode 100644 index 754ff83095e72b175a9905439f96af8bb189c07a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/jsonschema/tests/_helpers.py +++ /dev/null @@ -1,25 +0,0 @@ -from urllib.parse import urljoin - - -def issues_url(organization, repository): - return urljoin( - "https://github.com/", f"{organization}/{repository}/issues/", - ) - - -ISSUES_URL = issues_url("python-jsonschema", "jsonschema") -TEST_SUITE_ISSUES_URL = issues_url("json-schema-org", "JSON-Schema-Test-Suite") - - -def bug(issue=None): - message = 
"A known bug." - if issue is not None: - message += f" See {urljoin(ISSUES_URL, str(issue))}." - return message - - -def test_suite_bug(issue): - return ( - "A known test suite bug. " - f"See {urljoin(TEST_SUITE_ISSUES_URL, str(issue))}." - ) diff --git a/spaces/lambdalabs/image-mixer-demo/app.py b/spaces/lambdalabs/image-mixer-demo/app.py deleted file mode 100644 index 3bdd667b6f612f0e79f06547940644bb1767abb3..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/image-mixer-demo/app.py +++ /dev/null @@ -1,245 +0,0 @@ -from io import BytesIO -import torch -import numpy as np -from PIL import Image -from einops import rearrange -from torch import autocast -from contextlib import nullcontext -import requests -import functools - -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.plms import PLMSSampler -from ldm.extras import load_model_from_config, load_training_dir -import clip - -from PIL import Image - -from huggingface_hub import hf_hub_download -ckpt = hf_hub_download(repo_id="lambdalabs/image-mixer", filename="image-mixer-pruned.ckpt") -config = hf_hub_download(repo_id="lambdalabs/image-mixer", filename="image-mixer-config.yaml") - -device = "cuda:0" -model = load_model_from_config(config, ckpt, device=device, verbose=False) -model = model.to(device).half() - -clip_model, preprocess = clip.load("ViT-L/14", device=device) - -n_inputs = 5 - -torch.cuda.empty_cache() - -@functools.lru_cache() -def get_url_im(t): - user_agent = {'User-agent': 'gradio-app'} - response = requests.get(t, headers=user_agent) - return Image.open(BytesIO(response.content)) - -@torch.no_grad() -def get_im_c(im_path, clip_model): - # im = Image.open(im_path).convert("RGB") - prompts = preprocess(im_path).to(device).unsqueeze(0) - return clip_model.encode_image(prompts).float() - -@torch.no_grad() -def get_txt_c(txt, clip_model): - text = clip.tokenize([txt,]).to(device) - return clip_model.encode_text(text) - -def get_txt_diff(txt1, txt2, clip_model): 
- return get_txt_c(txt1, clip_model) - get_txt_c(txt2, clip_model) - -def to_im_list(x_samples_ddim): - x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - ims = [] - for x_sample in x_samples_ddim: - x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') - ims.append(Image.fromarray(x_sample.astype(np.uint8))) - return ims - -@torch.no_grad() -def sample(sampler, model, c, uc, scale, start_code, h=512, w=512, precision="autocast",ddim_steps=50): - ddim_eta=0.0 - precision_scope = autocast if precision=="autocast" else nullcontext - with precision_scope("cuda"): - shape = [4, h // 8, w // 8] - samples_ddim, _ = sampler.sample(S=ddim_steps, - conditioning=c, - batch_size=c.shape[0], - shape=shape, - verbose=False, - unconditional_guidance_scale=scale, - unconditional_conditioning=uc, - eta=ddim_eta, - x_T=start_code) - - x_samples_ddim = model.decode_first_stage(samples_ddim) - return to_im_list(x_samples_ddim) - -def run(*args): - - inps = [] - for i in range(0, len(args)-4, n_inputs): - inps.append(args[i:i+n_inputs]) - - scale, n_samples, seed, steps = args[-4:] - h = w = 640 - - sampler = DDIMSampler(model) - # sampler = PLMSSampler(model) - - torch.manual_seed(seed) - start_code = torch.randn(n_samples, 4, h//8, w//8, device=device) - conds = [] - - for b, t, im, s in zip(*inps): - if b == "Image": - this_cond = s*get_im_c(im, clip_model) - elif b == "Text/URL": - if t.startswith("http"): - im = get_url_im(t) - this_cond = s*get_im_c(im, clip_model) - else: - this_cond = s*get_txt_c(t, clip_model) - else: - this_cond = torch.zeros((1, 768), device=device) - conds.append(this_cond) - conds = torch.cat(conds, dim=0).unsqueeze(0) - conds = conds.tile(n_samples, 1, 1) - - ims = sample(sampler, model, conds, 0*conds, scale, start_code, ddim_steps=steps) - # return make_row(ims) - - # Clear GPU memory cache so less likely to OOM - torch.cuda.empty_cache() - return ims - - -import gradio as gr -from functools import partial 
-from itertools import chain - -def change_visible(txt1, im1, val): - outputs = {} - if val == "Image": - outputs[im1] = gr.update(visible=True) - outputs[txt1] = gr.update(visible=False) - elif val == "Text/URL": - outputs[im1] = gr.update(visible=False) - outputs[txt1] = gr.update(visible=True) - elif val == "Nothing": - outputs[im1] = gr.update(visible=False) - outputs[txt1] = gr.update(visible=False) - return outputs - - -with gr.Blocks(title="Image Mixer", css=".gr-box {border-color: #8136e2}") as demo: - - gr.Markdown("") - gr.Markdown( -""" -# Image Mixer - -_Created by [Justin Pinkney](https://www.justinpinkney.com) at [Lambda Labs](https://lambdalabs.com/)_ - -To skip the queue you can try it on - -### __Provide one or more images to be mixed together by a fine-tuned Stable Diffusion model (see tips and advice below👇).__ - -![banner-large.jpeg](https://s3.amazonaws.com/moonup/production/uploads/1674039767068-62bd5f951e22ec84279820e8.jpeg) - -""") - - btns = [] - txts = [] - ims = [] - strengths = [] - - with gr.Row(): - for i in range(n_inputs): - with gr.Box(): - with gr.Column(): - btn1 = gr.Radio( - choices=["Image", "Text/URL", "Nothing"], - label=f"Input {i} type", - interactive=True, - value="Nothing", - ) - txt1 = gr.Textbox(label="Text or Image URL", visible=False, interactive=True) - im1 = gr.Image(label="Image", interactive=True, visible=False, type="pil") - strength = gr.Slider(label="Strength", minimum=0, maximum=5, step=0.05, value=1, interactive=True) - - fn = partial(change_visible, txt1, im1) - btn1.change(fn=fn, inputs=[btn1], outputs=[txt1, im1], queue=False) - - btns.append(btn1) - txts.append(txt1) - ims.append(im1) - strengths.append(strength) - with gr.Row(): - cfg_scale = gr.Slider(label="CFG scale", value=3, minimum=1, maximum=10, step=0.5) - n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=1, step=1) - seed = gr.Slider(label="Seed", value=0, minimum=0, maximum=10000, step=1) - steps = gr.Slider(label="Steps", 
value=30, minimum=10, maximum=100, step=5) - - with gr.Row(): - submit = gr.Button("Generate") - output = gr.Gallery().style(grid=[1,2], height="640px") - - inps = list(chain(btns, txts, ims, strengths)) - inps.extend([cfg_scale,n_samples,seed, steps,]) - submit.click(fn=run, inputs=inps, outputs=[output]) - - ex = gr.Examples([ - [ - "Image", "Image", "Text/URL", "Nothing", "Nothing", - "","","central symmetric figure detailed artwork","","", - "gainsborough.jpeg","blonder.jpeg","blonder.jpeg","blonder.jpeg","blonder.jpeg", - 1,1.35,1.4,1,1, - 3.0, 1, 0, 30, - ], - [ - "Image", "Image", "Text/URL", "Image", "Nothing", - "","","flowers","","", - "ex2-1.jpeg","ex2-2.jpeg","blonder.jpeg","ex2-3.jpeg","blonder.jpeg", - 1,1,1.5,1.25,1, - 3.0, 1, 0, 30, - ], - [ - "Image", "Image", "Image", "Nothing", "Nothing", - "","","","","", - "ex1-1.jpeg","ex1-2.jpeg","ex1-3.jpeg","blonder.jpeg","blonder.jpeg", - 1.1,1,1.4,1,1, - 3.0, 1, 0, 30, - ], - ], - fn=run, inputs=inps, outputs=[output], cache_examples=True) - - gr.Markdown( -""" - -## Tips - -- You can provide between 1 and 5 inputs, these can either be an uploaded image a text prompt or a url to an image file. -- The order of the inputs shouldn't matter, any images will be centre cropped before use. -- Each input has an individual strength parameter which controls how big an influence it has on the output. -- The model was not trained using text and can not interpret complex text prompts. -- Using only text prompts doesn't work well, make sure there is at least one image or URL to an image. -- The parameters on the bottom row such as cfg scale do the same as for a normal Stable Diffusion model. -- Balancing the different inputs requires tweaking of the strengths, I suggest getting the right balance for a small number of samples and with few steps until you're -happy with the result then increase the steps for better quality. -- Outputs are 640x640 by default. 
-- If you want to run locally see the instruction on the [Model Card](https://huggingface.co/lambdalabs/image-mixer). - -## How does this work? - -This model is based on the [Stable Diffusion Image Variations model](https://huggingface.co/lambdalabs/sd-image-variations-diffusers) -but it has been fined tuned to take multiple CLIP image embeddings. During training, up to 5 random crops were taken from the training images and -the CLIP image embeddings were computed, these were then concatenated and used as the conditioning for the model. At inference time we can combine the image -embeddings from multiple images to mix their concepts (and we can also use the text encoder to add text concepts too). - -The model was trained on a subset of LAION Improved Aesthetics at a resolution of 640x640 and was trained using 8xA100 GPUs on [Lambda GPU Cloud](https://lambdalabs.com/service/gpu-cloud). - -""") - -demo.launch() diff --git a/spaces/lighdow/anime-cute-tts/app.py b/spaces/lighdow/anime-cute-tts/app.py deleted file mode 100644 index 4fe90ce979a525d4e0bb80c40d5ecb0d5c97e7d6..0000000000000000000000000000000000000000 --- a/spaces/lighdow/anime-cute-tts/app.py +++ /dev/null @@ -1,323 +0,0 @@ -import argparse -import json -import os -import re -import tempfile -from pathlib import Path - -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -import gradio.utils as gr_utils -import gradio_client.utils as gr_processing_utils -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from mel_processing import spectrogram_torch - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -audio_postprocess_ori = gr.Audio.postprocess - - -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) - - 
-gr.Audio.postprocess = audio_postprocess - - -def get_text(text, hps, is_symbol): - text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, speed, is_symbol): - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 150 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - sid = LongTensor([speaker_id]).to(device) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - -def create_vc_fn(model, hps, speaker_ids): - def vc_fn(original_speaker, target_speaker, input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - original_speaker_id = speaker_ids[original_speaker] - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != hps.data.sampling_rate: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) - with no_grad(): - y = torch.FloatTensor(audio) - y = y.unsqueeze(0) - spec = spectrogram_torch(y, hps.data.filter_length, - hps.data.sampling_rate, hps.data.hop_length, 
hps.data.win_length, - center=False).to(device) - spec_lengths = LongTensor([spec.size(-1)]).to(device) - sid_src = LongTensor([original_speaker_id]).to(device) - sid_tgt = LongTensor([target_speaker_id]).to(device) - audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][ - 0, 0].data.cpu().float().numpy() - del y, spec, spec_lengths, sid_src, sid_tgt - return "Success", (hps.data.sampling_rate, audio) - - return vc_fn - - -def create_soft_vc_fn(model, hps, speaker_ids): - def soft_vc_fn(target_speaker, input_audio1, input_audio2): - input_audio = input_audio1 - if input_audio is None: - input_audio = input_audio2 - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - with torch.inference_mode(): - units = hubert.units(torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0).to(device)) - with no_grad(): - unit_lengths = LongTensor([units.size(1)]).to(device) - sid = LongTensor([target_speaker_id]).to(device) - audio = model.infer(units, unit_lengths, sid=sid, noise_scale=.667, - noise_scale_w=0.8)[0][0, 0].data.cpu().float().numpy() - del units, unit_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return soft_vc_fn - - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_text): - return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ - else (temp_text, temp_text) - - return to_symbol_fn - - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if 
(root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#{audio_id}").querySelector("audio"); - if (audio == undefined) - return; - audio = audio.src; - let oA = document.createElement("a"); - oA.download = Math.floor(Math.random()*100000000)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - - device = torch.device(args.device) - models_tts = [] - models_vc = [] - models_soft_vc = [] - with open("saved_model/info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for i, info in models_info.items(): - name = info["title"] - author = info["author"] - lang = info["lang"] - example = info["example"] - config_path = f"saved_model/{i}/config.json" - model_path = f"saved_model/{i}/model.pth" - cover = info["cover"] - cover_path = f"saved_model/{i}/{cover}" if cover else None - hps = utils.get_hparams_from_file(config_path) - model = SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval().to(device) - if isinstance(hps.speakers, utils.HParams): - speakers, speaker_ids = zip(*hps.speakers.items()) - else: - speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"] - speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"] - - t = info["type"] - if t == "vits": - models_tts.append((name, author, cover_path, speakers, lang, example, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_symbol_fn(hps))) - models_vc.append((name, author, cover_path, speakers, create_vc_fn(model, hps, speaker_ids))) - elif t == 
"soft-vits-vc": - models_soft_vc.append((name, author, cover_path, speakers, create_soft_vc_fn(model, hps, speaker_ids))) - - hubert = torch.hub.load("bshall/hubert:main", "hubert_soft", trust_repo=True).to(device) - - app = gr.Blocks() - - with app: - gr.Markdown("# Moe TTS And Voice Conversion Using VITS Model\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.moegoe)\n\n" - "[Open In Colab]" - "(https://colab.research.google.com/drive/14Pb8lpmwZL-JI5Ub6jpG4sz2-8KS0kbS?usp=sharing)" - " without queue and length limitation.\n\n" - "Feel free to [open discussion](https://huggingface.co/spaces/skytnt/moe-tts/discussions/new) " - "if you want to add your model to this app.") - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, lang, example, symbols, tts_fn, - to_symbol_fn) in enumerate(models_tts): - with gr.TabItem(f"model{i}"): - with gr.Column(): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}\n\n" - f"language: {lang}") - tts_input1 = gr.TextArea(label="Text (150 words limitation)", value=example, - elem_id=f"tts-input{i}") - tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, - type="index", value=speakers[0]) - tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.5, maximum=2, step=0.1) - with gr.Accordion(label="Advanced Options", open=False): - temp_text_var = gr.Variable() - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[tts_input1], - samples=[[x] for x in symbols], - elem_id=f"symbol-list{i}") - symbol_list_json = gr.Json(value=symbols, visible=False) - tts_submit = gr.Button("Generate", variant="primary") - tts_output1 = gr.Textbox(label="Output Message") - tts_output2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio{i}") - download = gr.Button("Download Audio") - 
download.click(None, [], [], _js=download_audio_js.format(audio_id=f"tts-audio{i}")) - - tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, symbol_input], - [tts_output1, tts_output2]) - symbol_input.change(to_symbol_fn, - [symbol_input, tts_input1, temp_text_var], - [tts_input1, temp_text_var]) - symbol_list.click(None, [symbol_list, symbol_list_json], [], - _js=f""" - (i,symbols) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input{i}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - return []; - }}""") - - with gr.TabItem("Voice Conversion"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, vc_fn) in enumerate(models_vc): - with gr.TabItem(f"model{i}"): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}") - vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index", - value=speakers[0]) - vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", - value=speakers[min(len(speakers) - 1, 1)]) - vc_input3 = gr.Audio(label="Input Audio (30s limitation)") - vc_submit = gr.Button("Convert", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio", elem_id=f"vc-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], 
_js=download_audio_js.format(audio_id=f"vc-audio{i}")) - vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2]) - with gr.TabItem("Soft Voice Conversion"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, soft_vc_fn) in enumerate(models_soft_vc): - with gr.TabItem(f"model{i}"): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}") - vc_input1 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", - value=speakers[0]) - source_tabs = gr.Tabs() - with source_tabs: - with gr.TabItem("microphone"): - vc_input2 = gr.Audio(label="Input Audio (30s limitation)", source="microphone") - with gr.TabItem("upload"): - vc_input3 = gr.Audio(label="Input Audio (30s limitation)", source="upload") - vc_submit = gr.Button("Convert", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio", elem_id=f"svc-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"svc-audio{i}")) - # clear inputs - source_tabs.set_event_trigger("select", None, [], [vc_input2, vc_input3], - js="()=>[null,null]") - vc_submit.click(soft_vc_fn, [vc_input1, vc_input2, vc_input3], - [vc_output1, vc_output2]) - gr.Markdown( - "unofficial demo for \n\n" - "- [https://github.com/CjangCjengh/MoeGoe](https://github.com/CjangCjengh/MoeGoe)\n" - "- [https://github.com/Francis-Komizu/VITS](https://github.com/Francis-Komizu/VITS)\n" - "- [https://github.com/luoyily/MoeTTS](https://github.com/luoyily/MoeTTS)\n" - "- [https://github.com/Francis-Komizu/Sovits](https://github.com/Francis-Komizu/Sovits)" - ) - app.queue(concurrency_count=3).launch(show_api=False, share=args.share) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Hotel RnR Free Download Crack Serial Key Keygen [TOP].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Hotel RnR Free 
Download Crack Serial Key Keygen [TOP].md deleted file mode 100644 index bca860d9ab36a1526f6e0b1ee43123f05e3a1324..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Hotel RnR Free Download Crack Serial Key Keygen [TOP].md +++ /dev/null @@ -1,6 +0,0 @@ -

Hotel R'n'R Free Download Crack Serial Key Keygen


Download Filehttps://bytlly.com/2uGvQQ



- --xv-full-game-crack-serial-code-and-key-generator-download-free\/\r\n \r\n \r\nThe ... for its great variety on environments inside a considerably portable ... 4d29de3e1b
-
-
-

diff --git a/spaces/lint/sdpipe_webui/utils/functions.py b/spaces/lint/sdpipe_webui/utils/functions.py deleted file mode 100644 index 6f41a9fa1f690a29b5c9ffc9b0cabb524714570c..0000000000000000000000000000000000000000 --- a/spaces/lint/sdpipe_webui/utils/functions.py +++ /dev/null @@ -1,273 +0,0 @@ -import gradio as gr -import torch -import random -from PIL import Image -import os -import argparse -import shutil -import gc -import importlib -import json - -from diffusers import ( - StableDiffusionPipeline, - StableDiffusionImg2ImgPipeline, -) - - -from .inpaint_pipeline import SDInpaintPipeline as StableDiffusionInpaintPipelineLegacy - -from .textual_inversion import main as run_textual_inversion -from .shared import default_scheduler, scheduler_dict, model_ids - - -_xformers_available = importlib.util.find_spec("xformers") is not None -device = "cuda" if torch.cuda.is_available() else "cpu" -# device = 'cpu' -dtype = torch.float16 if device == "cuda" else torch.float32 -low_vram_mode = False - - -tab_to_pipeline = { - 1: StableDiffusionPipeline, - 2: StableDiffusionImg2ImgPipeline, - 3: StableDiffusionInpaintPipelineLegacy, -} - - -def load_pipe(model_id, scheduler_name, tab_index=1, pipe_kwargs="{}"): - global pipe, loaded_model_id - - scheduler = scheduler_dict[scheduler_name] - - pipe_class = tab_to_pipeline[tab_index] - - # load new weights from disk only when changing model_id - if model_id != loaded_model_id: - pipe = pipe_class.from_pretrained( - model_id, - torch_dtype=dtype, - safety_checker=None, - requires_safety_checker=False, - scheduler=scheduler.from_pretrained(model_id, subfolder="scheduler"), - **json.loads(pipe_kwargs), - ) - loaded_model_id = model_id - - # if same model_id, instantiate new pipeline with same underlying pytorch objects to avoid reloading weights from disk - elif pipe_class != pipe.__class__ or not isinstance(pipe.scheduler, scheduler): - pipe.components["scheduler"] = scheduler.from_pretrained( - model_id, subfolder="scheduler" - 
) - pipe = pipe_class(**pipe.components) - - if device == "cuda": - pipe = pipe.to(device) - if _xformers_available: - pipe.enable_xformers_memory_efficient_attention() - print("using xformers") - if low_vram_mode: - pipe.enable_attention_slicing() - print("using attention slicing to lower VRAM") - - return pipe - - -pipe = None -loaded_model_id = "" -pipe = load_pipe(model_ids[0], default_scheduler) - - -def pad_image(image): - w, h = image.size - if w == h: - return image - elif w > h: - new_image = Image.new(image.mode, (w, w), (0, 0, 0)) - new_image.paste(image, (0, (w - h) // 2)) - return new_image - else: - new_image = Image.new(image.mode, (h, h), (0, 0, 0)) - new_image.paste(image, ((h - w) // 2, 0)) - return new_image - - -@torch.no_grad() -def generate( - model_name, - scheduler_name, - prompt, - guidance, - steps, - n_images=1, - width=512, - height=512, - seed=0, - image=None, - strength=0.5, - inpaint_image=None, - inpaint_strength=0.5, - inpaint_radio="", - neg_prompt="", - tab_index=1, - pipe_kwargs="{}", - progress=gr.Progress(track_tqdm=True), -): - - if seed == -1: - seed = random.randint(0, 2147483647) - - generator = torch.Generator(device).manual_seed(seed) - - pipe = load_pipe( - model_id=model_name, - scheduler_name=scheduler_name, - tab_index=tab_index, - pipe_kwargs=pipe_kwargs, - ) - - status_message = f"Prompt: '{prompt}' | Seed: {seed} | Guidance: {guidance} | Scheduler: {scheduler_name} | Steps: {steps}" - - if tab_index == 1: - status_message = "Text to Image " + status_message - - result = pipe( - prompt, - negative_prompt=neg_prompt, - num_images_per_prompt=n_images, - num_inference_steps=int(steps), - guidance_scale=guidance, - width=width, - height=height, - generator=generator, - ) - - elif tab_index == 2: - - status_message = "Image to Image " + status_message - print(image.size) - image = image.resize((width, height)) - print(image.size) - - result = pipe( - prompt, - negative_prompt=neg_prompt, - num_images_per_prompt=n_images, 
- image=image, - num_inference_steps=int(steps), - strength=strength, - guidance_scale=guidance, - generator=generator, - ) - - elif tab_index == 3: - status_message = "Inpainting " + status_message - - init_image = inpaint_image["image"].resize((width, height)) - mask = inpaint_image["mask"].resize((width, height)) - - result = pipe( - prompt, - negative_prompt=neg_prompt, - num_images_per_prompt=n_images, - image=init_image, - mask_image=mask, - num_inference_steps=int(steps), - strength=inpaint_strength, - preserve_unmasked_image=( - inpaint_radio == "preserve non-masked portions of image" - ), - guidance_scale=guidance, - generator=generator, - ) - - else: - return None, f"Unhandled tab index: {tab_index}" - - return result.images, status_message - - -# based on lvkaokao/textual-inversion-training -def train_textual_inversion( - model_name, - scheduler_name, - type_of_thing, - files, - concept_word, - init_word, - text_train_steps, - text_train_bsz, - text_learning_rate, - progress=gr.Progress(track_tqdm=True), -): - - if device == "cpu": - raise gr.Error("Textual inversion training not supported on CPU") - - pipe = load_pipe( - model_id=model_name, - scheduler_name=scheduler_name, - tab_index=1, - ) - - pipe.disable_xformers_memory_efficient_attention() # xformers handled by textual inversion script - - concept_dir = "concept_images" - output_dir = "output_model" - training_resolution = 512 - - if os.path.exists(output_dir): - shutil.rmtree("output_model") - if os.path.exists(concept_dir): - shutil.rmtree("concept_images") - - os.makedirs(concept_dir, exist_ok=True) - os.makedirs(output_dir, exist_ok=True) - - gc.collect() - torch.cuda.empty_cache() - - if concept_word == "" or concept_word == None: - raise gr.Error("You forgot to define your concept prompt") - - for j, file_temp in enumerate(files): - file = Image.open(file_temp.name) - image = pad_image(file) - image = image.resize((training_resolution, training_resolution)) - extension = 
file_temp.name.split(".")[1] - image = image.convert("RGB") - image.save(f"{concept_dir}/{j+1}.{extension}", quality=100) - - args_general = argparse.Namespace( - train_data_dir=concept_dir, - learnable_property=type_of_thing, - placeholder_token=concept_word, - initializer_token=init_word, - resolution=training_resolution, - train_batch_size=text_train_bsz, - gradient_accumulation_steps=1, - gradient_checkpointing=True, - mixed_precision="fp16", - use_bf16=False, - max_train_steps=int(text_train_steps), - learning_rate=text_learning_rate, - scale_lr=True, - lr_scheduler="constant", - lr_warmup_steps=0, - output_dir=output_dir, - ) - - try: - final_result = run_textual_inversion(pipe, args_general) - except Exception as e: - raise gr.Error(e) - - pipe.text_encoder = pipe.text_encoder.eval().to(device, dtype=dtype) - pipe.unet = pipe.unet.eval().to(device, dtype=dtype) - - gc.collect() - torch.cuda.empty_cache() - - return ( - f"Finished training! Check the {output_dir} directory for saved model weights" - ) diff --git a/spaces/lolakshi/dhoni/README.md b/spaces/lolakshi/dhoni/README.md deleted file mode 100644 index d2f5f7ecdfa6b0cc923138acc0bc7a5c3ce636ec..0000000000000000000000000000000000000000 --- a/spaces/lolakshi/dhoni/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Dhoni -emoji: ⚡ -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/luost26/DiffAb/diffab/tools/eval/base.py b/spaces/luost26/DiffAb/diffab/tools/eval/base.py deleted file mode 100644 index 867eaa9aab04fd54ef1c3c8300e3f63ad05ff8f0..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/diffab/tools/eval/base.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -import re -import json -import shelve -from Bio import PDB -from typing import Optional, Tuple, List -from dataclasses import dataclass, field - - 
-@dataclass -class EvalTask: - in_path: str - ref_path: str - info: dict - structure: str - name: str - method: str - cdr: str - ab_chains: List - - residue_first: Optional[Tuple] = None - residue_last: Optional[Tuple] = None - - scores: dict = field(default_factory=dict) - - def get_gen_biopython_model(self): - parser = PDB.PDBParser(QUIET=True) - return parser.get_structure(self.in_path, self.in_path)[0] - - def get_ref_biopython_model(self): - parser = PDB.PDBParser(QUIET=True) - return parser.get_structure(self.ref_path, self.ref_path)[0] - - def save_to_db(self, db: shelve.Shelf): - db[self.in_path] = self - - def to_report_dict(self): - return { - 'method': self.method, - 'structure': self.structure, - 'cdr': self.cdr, - 'filename': os.path.basename(self.in_path), - **self.scores - } - - -class TaskScanner: - - def __init__(self, root, postfix=None, db: Optional[shelve.Shelf]=None): - super().__init__() - self.root = root - self.postfix = postfix - self.visited = set() - self.db = db - if db is not None: - for k in db.keys(): - self.visited.add(k) - - def _get_metadata(self, fpath): - json_path = os.path.join( - os.path.dirname(os.path.dirname(fpath)), - 'metadata.json' - ) - tag_name = os.path.basename(os.path.dirname(fpath)) - method_name = os.path.basename( - os.path.dirname(os.path.dirname(os.path.dirname(fpath))) - ) - try: - antibody_chains = set() - info = None - with open(json_path, 'r') as f: - metadata = json.load(f) - for item in metadata['items']: - if item['tag'] == tag_name: - info = item - antibody_chains.add(item['residue_first'][0]) - if info is not None: - info['antibody_chains'] = list(antibody_chains) - info['structure'] = metadata['identifier'] - info['method'] = method_name - return info - except (json.JSONDecodeError, FileNotFoundError) as e: - return None - - def scan(self) -> List[EvalTask]: - tasks = [] - if self.postfix is None or not self.postfix: - input_fname_pattern = '^\d+\.pdb$' - ref_fname = 'REF1.pdb' - else: - 
input_fname_pattern = f'^\d+\_{self.postfix}\.pdb$' - ref_fname = f'REF1_{self.postfix}.pdb' - for parent, _, files in os.walk(self.root): - for fname in files: - fpath = os.path.join(parent, fname) - if not re.match(input_fname_pattern, fname): - continue - if os.path.getsize(fpath) == 0: - continue - if fpath in self.visited: - continue - - # Path to the reference structure - ref_path = os.path.join(parent, ref_fname) - if not os.path.exists(ref_path): - continue - - # CDR information - info = self._get_metadata(fpath) - if info is None: - continue - tasks.append(EvalTask( - in_path = fpath, - ref_path = ref_path, - info = info, - structure = info['structure'], - name = info['name'], - method = info['method'], - cdr = info['tag'], - ab_chains = info['antibody_chains'], - residue_first = info.get('residue_first', None), - residue_last = info.get('residue_last', None), - )) - self.visited.add(fpath) - return tasks diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/managed_memory_pointer.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/managed_memory_pointer.h deleted file mode 100644 index c6a4c9756be37a9ba03806132ba6fb3381c21354..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/managed_memory_pointer.h +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright 2020 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace cuda -{ -namespace detail -{ - -// forward decl for iterator traits: -template -class managed_memory_pointer; - -} // end namespace detail -} // end namespace cuda -} // end namespace system - -// Specialize iterator traits to define `pointer` to something meaningful. -template -struct iterator_traits > > { -private: - typedef thrust::pointer< - Element, - Tag, - Reference, - thrust::system::cuda::detail::managed_memory_pointer > - ptr; - -public: - typedef typename ptr::iterator_category iterator_category; - typedef typename ptr::value_type value_type; - typedef typename ptr::difference_type difference_type; - typedef Element* pointer; - typedef typename ptr::reference reference; -}; // end iterator_traits - -namespace system -{ -namespace cuda -{ -namespace detail -{ - -/*! A version of thrust::cuda_cub::pointer that uses c++ references instead - * of thrust::cuda::reference. This is to allow managed memory pointers to - * be used with host-side code in standard libraries that are not compatible - * with proxy references. - */ -template -class managed_memory_pointer - : public thrust::pointer< - T, - thrust::cuda_cub::tag, - typename thrust::detail::add_reference::type, - thrust::system::cuda::detail::managed_memory_pointer > -{ -private: - typedef thrust::pointer< - T, - thrust::cuda_cub::tag, - typename thrust::detail::add_reference::type, - thrust::system::cuda::detail::managed_memory_pointer > - super_t; - -public: - typedef typename super_t::raw_pointer pointer; - - /*! \p managed_memory_pointer's no-argument constructor initializes its - * encapsulated pointer to \c 0. - */ - __host__ __device__ managed_memory_pointer() - : super_t() - {} - -#if THRUST_CPP_DIALECT >= 2011 - // NOTE: This is needed so that Thrust smart pointers can be used in - // `std::unique_ptr`. 
- __host__ __device__ managed_memory_pointer(decltype(nullptr)) - : super_t(nullptr) - {} -#endif - - /*! This constructor allows construction of a from a - * T*. - * - * \param ptr A raw pointer to copy from, presumed to point to a location - * in memory accessible by the \p cuda system. \tparam OtherT \p OtherT - * shall be convertible to \p T. - */ - template - __host__ __device__ explicit managed_memory_pointer(OtherT* ptr) - : super_t(ptr) - {} - - /*! This constructor allows construction from another pointer-like object - * with related type. - * - * \param other The \p OtherPointer to copy. - * \tparam OtherPointer The system tag associated with \p OtherPointer - * shall be convertible to \p thrust::system::cuda::tag and its element - * type shall be convertible to \p T. - */ - template - __host__ __device__ managed_memory_pointer( - const OtherPointer& other, - typename thrust::detail::enable_if_pointer_is_convertible< - OtherPointer, - managed_memory_pointer>::type* = 0) - : super_t(other) - {} - - /*! This constructor allows construction from another pointer-like object - * with \p void type. - * - * \param other The \p OtherPointer to copy. - * \tparam OtherPointer The system tag associated with \p OtherPointer - * shall be convertible to \p thrust::system::cuda::tag and its element - * type shall be \p void. - */ - template - __host__ __device__ explicit managed_memory_pointer( - const OtherPointer& other, - typename thrust::detail::enable_if_void_pointer_is_system_convertible< - OtherPointer, - managed_memory_pointer>::type* = 0) - : super_t(other) - {} - - /*! Assignment operator allows assigning from another pointer-like object - * with related type. - * - * \param other The other pointer-like object to assign from. - * \tparam OtherPointer The system tag associated with \p OtherPointer - * shall be convertible to \p thrust::system::cuda::tag and its element - * type shall be convertible to \p T. 
- */ - template - __host__ __device__ typename thrust::detail::enable_if_pointer_is_convertible< - OtherPointer, - managed_memory_pointer, - managed_memory_pointer&>::type - operator=(const OtherPointer& other) - { - return super_t::operator=(other); - } - -#if THRUST_CPP_DIALECT >= 2011 - // NOTE: This is needed so that Thrust smart pointers can be used in - // `std::unique_ptr`. - __host__ __device__ managed_memory_pointer& operator=(decltype(nullptr)) - { - super_t::operator=(nullptr); - return *this; - } -#endif - - __host__ __device__ - pointer operator->() const - { - return this->get(); - } - -}; // class managed_memory_pointer - -} // namespace detail -} // namespace cuda -} // namespace system -} // namespace thrust diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system_error.h b/spaces/ma-xu/LIVE/thrust/thrust/system_error.h deleted file mode 100644 index 7119ac4b63c1c05687b064eb17d07be92ca1b074..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system_error.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/*! \file thrust/system_error.h - * \brief System diagnostics - */ - -#pragma once - -#include - -namespace thrust -{ - -/*! \addtogroup system - * \{ - */ - -/*! \namespace thrust::system - * \brief \p thrust::system is the namespace which contains functionality for manipulating - * memory specific to one of Thrust's backend systems. 
It also contains functionality - * for reporting error conditions originating from the operating system or other - * low-level application program interfaces such as the CUDA runtime. - * They are provided in a separate namespace for import convenience but are - * also aliased in the top-level \p thrust namespace for easy access. - */ -namespace system -{ -} // end system - -/*! \} // end system - */ - -} // end thrust - -#include -#include - diff --git a/spaces/ma-xu/LIVE/within_distance.h b/spaces/ma-xu/LIVE/within_distance.h deleted file mode 100644 index e81537786189b9ded312cdb9b0472b2eef7bd512..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/within_distance.h +++ /dev/null @@ -1,446 +0,0 @@ -#pragma once - -#include "diffvg.h" -#include "edge_query.h" -#include "shape.h" -#include "vector.h" - -DEVICE -inline -bool within_distance(const Circle &circle, const Vector2f &pt, float r) { - auto dist_to_center = distance(circle.center, pt); - if (fabs(dist_to_center - circle.radius) < r) { - return true; - } - return false; -} - -DEVICE -inline -bool within_distance(const Path &path, const BVHNode *bvh_nodes, const Vector2f &pt, float r) { - auto num_segments = path.num_base_points; - constexpr auto max_bvh_size = 128; - int bvh_stack[max_bvh_size]; - auto stack_size = 0; - bvh_stack[stack_size++] = 2 * num_segments - 2; - while (stack_size > 0) { - const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; - if (node.child1 < 0) { - // leaf - auto base_point_id = node.child0; - auto point_id = - node.child1 - 1; - assert(base_point_id < num_segments); - assert(point_id < path.num_points); - if (path.num_control_points[base_point_id] == 0) { - // Straight line - auto i0 = point_id; - auto i1 = (point_id + 1) % path.num_points; - auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; - auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; - // project pt to line - auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); - auto r0 
= r; - auto r1 = r; - // override radius if path has thickness - if (path.thickness != nullptr) { - r0 = path.thickness[i0]; - r1 = path.thickness[i1]; - } - if (t < 0) { - if (distance_squared(p0, pt) < r0 * r0) { - return true; - } - } else if (t > 1) { - if (distance_squared(p1, pt) < r1 * r1) { - return true; - } - } else { - auto r = r0 + t * (r1 - r0); - if (distance_squared(p0 + t * (p1 - p0), pt) < r * r) { - return true; - } - } - } else if (path.num_control_points[base_point_id] == 1) { - // Quadratic Bezier curve - auto i0 = point_id; - auto i1 = point_id + 1; - auto i2 = (point_id + 2) % path.num_points; - auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; - auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; - auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; - if (path.use_distance_approx) { - auto cp = quadratic_closest_pt_approx(p0, p1, p2, pt); - return distance_squared(cp, pt) < r * r; - } - auto eval = [&](float t) -> Vector2f { - auto tt = 1 - t; - return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; - }; - auto r0 = r; - auto r1 = r; - auto r2 = r; - // override radius if path has thickness - if (path.thickness != nullptr) { - r0 = path.thickness[i0]; - r1 = path.thickness[i1]; - r2 = path.thickness[i2]; - } - if (distance_squared(eval(0), pt) < r0 * r0) { - return true; - } - if (distance_squared(eval(1), pt) < r2 * r2) { - return true; - } - - // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2 - // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 = q - // Want to solve (q - pt) dot q' = 0 - // q' = (p0-2p1+p2)t + (-p0+p1) - // Expanding (p0-2p1+p2)^2 t^3 + - // 3(p0-2p1+p2)(-p0+p1) t^2 + - // (2(-p0+p1)^2+(p0-2p1+p2)(p0-pt))t + - // (-p0+p1)(p0-pt) = 0 - auto A = sum((p0-2*p1+p2)*(p0-2*p1+p2)); - auto B = sum(3*(p0-2*p1+p2)*(-p0+p1)); - auto C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)); - auto D = sum((-p0+p1)*(p0-pt)); - float t[3]; - int num_sol = solve_cubic(A, B, C, D, t); - for (int j = 0; j < num_sol; j++) { - if 
(t[j] >= 0 && t[j] <= 1) { - auto tt = 1 - t[j]; - auto r = (tt*tt)*r0 + (2*tt*t[j])*r1 + (t[j]*t[j])*r2; - auto p = eval(t[j]); - if (distance_squared(p, pt) < r*r) { - return true; - } - } - } - } else if (path.num_control_points[base_point_id] == 2) { - // Cubic Bezier curve - auto i0 = point_id; - auto i1 = point_id + 1; - auto i2 = point_id + 2; - auto i3 = (point_id + 3) % path.num_points; - auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; - auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; - auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; - auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; - auto eval = [&](float t) -> Vector2f { - auto tt = 1 - t; - return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; - }; - auto r0 = r; - auto r1 = r; - auto r2 = r; - auto r3 = r; - // override radius if path has thickness - if (path.thickness != nullptr) { - r0 = path.thickness[i0]; - r1 = path.thickness[i1]; - r2 = path.thickness[i2]; - r3 = path.thickness[i3]; - } - if (distance_squared(eval(0), pt) < r0*r0) { - return true; - } - if (distance_squared(eval(1), pt) < r3*r3) { - return true; - } - // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3 - // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 - // Want to solve (q - pt) dot q' = 0 - // q' = 3*(-p0+3p1-3p2+p3)t^2 + 2*(3p0-6p1+3p2)t + (-3p0+3p1) - // Expanding - // 3*(-p0+3p1-3p2+p3)^2 t^5 - // 5*(-p0+3p1-3p2+p3)(3p0-6p1+3p2) t^4 - // 4*(-p0+3p1-3p2+p3)(-3p0+3p1) + 2*(3p0-6p1+3p2)^2 t^3 - // 3*(3p0-6p1+3p2)(-3p0+3p1) + 3*(-p0+3p1-3p2+p3)(p0-pt) t^2 - // (-3p0+3p1)^2+2(p0-pt)(3p0-6p1+3p2) t - // (p0-pt)(-3p0+3p1) - double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)); - double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); - double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); - double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + 
sum((-p0+3*p1-3*p2+p3)*(p0-pt))); - double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); - double F = sum((p0-pt)*(-3*p0+3*p1)); - // normalize the polynomial - B /= A; - C /= A; - D /= A; - E /= A; - F /= A; - // Isolator Polynomials: - // https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.2233&rep=rep1&type=pdf - // x/5 + B/25 - // /----------------------------------------------------- - // 5x^4 + 4B x^3 + 3C x^2 + 2D x + E / x^5 + B x^4 + C x^3 + D x^2 + E x + F - // x^5 + 4B/5 x^4 + 3C/5 x^3 + 2D/5 x^2 + E/5 x - // ---------------------------------------------------- - // B/5 x^4 + 2C/5 x^3 + 3D/5 x^2 + 4E/5 x + F - // B/5 x^4 + 4B^2/25 x^3 + 3BC/25 x^2 + 2BD/25 x + BE/25 - // ---------------------------------------------------- - // (2C/5 - 4B^2/25)x^3 + (3D/5-3BC/25)x^2 + (4E/5-2BD/25) + (F-BE/25) - auto p1A = ((2 / 5.f) * C - (4 / 25.f) * B * B); - auto p1B = ((3 / 5.f) * D - (3 / 25.f) * B * C); - auto p1C = ((4 / 5.f) * E - (2 / 25.f) * B * D); - auto p1D = F - B * E / 25.f; - // auto q1A = 1 / 5.f; - // auto q1B = B / 25.f; - // x/5 + B/25 = 0 - // x = -B/5 - auto q_root = -B/5.f; - double p_roots[3]; - int num_sol = solve_cubic(p1A, p1B, p1C, p1D, p_roots); - float intervals[4]; - if (q_root >= 0 && q_root <= 1) { - intervals[0] = q_root; - } - for (int j = 0; j < num_sol; j++) { - intervals[j + 1] = p_roots[j]; - } - auto num_intervals = 1 + num_sol; - // sort intervals - for (int j = 1; j < num_intervals; j++) { - for (int k = j; k > 0 && intervals[k - 1] > intervals[k]; k--) { - auto tmp = intervals[k]; - intervals[k] = intervals[k - 1]; - intervals[k - 1] = tmp; - } - } - auto eval_polynomial = [&] (double t) { - return t*t*t*t*t+ - B*t*t*t*t+ - C*t*t*t+ - D*t*t+ - E*t+ - F; - }; - auto eval_polynomial_deriv = [&] (double t) { - return 5*t*t*t*t+ - 4*B*t*t*t+ - 3*C*t*t+ - 2*D*t+ - E; - }; - auto lower_bound = 0.f; - for (int j = 0; j < num_intervals + 1; j++) { - if (j < num_intervals && intervals[j] < 0.f) { - 
continue; - } - auto upper_bound = j < num_intervals ? - min(intervals[j], 1.f) : 1.f; - auto lb = lower_bound; - auto ub = upper_bound; - auto lb_eval = eval_polynomial(lb); - auto ub_eval = eval_polynomial(ub); - if (lb_eval * ub_eval > 0) { - // Doesn't have root - continue; - } - if (lb_eval > ub_eval) { - swap_(lb, ub); - } - auto t = 0.5f * (lb + ub); - for (int it = 0; it < 20; it++) { - if (!(t >= lb && t <= ub)) { - t = 0.5f * (lb + ub); - } - auto value = eval_polynomial(t); - if (fabs(value) < 1e-5f || it == 19) { - break; - } - // The derivative may not be entirely accurate, - // but the bisection is going to handle this - if (value > 0.f) { - ub = t; - } else { - lb = t; - } - auto derivative = eval_polynomial_deriv(t); - t -= value / derivative; - } - auto tt = 1 - t; - auto r = (tt*tt*tt)*r0 + (3*tt*tt*t)*r1 + (3*tt*t*t)*r2 + (t*t*t)*r3; - if (distance_squared(eval(t), pt) < r * r) { - return true; - } - if (upper_bound >= 1.f) { - break; - } - lower_bound = upper_bound; - } - } else { - assert(false); - } - } else { - assert(node.child0 >= 0 && node.child1 >= 0); - const AABB &b0 = bvh_nodes[node.child0].box; - if (within_distance(b0, pt, bvh_nodes[node.child0].max_radius)) { - bvh_stack[stack_size++] = node.child0; - } - const AABB &b1 = bvh_nodes[node.child1].box; - if (within_distance(b1, pt, bvh_nodes[node.child1].max_radius)) { - bvh_stack[stack_size++] = node.child1; - } - assert(stack_size <= max_bvh_size); - } - } - return false; -} - -DEVICE -inline -int within_distance(const Rect &rect, const Vector2f &pt, float r) { - auto test = [&](const Vector2f &p0, const Vector2f &p1) { - // project pt to line - auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); - if (t < 0) { - if (distance_squared(p0, pt) < r * r) { - return true; - } - } else if (t > 1) { - if (distance_squared(p1, pt) < r * r) { - return true; - } - } else { - if (distance_squared(p0 + t * (p1 - p0), pt) < r * r) { - return true; - } - } - return false; - }; - auto left_top 
= rect.p_min; - auto right_top = Vector2f{rect.p_max.x, rect.p_min.y}; - auto left_bottom = Vector2f{rect.p_min.x, rect.p_max.y}; - auto right_bottom = rect.p_max; - // left - if (test(left_top, left_bottom)) { - return true; - } - // top - if (test(left_top, right_top)) { - return true; - } - // right - if (test(right_top, right_bottom)) { - return true; - } - // bottom - if (test(left_bottom, right_bottom)) { - return true; - } - return false; -} - -DEVICE -inline -bool within_distance(const Shape &shape, const BVHNode *bvh_nodes, const Vector2f &pt, float r) { - switch (shape.type) { - case ShapeType::Circle: - return within_distance(*(const Circle *)shape.ptr, pt, r); - case ShapeType::Ellipse: - // https://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf - assert(false); - return false; - case ShapeType::Path: - return within_distance(*(const Path *)shape.ptr, bvh_nodes, pt, r); - case ShapeType::Rect: - return within_distance(*(const Rect *)shape.ptr, pt, r); - } - assert(false); - return false; -} - -DEVICE -inline -bool within_distance(const SceneData &scene, - int shape_group_id, - const Vector2f &pt) { - const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; - // pt is in canvas space, transform it to shape's local space - auto local_pt = xform_pt(shape_group.canvas_to_shape, pt); - - constexpr auto max_bvh_stack_size = 64; - int bvh_stack[max_bvh_stack_size]; - auto stack_size = 0; - bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; - const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id]; - - while (stack_size > 0) { - const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; - if (node.child1 < 0) { - // leaf - auto shape_id = node.child0; - const auto &shape = scene.shapes[shape_id]; - if (within_distance(shape, scene.path_bvhs[shape_id], - local_pt, shape.stroke_width)) { - return true; - } - } else { - assert(node.child0 >= 0 && node.child1 >= 0); - const AABB &b0 = bvh_nodes[node.child0].box; 
- if (inside(b0, local_pt, bvh_nodes[node.child0].max_radius)) { - bvh_stack[stack_size++] = node.child0; - } - const AABB &b1 = bvh_nodes[node.child1].box; - if (inside(b1, local_pt, bvh_nodes[node.child1].max_radius)) { - bvh_stack[stack_size++] = node.child1; - } - assert(stack_size <= max_bvh_stack_size); - } - } - - return false; -} - -DEVICE -inline -bool within_distance(const SceneData &scene, - int shape_group_id, - const Vector2f &pt, - EdgeQuery *edge_query) { - if (edge_query == nullptr || shape_group_id != edge_query->shape_group_id) { - // Specialized version - return within_distance(scene, shape_group_id, pt); - } - const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; - // pt is in canvas space, transform it to shape's local space - auto local_pt = xform_pt(shape_group.canvas_to_shape, pt); - - constexpr auto max_bvh_stack_size = 64; - int bvh_stack[max_bvh_stack_size]; - auto stack_size = 0; - bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; - const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id]; - - auto ret = false; - while (stack_size > 0) { - const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; - if (node.child1 < 0) { - // leaf - auto shape_id = node.child0; - const auto &shape = scene.shapes[shape_id]; - if (within_distance(shape, scene.path_bvhs[shape_id], - local_pt, shape.stroke_width)) { - ret = true; - if (shape_id == edge_query->shape_id) { - edge_query->hit = true; - } - } - } else { - assert(node.child0 >= 0 && node.child1 >= 0); - const AABB &b0 = bvh_nodes[node.child0].box; - if (inside(b0, local_pt, bvh_nodes[node.child0].max_radius)) { - bvh_stack[stack_size++] = node.child0; - } - const AABB &b1 = bvh_nodes[node.child1].box; - if (inside(b1, local_pt, bvh_nodes[node.child1].max_radius)) { - bvh_stack[stack_size++] = node.child1; - } - assert(stack_size <= max_bvh_stack_size); - } - } - - return ret; -} diff --git a/spaces/maminghui/ChatGPT/README.md b/spaces/maminghui/ChatGPT/README.md 
deleted file mode 100644 index e480de7b25ab44894a247cf70e9954fd1b15f934..0000000000000000000000000000000000000000 --- a/spaces/maminghui/ChatGPT/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChuanhuChatGPT -emoji: 🐯 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.22.1 -app_file: app.py -pinned: false -license: gpl-3.0 -duplicated_from: JohnSmith9982/ChuanhuChatGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/models/networks/Synchronized-BatchNorm-PyTorch/tests/test_numeric_batchnorm.py b/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/models/networks/Synchronized-BatchNorm-PyTorch/tests/test_numeric_batchnorm.py deleted file mode 100644 index 63661389782806ea2182c049448df5d05fc6d2f1..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/models/networks/Synchronized-BatchNorm-PyTorch/tests/test_numeric_batchnorm.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# File : test_numeric_batchnorm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. 
- -import unittest - -import torch -import torch.nn as nn -from torch.autograd import Variable - -from sync_batchnorm.unittest import TorchTestCase - - -def handy_var(a, unbias=True): - n = a.size(0) - asum = a.sum(dim=0) - as_sum = (a ** 2).sum(dim=0) # a square sum - sumvar = as_sum - asum * asum / n - if unbias: - return sumvar / (n - 1) - else: - return sumvar / n - - -class NumericTestCase(TorchTestCase): - def testNumericBatchNorm(self): - a = torch.rand(16, 10) - bn = nn.BatchNorm1d(10, momentum=1, eps=1e-5, affine=False) - bn.train() - - a_var1 = Variable(a, requires_grad=True) - b_var1 = bn(a_var1) - loss1 = b_var1.sum() - loss1.backward() - - a_var2 = Variable(a, requires_grad=True) - a_mean2 = a_var2.mean(dim=0, keepdim=True) - a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-5)) - # a_std2 = torch.sqrt(a_var2.var(dim=0, keepdim=True, unbiased=False) + 1e-5) - b_var2 = (a_var2 - a_mean2) / a_std2 - loss2 = b_var2.sum() - loss2.backward() - - self.assertTensorClose(bn.running_mean, a.mean(dim=0)) - self.assertTensorClose(bn.running_var, handy_var(a)) - self.assertTensorClose(a_var1.data, a_var2.data) - self.assertTensorClose(b_var1.data, b_var2.data) - self.assertTensorClose(a_var1.grad, a_var2.grad) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/masakhane/dialogue-chat/style.css b/spaces/masakhane/dialogue-chat/style.css deleted file mode 100644 index 303c3d7ef3b06c42b211797cd2d5af9800589092..0000000000000000000000000000000000000000 --- a/spaces/masakhane/dialogue-chat/style.css +++ /dev/null @@ -1,16 +0,0 @@ -h1 { - text-align: center; -} - -#duplicate-button { - margin: auto; - color: white; - background: #1565c0; - border-radius: 100vh; -} - -#component-0 { - max-width: 900px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/mateuseap/magic-vocals/utils.py b/spaces/mateuseap/magic-vocals/utils.py deleted file mode 100644 index 
62be8d03a8e8b839f8747310ef0ec0e82fb8ff0a..0000000000000000000000000000000000000000 --- a/spaces/mateuseap/magic-vocals/utils.py +++ /dev/null @@ -1,151 +0,0 @@ -import ffmpeg -import numpy as np - -# import praatio -# import praatio.praat_scripts -import os -import sys - -import random - -import csv - -platform_stft_mapping = { - "linux": "stftpitchshift", - "darwin": "stftpitchshift", - "win32": "stftpitchshift.exe", -} - -stft = platform_stft_mapping.get(sys.platform) -# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe") - - -def CSVutil(file, rw, type, *args): - if type == "formanting": - if rw == "r": - with open(file) as fileCSVread: - csv_reader = list(csv.reader(fileCSVread)) - return ( - (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2]) - if csv_reader is not None - else (lambda: exec('raise ValueError("No data")'))() - ) - else: - if args: - doformnt = args[0] - else: - doformnt = False - qfr = args[1] if len(args) > 1 else 1.0 - tmb = args[2] if len(args) > 2 else 1.0 - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([doformnt, qfr, tmb]) - elif type == "stop": - stop = args[0] if args else False - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([stop]) - - -def load_audio(file, sr, DoFormant, Quefrency, Timbre): - converted = False - DoFormant, Quefrency, Timbre = CSVutil("csvdb/formanting.csv", "r", "formanting") - try: - # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26 - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 
- file = ( - file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # 防止小白拷路径头尾带了空格和"和回车 - file_formanted = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - - # print(f"dofor={bool(DoFormant)} timbr={Timbre} quef={Quefrency}\n") - - if ( - lambda DoFormant: True - if DoFormant.lower() == "true" - else (False if DoFormant.lower() == "false" else DoFormant) - )(DoFormant): - numerator = round(random.uniform(1, 4), 4) - # os.system(f"stftpitchshift -i {file} -q {Quefrency} -t {Timbre} -o {file_formanted}") - # print('stftpitchshift -i "%s" -p 1.0 --rms -w 128 -v 8 -q %s -t %s -o "%s"' % (file, Quefrency, Timbre, file_formanted)) - - if not file.endswith(".wav"): - if not os.path.isfile(f"{file_formanted}.wav"): - converted = True - # print(f"\nfile = {file}\n") - # print(f"\nfile_formanted = {file_formanted}\n") - converting = ( - ffmpeg.input(file_formanted, threads=0) - .output(f"{file_formanted}.wav") - .run( - cmd=["ffmpeg", "-nostdin"], - capture_stdout=True, - capture_stderr=True, - ) - ) - else: - pass - - file_formanted = ( - f"{file_formanted}.wav" - if not file_formanted.endswith(".wav") - else file_formanted - ) - - print(f" · Formanting {file_formanted}...\n") - - os.system( - '%s -i "%s" -q "%s" -t "%s" -o "%sFORMANTED_%s.wav"' - % ( - stft, - file_formanted, - Quefrency, - Timbre, - file_formanted, - str(numerator), - ) - ) - - print(f" · Formanted {file_formanted}!\n") - - # filepraat = (os.path.abspath(os.getcwd()) + '\\' + file).replace('/','\\') - # file_formantedpraat = ('"' + os.path.abspath(os.getcwd()) + '/' + 'formanted'.join(file_formanted) + '"').replace('/','\\') - # print("%sFORMANTED_%s.wav" % (file_formanted, str(numerator))) - - out, _ = ( - ffmpeg.input( - "%sFORMANTED_%s.wav" % (file_formanted, str(numerator)), threads=0 - ) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - - try: - 
os.remove("%sFORMANTED_%s.wav" % (file_formanted, str(numerator))) - except Exception: - pass - print("couldn't remove formanted type of file") - - else: - out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - except Exception as e: - raise RuntimeError(f"Failed to load audio: {e}") - - if converted: - try: - os.remove(file_formanted) - except Exception: - pass - print("couldn't remove converted type of file") - converted = False - - return np.frombuffer(out, np.float32).flatten() diff --git a/spaces/menghanxia/ReversibleHalftoning/checkpoints/ckpt_download.sh b/spaces/menghanxia/ReversibleHalftoning/checkpoints/ckpt_download.sh deleted file mode 100644 index 49d15e862f4f99d9f56c18f780ef468ca0922b51..0000000000000000000000000000000000000000 --- a/spaces/menghanxia/ReversibleHalftoning/checkpoints/ckpt_download.sh +++ /dev/null @@ -1 +0,0 @@ -wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1kw-FoS8lF_tgdiCkGG51UaUtmCcFvKiD' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1kw-FoS8lF_tgdiCkGG51UaUtmCcFvKiD" -O model_best.pth.tar && rm -rf /tmp/cookies.txt diff --git a/spaces/merve/anonymization/source/private-and-fair/umap-digit.js b/spaces/merve/anonymization/source/private-and-fair/umap-digit.js deleted file mode 100644 index f2fd20ea8d672ab49ca2698135c581605524bb46..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/private-and-fair/umap-digit.js +++ /dev/null @@ -1,139 +0,0 @@ - -!(async function(){ - var data = await util.getFile('mnist_train.csv') - data.forEach(d => { - delete d[''] - d.i = +d.i - }) - - var sel = d3.select('.umap-digit').html('') - .at({role: 'graphics-document', 'aria-label': `Color 
coded UMAP of MNIST 1s showing that increasing privacy will misclassify slanted and serif “1” digits first.`}) - - var umapSel = sel.append('div') - .append('div.chart-title').text('Sensitivity to higher privacy levels →') - .parent() - .st({maxWidth: 600, margin: '0 auto', marginBottom: 10}) - .append('div') - - - var buttonSel = sel.append('div.digit-button-container') - .appendMany('div.button', d3.range(10)) - .text(d => d) - .on('click', d => drawDigitUmap(d)) - - - drawDigitUmap(1) - - - async function drawDigitUmap(digit){ - buttonSel.classed('active', d => d == digit) - - // var umap = await util.getFile(`umap_train_${digit}.npy`) - var umap = await util.getFile(`cns-cache/umap_train_784_${digit}.npy`) - util.getFile(`cns-cache/mnist_train_raw_${digit}.npy`) - - var digitData = data - .filter(d => d.y == digit) - .map((d, i) => ({ - rawPos: [umap.data[i*2 + 0], umap.data[i*2 + 1]], - priv_order: d.priv_order, - y: d.y, - i: d.i - })) - - var c = d3.conventions({ - sel: umapSel.html(''), - width: 600, - height: 600, - layers: 'sdc', - margin: {top: 45} - }) - - var nTicks = 200 - c.svg.appendMany('rect', d3.range(nTicks)) - .at({ - height: 15, - width: 1, - fill: i => d3.interpolatePlasma(i/nTicks), - }) - .translate(i => [c.width/2 - nTicks/2 - 20 + i, -c.margin.top + 5]) - - - c.x.domain(d3.extent(digitData, d => d.rawPos[0])) - c.y.domain(d3.extent(digitData, d => d.rawPos[1]))//.range([0, c.height]) - digitData.forEach(d => d.pos = [c.x(d.rawPos[0]), c.y(d.rawPos[1])]) - - c.sel.select('canvas').st({pointerEvents: 'none'}) - var divSel = c.layers[1].st({pointerEvents: 'none'}) - var ctx = c.layers[2] - - digitData.forEach(d => { - ctx.beginPath() - ctx.fillStyle = d3.interpolatePlasma(1 - d.priv_order/60000) - ctx.rect(d.pos[0], d.pos[1], 2, 2) - ctx.fill() - }) - - var p = 10 - c.svg - .append('rect').at({width: c.width + p*2, height: c.height + p*2, x: -p, y: -p}) - .parent() - .call(d3.attachTooltip) - .on('mousemove', function(){ - var [px, py] = 
d3.mouse(this) - - var minPoint = _.minBy(digitData, d => { - var dx = d.pos[0] - px - var dy = d.pos[1] - py - - return dx*dx + dy*dy - }) - - var s = 4 - var c = d3.conventions({ - sel: ttSel.html('').append('div'), - width: 4*28, - height: 4*28, - layers: 'cs', - margin: {top: 0, left: 0, right: 0, bottom: 0} - }) - - //
Label: ${minPoint.y}
- // ttSel.append('div').html(` - //
Privacy Rank ${d3.format(',')(minPoint.priv_order)}
- // `) - - ttSel.classed('tooltip-footnote', 0).st({width: 112}) - - util.drawDigit(c.layers[0], +minPoint.i, s) - }) - - if (digit == 1){ - var circleDigits = [ - {r: 40, index: 1188}, - {r: 53, index: 18698}, - {r: 40, index: 1662} - ] - circleDigits.forEach(d => { - d.pos = digitData.filter(e => e.priv_order == d.index)[0].pos - }) - - c.svg.append('g') - .appendMany('g', circleDigits) - .translate(d => d.pos) - .append('circle') - .at({r: d => d.r, fill: 'none', stroke: '#fff', strokeDasharray: '2 3', strokeWidth: 1}) - - var {r, pos} = circleDigits[0] - - - divSel - .append('div').translate(pos) - .append('div').translate([r + 20, -r + 10]) - .st({width: 150, fontWeight: 300, fontSize: 14, color: '#fff', xbackground: 'rgba(255,0,0,.2)', lineHeight: '1.2em'}) - .text('Increasing privacy will misclassify slanted and serif “1” digits first') - } - } -})() - - diff --git a/spaces/merve/uncertainty-calibration/server-side/fill-in-the-blank/scatter-plot-colab/spearman-compare/watch-files.js b/spaces/merve/uncertainty-calibration/server-side/fill-in-the-blank/scatter-plot-colab/spearman-compare/watch-files.js deleted file mode 100644 index 8ab520922aa2b8cb8086ca86f5119fc0b46ac433..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/server-side/fill-in-the-blank/scatter-plot-colab/spearman-compare/watch-files.js +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright 2021 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - - -!(function(){ - function watchFile(path){ - var lastStr = '' - - console.log(path) - function check(){ - d3.text(path + '?' + Math.random(), (err, nextStr) => { - if (err){ - console.log(err) - return check() - } - - if (nextStr == lastStr) return - lastStr = nextStr - - if (path.includes('.js')){ - console.log('js', new Date()) - Function(nextStr.replace('\n', ';').replace('\n', ';'))() - } - - if (path.includes('.css')){ - console.log('css', new Date()) - - Array.from(document.querySelectorAll('link')) - .filter(d => d.href.includes(path) || d.href.includes('__hs_placeholder')) - .filter((d, i) => i == 0) - .forEach(d => d.href = path + '?' + Math.random()) - } - }) - - if (python_settings.isDev) setTimeout(check, 100) - } - check() - } - - ;[ - 'list.css', - 'style.css', - '../two-sentences/init-scatter.js', - '../two-sentences/init-util.js', - '../two-sentences/init-pair.js', - 'init.js' - ].forEach(filename => { - var root = document.currentScript.src.replace('watch-files.js', '').split('?')[0] - var path = root + filename - - if (python_settings.isDev){ - watchFile(path) - } else { - if (path.includes('.js')){ - var node = document.createElement('script') - node.setAttribute('src', path) - document.body.appendChild(node) - } - - if (path.includes('.css')){ - Array.from(document.querySelectorAll('link')) - .filter(d => d.href.includes(path) || d.href.includes('__hs_placeholder')) - .filter((d, i) => i == 0) - .forEach(d => d.href = path + '?' 
+ Math.random()) - } - } - }) -})() - - - diff --git a/spaces/mikkoar/marco/src/components/tailwind-indicator.tsx b/spaces/mikkoar/marco/src/components/tailwind-indicator.tsx deleted file mode 100644 index f2a1291213dd67055fcebe67fab574c8441338df..0000000000000000000000000000000000000000 --- a/spaces/mikkoar/marco/src/components/tailwind-indicator.tsx +++ /dev/null @@ -1,14 +0,0 @@ -export function TailwindIndicator() { - if (process.env.NODE_ENV === 'production') return null - - return ( -
-
xs
-
sm
-
md
-
lg
-
xl
-
2xl
-
- ) -} diff --git a/spaces/miku-hutao/vits-uma-genshin-honkai/utils.py b/spaces/miku-hutao/vits-uma-genshin-honkai/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/miku-hutao/vits-uma-genshin-honkai/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - 
plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, 
"config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/mmecheri/Rakuten_Streamlit/README.md b/spaces/mmecheri/Rakuten_Streamlit/README.md deleted file mode 100644 index 3015c29be154a0410c3cccc1d1a6e0f6e60f996e..0000000000000000000000000000000000000000 --- a/spaces/mmecheri/Rakuten_Streamlit/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Streamlit App -emoji: 📊 -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/branching/caption/ofa_mini_caption_stage_1_pretrain_qa_ground_10epmore.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/branching/caption/ofa_mini_caption_stage_1_pretrain_qa_ground_10epmore.sh deleted file mode 100644 
index d06c0e0640deba9ae15bbe3862bcc602d0c0d5fe..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/branching/caption/ofa_mini_caption_stage_1_pretrain_qa_ground_10epmore.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -#SBATCH --job-name=ofa_mini_caption_stage_1_pretrain_qa_ground_10epmore -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --gpus=8 -#SBATCH --threads-per-core=2 -#SBATCH --gpu-bind=closest -#SBATCH -C MI250 -#SBATCH -A gda2204 -#SBATCH --time=10:00:00 -#SBATCH --mail-type=END,FAIL -#SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_mini_caption_stage_1_pretrain_qa_ground_10epmore.out -#SBATCH --exclusive -#SBATCH --mail-user=mustafa.shukor@isir.upmc.fr - - -cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts -source /lus/home/NAT/gda2204/mshukor/.bashrc - -conda activate main - - -rm core-python3* - - -srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/branching/caption/ofa_mini_caption_stage_1_pretrain_qa_ground_10epmore.sh - - diff --git a/spaces/mya-mya/SentenceMixer/t5mixer.py b/spaces/mya-mya/SentenceMixer/t5mixer.py deleted file mode 100644 index 36c3323e147ecaa78f22f6cf20c349dc49a3f945..0000000000000000000000000000000000000000 --- a/spaces/mya-mya/SentenceMixer/t5mixer.py +++ /dev/null @@ -1,51 +0,0 @@ -from mixer import Mixer -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM -import torch - - -class T5Mixer(Mixer): - def __init__(self) -> None: - super().__init__() - self.tokenizer = AutoTokenizer.from_pretrained( - "llm-book/t5-base-long-livedoor-news-corpus") - self.model = AutoModelForSeq2SeqLM.from_pretrained( - "llm-book/t5-base-long-livedoor-news-corpus") - self.tokenid_to_tokentext = {i: t for t, - i in self.tokenizer.get_vocab().items()} - - def get_encoder_state(self, sentence: str): - inputs = self.tokenizer(sentence, return_tensors="pt") - eo = self.model.encoder.forward(**inputs) - es = eo["last_hidden_state"] - return es - - def get_mixed_encode_state( - self, 
sentence_A: str, sentence_B: str, A_ratio: float = 0.5): - es_A = self.get_encoder_state(sentence_A) - es_B = self.get_encoder_state(sentence_B) - n_tokens_A = es_A.size(1) - n_tokens_B = es_B.size(1) - if n_tokens_A >= n_tokens_B: - es = es_A.clone().detach()*A_ratio - es[:, :n_tokens_B, :] += es_B*(1.-A_ratio) - else: - es = es_B.clone().detach()*(1.-A_ratio) - es[:, :n_tokens_A, :] += es_A*A_ratio - return es - - def mix_sentences(self, sentence_A: str, sentence_B: str, A_ratio: float, max_n_tokens: int = 140): - es = self.get_mixed_encode_state(sentence_A, sentence_B, A_ratio) - to = torch.tensor([[self.tokenizer.pad_token_id]]) - for i in range(max_n_tokens): - od = self.model.decoder.forward( - input_ids=to, - encoder_hidden_states=es - ) - sd = od.last_hidden_state - l = self.model.lm_head(sd[0, -1, :]) - t_next = l.argmax() - to = torch.cat((to, t_next[None, None]), dim=-1) - if t_next == self.tokenizer.eos_token_id: - break - sentence = self.tokenizer.batch_decode(to)[0] - return sentence diff --git a/spaces/mygyasir/fast_diffusion/app.py b/spaces/mygyasir/fast_diffusion/app.py deleted file mode 100644 index d2f1e458bf21916754209f7af8c5a3c9fa4877cf..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/fast_diffusion/app.py +++ /dev/null @@ -1,148 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path - -models = [ - "Yntec/photoMovieX", - "Yntec/Toonify2", - -] -current_model = models[0] - -text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link") - -models2=[ - gr.Interface.load(f"models/{models[0]}",live=True,preprocess=False), - gr.Interface.load(f"models/{models[1]}",live=True,preprocess=False), - -] - - -def text_it1(inputs,text_gen1=text_gen1): - go_t1=text_gen1(inputs) - return(go_t1) - -def set_model(current_model): - current_model = models[current_model] - return gr.update(label=(f"{current_model}")) - - -def send_it1(inputs, model_choice): - proc1=models2[model_choice] - output1=proc1(inputs) 
- return(output1) -css="""""" - - -with gr.Blocks(css=css) as myface: - gr.HTML(""" - - - - - - - - - - - - - - - - - -""") - with gr.Row(): - with gr.Tab("Title"): - gr.HTML(""" Minimum Multiplier
-

Fill the Textbox at the top and click Generate Image

-

The first time you load a model it takes 200 seconds

-

But after it loads each image takes 20 seconds to generate!

- - """) - - with gr.Tab("Description"): - gr.HTML("""
-

As many Text-to-Image Models as I can fit here


-

Suggest more up in the "Community" button

- -
""") - - with gr.Tab("Tools"): - with gr.Tab("View"): - with gr.Row(): - with gr.Column(style="width=50%, height=70%"): - gr.Pil(label="Crop") - with gr.Column(style="width=50%, height=70%"): - gr.Pil(label="Crop") - - - with gr.Tab("Draw"): - with gr.Column(style="width=50%, height=70%"): - gr.Pil(label="Crop") - with gr.Column(style="width=50%, height=70%"): - gr.Pil(label="Draw") - - - gr.ImagePaint(label="Draw") - - with gr.Tab("Text"): - with gr.Row(): - - with gr.Column(scale=50): - gr.Textbox(label="", lines=8, interactive=True) - - - with gr.Column(scale=50): - gr.Textbox(label="", lines=8, interactive=True) - - with gr.Tab("Color Picker"): - with gr.Row(): - - with gr.Column(scale=50): - gr.ColorPicker(label="Color", interactive=True) - - - with gr.Column(scale=50): - gr.ImagePaint(label="Draw", interactive=True) - with gr.Row(): - with gr.Column(scale=100): - magic1=gr.Textbox(lines=4) - gr.HTML("""""") - run=gr.Button("Generate Image") - with gr.Row(): - with gr.Column(scale=100): - #Model selection dropdown - model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True) - with gr.Row(): - with gr.Column(style="width=800px"): - output1=gr.Image(label=(f"{current_model}")) - - - with gr.Row(): - with gr.Column(scale=50): - input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2) - use_short=gr.Button("Use Short Prompt") - see_prompts=gr.Button("Extend Idea") - - - def short_prompt(inputs): - return(inputs) - - model_name1.change(set_model,inputs=model_name1,outputs=[output1]) - - run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1]) - - use_short.click(short_prompt,inputs=[input_text],outputs=magic1) - - see_prompts.click(text_it1,inputs=[input_text],outputs=magic1) - -myface.queue(concurrency_count=200) -myface.launch(inline=True, show_api=False, max_threads=400) \ No newline at end of file diff 
--git a/spaces/mygyasir/stablediffusionapi-dreamlike-photoreal1/app.py b/spaces/mygyasir/stablediffusionapi-dreamlike-photoreal1/app.py deleted file mode 100644 index 5d4b0e6db735133a17ca9590ee53e2200c19d64c..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/stablediffusionapi-dreamlike-photoreal1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stablediffusionapi/dreamlike-photoreal1").launch() \ No newline at end of file diff --git a/spaces/nateraw/deepafx-st/deepafx_st/models/efficient_net/utils.py b/spaces/nateraw/deepafx-st/deepafx_st/models/efficient_net/utils.py deleted file mode 100644 index 826a62790920706d2c9f742fbe18386bf712ae4b..0000000000000000000000000000000000000000 --- a/spaces/nateraw/deepafx-st/deepafx_st/models/efficient_net/utils.py +++ /dev/null @@ -1,616 +0,0 @@ -"""utils.py - Helper functions for building the model and for loading model parameters. - These helper functions are built to mirror those in the official TensorFlow implementation. -""" - -# Author: lukemelas (github username) -# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch -# With adjustments and added comments by workingcoder (github username). - -import re -import math -import collections -from functools import partial -import torch -from torch import nn -from torch.nn import functional as F -from torch.utils import model_zoo - - -################################################################################ -# Help functions for model architecture -################################################################################ - -# GlobalParams and BlockArgs: Two namedtuples -# Swish and MemoryEfficientSwish: Two implementations of the method -# round_filters and round_repeats: -# Functions to calculate params for scaling model width and depth ! ! ! 
-# get_width_and_height_from_size and calculate_output_image_size -# drop_connect: A structural design -# get_same_padding_conv2d: -# Conv2dDynamicSamePadding -# Conv2dStaticSamePadding -# get_same_padding_maxPool2d: -# MaxPool2dDynamicSamePadding -# MaxPool2dStaticSamePadding -# It's an additional function, not used in EfficientNet, -# but can be used in other model (such as EfficientDet). - -# Parameters for the entire model (stem, all blocks, and head) -GlobalParams = collections.namedtuple('GlobalParams', [ - 'width_coefficient', 'depth_coefficient', 'image_size', 'dropout_rate', - 'num_classes', 'batch_norm_momentum', 'batch_norm_epsilon', - 'drop_connect_rate', 'depth_divisor', 'min_depth', 'include_top']) - -# Parameters for an individual model block -BlockArgs = collections.namedtuple('BlockArgs', [ - 'num_repeat', 'kernel_size', 'stride', 'expand_ratio', - 'input_filters', 'output_filters', 'se_ratio', 'id_skip']) - -# Set GlobalParams and BlockArgs's defaults -GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields) -BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields) - -# Swish activation function -if hasattr(nn, 'SiLU'): - Swish = nn.SiLU -else: - # For compatibility with old PyTorch versions - class Swish(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -# A memory-efficient implementation of Swish function -class SwishImplementation(torch.autograd.Function): - @staticmethod - def forward(ctx, i): - result = i * torch.sigmoid(i) - ctx.save_for_backward(i) - return result - - @staticmethod - def backward(ctx, grad_output): - i = ctx.saved_tensors[0] - sigmoid_i = torch.sigmoid(i) - return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i))) - - -class MemoryEfficientSwish(nn.Module): - def forward(self, x): - return SwishImplementation.apply(x) - - -def round_filters(filters, global_params): - """Calculate and round number of filters based on width multiplier. 
- Use width_coefficient, depth_divisor and min_depth of global_params. - - Args: - filters (int): Filters number to be calculated. - global_params (namedtuple): Global params of the model. - - Returns: - new_filters: New filters number after calculating. - """ - multiplier = global_params.width_coefficient - if not multiplier: - return filters - # TODO: modify the params names. - # maybe the names (width_divisor,min_width) - # are more suitable than (depth_divisor,min_depth). - divisor = global_params.depth_divisor - min_depth = global_params.min_depth - filters *= multiplier - min_depth = min_depth or divisor # pay attention to this line when using min_depth - # follow the formula transferred from official TensorFlow implementation - new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor) - if new_filters < 0.9 * filters: # prevent rounding by more than 10% - new_filters += divisor - return int(new_filters) - - -def round_repeats(repeats, global_params): - """Calculate module's repeat number of a block based on depth multiplier. - Use depth_coefficient of global_params. - - Args: - repeats (int): num_repeat to be calculated. - global_params (namedtuple): Global params of the model. - - Returns: - new repeat: New repeat number after calculating. - """ - multiplier = global_params.depth_coefficient - if not multiplier: - return repeats - # follow the formula transferred from official TensorFlow implementation - return int(math.ceil(multiplier * repeats)) - - -def drop_connect(inputs, p, training): - """Drop connect. - - Args: - input (tensor: BCWH): Input of this structure. - p (float: 0.0~1.0): Probability of drop connection. - training (bool): The running mode. - - Returns: - output: Output after drop connection. 
- """ - assert 0 <= p <= 1, 'p must be in range of [0,1]' - - if not training: - return inputs - - batch_size = inputs.shape[0] - keep_prob = 1 - p - - # generate binary_tensor mask according to probability (p for 0, 1-p for 1) - random_tensor = keep_prob - random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device) - binary_tensor = torch.floor(random_tensor) - - output = inputs / keep_prob * binary_tensor - return output - - -def get_width_and_height_from_size(x): - """Obtain height and width from x. - - Args: - x (int, tuple or list): Data size. - - Returns: - size: A tuple or list (H,W). - """ - if isinstance(x, int): - return x, x - if isinstance(x, list) or isinstance(x, tuple): - return x - else: - raise TypeError() - - -def calculate_output_image_size(input_image_size, stride): - """Calculates the output image size when using Conv2dSamePadding with a stride. - Necessary for static padding. Thanks to mannatsingh for pointing this out. - - Args: - input_image_size (int, tuple or list): Size of input image. - stride (int, tuple or list): Conv2d operation's stride. - - Returns: - output_image_size: A list [H,W]. - """ - if input_image_size is None: - return None - image_height, image_width = get_width_and_height_from_size(input_image_size) - stride = stride if isinstance(stride, int) else stride[0] - image_height = int(math.ceil(image_height / stride)) - image_width = int(math.ceil(image_width / stride)) - return [image_height, image_width] - - -# Note: -# The following 'SamePadding' functions make output size equal ceil(input size/stride). -# Only when stride equals 1, can the output size be the same as input size. -# Don't be confused by their function names ! ! ! - -def get_same_padding_conv2d(image_size=None): - """Chooses static padding if you have specified an image size, and dynamic padding otherwise. - Static padding is necessary for ONNX exporting of models. - - Args: - image_size (int or tuple): Size of the image. 
- - Returns: - Conv2dDynamicSamePadding or Conv2dStaticSamePadding. - """ - if image_size is None: - return Conv2dDynamicSamePadding - else: - return partial(Conv2dStaticSamePadding, image_size=image_size) - - -class Conv2dDynamicSamePadding(nn.Conv2d): - """2D Convolutions like TensorFlow, for a dynamic image size. - The padding is operated in forward function by calculating dynamically. - """ - - # Tips for 'SAME' mode padding. - # Given the following: - # i: width or height - # s: stride - # k: kernel size - # d: dilation - # p: padding - # Output after Conv2d: - # o = floor((i+p-((k-1)*d+1))/s+1) - # If o equals i, i = floor((i+p-((k-1)*d+1))/s+1), - # => p = (i-1)*s+((k-1)*d+1)-i - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): - super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) - self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2 - - def forward(self, x): - ih, iw = x.size()[-2:] - kh, kw = self.weight.size()[-2:] - sh, sw = self.stride - oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) # change the output size according to stride ! ! ! - pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0) - pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0) - if pad_h > 0 or pad_w > 0: - x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]) - return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) - - -class Conv2dStaticSamePadding(nn.Conv2d): - """2D Convolutions like TensorFlow's 'SAME' mode, with the given input image size. - The padding mudule is calculated in construction function, then used in forward. 
- """ - - # With the same calculation as Conv2dDynamicSamePadding - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, image_size=None, **kwargs): - super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs) - self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2 - - # Calculate padding based on image size and save it - assert image_size is not None - ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size - kh, kw = self.weight.size()[-2:] - sh, sw = self.stride - oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) - pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0) - pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0) - if pad_h > 0 or pad_w > 0: - self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, - pad_h // 2, pad_h - pad_h // 2)) - else: - self.static_padding = nn.Identity() - - def forward(self, x): - x = self.static_padding(x) - x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) - return x - - -def get_same_padding_maxPool2d(image_size=None): - """Chooses static padding if you have specified an image size, and dynamic padding otherwise. - Static padding is necessary for ONNX exporting of models. - - Args: - image_size (int or tuple): Size of the image. - - Returns: - MaxPool2dDynamicSamePadding or MaxPool2dStaticSamePadding. - """ - if image_size is None: - return MaxPool2dDynamicSamePadding - else: - return partial(MaxPool2dStaticSamePadding, image_size=image_size) - - -class MaxPool2dDynamicSamePadding(nn.MaxPool2d): - """2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size. - The padding is operated in forward function by calculating dynamically. 
- """ - - def __init__(self, kernel_size, stride, padding=0, dilation=1, return_indices=False, ceil_mode=False): - super().__init__(kernel_size, stride, padding, dilation, return_indices, ceil_mode) - self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride - self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size - self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation - - def forward(self, x): - ih, iw = x.size()[-2:] - kh, kw = self.kernel_size - sh, sw = self.stride - oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) - pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0) - pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0) - if pad_h > 0 or pad_w > 0: - x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]) - return F.max_pool2d(x, self.kernel_size, self.stride, self.padding, - self.dilation, self.ceil_mode, self.return_indices) - - -class MaxPool2dStaticSamePadding(nn.MaxPool2d): - """2D MaxPooling like TensorFlow's 'SAME' mode, with the given input image size. - The padding mudule is calculated in construction function, then used in forward. 
- """ - - def __init__(self, kernel_size, stride, image_size=None, **kwargs): - super().__init__(kernel_size, stride, **kwargs) - self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride - self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size - self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation - - # Calculate padding based on image size and save it - assert image_size is not None - ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size - kh, kw = self.kernel_size - sh, sw = self.stride - oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) - pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0) - pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0) - if pad_h > 0 or pad_w > 0: - self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)) - else: - self.static_padding = nn.Identity() - - def forward(self, x): - x = self.static_padding(x) - x = F.max_pool2d(x, self.kernel_size, self.stride, self.padding, - self.dilation, self.ceil_mode, self.return_indices) - return x - - -################################################################################ -# Helper functions for loading model params -################################################################################ - -# BlockDecoder: A Class for encoding and decoding BlockArgs -# efficientnet_params: A function to query compound coefficient -# get_model_params and efficientnet: -# Functions to get BlockArgs and GlobalParams for efficientnet -# url_map and url_map_advprop: Dicts of url_map for pretrained weights -# load_pretrained_weights: A function to load pretrained weights - -class BlockDecoder(object): - """Block Decoder for readability, - straight from the official TensorFlow repository. 
- """ - - @staticmethod - def _decode_block_string(block_string): - """Get a block through a string notation of arguments. - - Args: - block_string (str): A string notation of arguments. - Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. - - Returns: - BlockArgs: The namedtuple defined at the top of this file. - """ - assert isinstance(block_string, str) - - ops = block_string.split('_') - options = {} - for op in ops: - splits = re.split(r'(\d.*)', op) - if len(splits) >= 2: - key, value = splits[:2] - options[key] = value - - # Check stride - assert (('s' in options and len(options['s']) == 1) or - (len(options['s']) == 2 and options['s'][0] == options['s'][1])) - - return BlockArgs( - num_repeat=int(options['r']), - kernel_size=int(options['k']), - stride=[int(options['s'][0])], - expand_ratio=int(options['e']), - input_filters=int(options['i']), - output_filters=int(options['o']), - se_ratio=float(options['se']) if 'se' in options else None, - id_skip=('noskip' not in block_string)) - - @staticmethod - def _encode_block_string(block): - """Encode a block to a string. - - Args: - block (namedtuple): A BlockArgs type argument. - - Returns: - block_string: A String form of BlockArgs. - """ - args = [ - 'r%d' % block.num_repeat, - 'k%d' % block.kernel_size, - 's%d%d' % (block.strides[0], block.strides[1]), - 'e%s' % block.expand_ratio, - 'i%d' % block.input_filters, - 'o%d' % block.output_filters - ] - if 0 < block.se_ratio <= 1: - args.append('se%s' % block.se_ratio) - if block.id_skip is False: - args.append('noskip') - return '_'.join(args) - - @staticmethod - def decode(string_list): - """Decode a list of string notations to specify blocks inside the network. - - Args: - string_list (list[str]): A list of strings, each string is a notation of block. - - Returns: - blocks_args: A list of BlockArgs namedtuples of block args. 
- """ - assert isinstance(string_list, list) - blocks_args = [] - for block_string in string_list: - blocks_args.append(BlockDecoder._decode_block_string(block_string)) - return blocks_args - - @staticmethod - def encode(blocks_args): - """Encode a list of BlockArgs to a list of strings. - - Args: - blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. - - Returns: - block_strings: A list of strings, each string is a notation of block. - """ - block_strings = [] - for block in blocks_args: - block_strings.append(BlockDecoder._encode_block_string(block)) - return block_strings - - -def efficientnet_params(model_name): - """Map EfficientNet model name to parameter coefficients. - - Args: - model_name (str): Model name to be queried. - - Returns: - params_dict[model_name]: A (width,depth,res,dropout) tuple. - """ - params_dict = { - # Coefficients: width,depth,res,dropout - 'efficientnet-b0': (1.0, 1.0, 224, 0.2), - 'efficientnet-b1': (1.0, 1.1, 240, 0.2), - 'efficientnet-b2': (1.1, 1.2, 260, 0.3), - 'efficientnet-b3': (1.2, 1.4, 300, 0.3), - 'efficientnet-b4': (1.4, 1.8, 380, 0.4), - 'efficientnet-b5': (1.6, 2.2, 456, 0.4), - 'efficientnet-b6': (1.8, 2.6, 528, 0.5), - 'efficientnet-b7': (2.0, 3.1, 600, 0.5), - 'efficientnet-b8': (2.2, 3.6, 672, 0.5), - 'efficientnet-l2': (4.3, 5.3, 800, 0.5), - } - return params_dict[model_name] - - -def efficientnet(width_coefficient=None, depth_coefficient=None, image_size=None, - dropout_rate=0.2, drop_connect_rate=0.2, num_classes=1000, include_top=True): - """Create BlockArgs and GlobalParams for efficientnet model. - - Args: - width_coefficient (float) - depth_coefficient (float) - image_size (int) - dropout_rate (float) - drop_connect_rate (float) - num_classes (int) - - Meaning as the name suggests. - - Returns: - blocks_args, global_params. 
- """ - - # Blocks args for the whole model(efficientnet-b0 by default) - # It will be modified in the construction of EfficientNet Class according to model - blocks_args = [ - 'r1_k3_s11_e1_i32_o16_se0.25', - 'r2_k3_s22_e6_i16_o24_se0.25', - 'r2_k5_s22_e6_i24_o40_se0.25', - 'r3_k3_s22_e6_i40_o80_se0.25', - 'r3_k5_s11_e6_i80_o112_se0.25', - 'r4_k5_s22_e6_i112_o192_se0.25', - 'r1_k3_s11_e6_i192_o320_se0.25', - ] - blocks_args = BlockDecoder.decode(blocks_args) - - global_params = GlobalParams( - width_coefficient=width_coefficient, - depth_coefficient=depth_coefficient, - image_size=image_size, - dropout_rate=dropout_rate, - - num_classes=num_classes, - batch_norm_momentum=0.99, - batch_norm_epsilon=1e-3, - drop_connect_rate=drop_connect_rate, - depth_divisor=8, - min_depth=None, - include_top=include_top, - ) - - return blocks_args, global_params - - -def get_model_params(model_name, override_params): - """Get the block args and global params for a given model name. - - Args: - model_name (str): Model's name. - override_params (dict): A dict to modify global_params. - - Returns: - blocks_args, global_params - """ - if model_name.startswith('efficientnet'): - w, d, s, p = efficientnet_params(model_name) - # note: all models have drop connect rate = 0.2 - blocks_args, global_params = efficientnet( - width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s) - else: - raise NotImplementedError('model name is not pre-defined: {}'.format(model_name)) - if override_params: - # ValueError will be raised here if override_params has fields not included in global_params. 
- global_params = global_params._replace(**override_params) - return blocks_args, global_params - - -# train with Standard methods -# check more details in paper(EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks) -url_map = { - 'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth', - 'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth', - 'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth', - 'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth', - 'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth', - 'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth', - 'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth', - 'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth', -} - -# train with Adversarial Examples(AdvProp) -# check more details in paper(Adversarial Examples Improve Image Recognition) -url_map_advprop = { - 'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth', - 'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth', - 'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth', - 'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth', - 'efficientnet-b4': 
'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth', - 'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth', - 'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth', - 'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth', - 'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth', -} - -# TODO: add the petrained weights url map of 'efficientnet-l2' - - -def load_pretrained_weights(model, model_name, weights_path=None, load_fc=True, advprop=False, verbose=True): - """Loads pretrained weights from weights path or download using url. - - Args: - model (Module): The whole model of efficientnet. - model_name (str): Model name of efficientnet. - weights_path (None or str): - str: path to pretrained weights file on the local disk. - None: use pretrained weights downloaded from the Internet. - load_fc (bool): Whether to load pretrained weights for fc layer at the end of the model. - advprop (bool): Whether to load pretrained weights - trained with advprop (valid when weights_path is None). 
- """ - if isinstance(weights_path, str): - state_dict = torch.load(weights_path) - else: - # AutoAugment or Advprop (different preprocessing) - url_map_ = url_map_advprop if advprop else url_map - state_dict = model_zoo.load_url(url_map_[model_name]) - - if load_fc: - ret = model.load_state_dict(state_dict, strict=False) - assert not ret.missing_keys, 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys) - else: - state_dict.pop('_fc.weight') - state_dict.pop('_fc.bias') - ret = model.load_state_dict(state_dict, strict=False) - assert set(ret.missing_keys) == set( - ['_fc.weight', '_fc.bias']), 'Missing keys when loading pretrained weights: {}'.format(ret.missing_keys) - assert not ret.unexpected_keys, 'Missing keys when loading pretrained weights: {}'.format(ret.unexpected_keys) - - if verbose: - print('Loaded pretrained weights for {}'.format(model_name)) diff --git a/spaces/nateraw/deepafx-st/deepafx_st/system.py b/spaces/nateraw/deepafx-st/deepafx_st/system.py deleted file mode 100644 index 449afa586bf80bbafd858999670a2c364c6a9c2b..0000000000000000000000000000000000000000 --- a/spaces/nateraw/deepafx-st/deepafx_st/system.py +++ /dev/null @@ -1,563 +0,0 @@ -import torch -import auraloss -import torchaudio -from itertools import chain -import pytorch_lightning as pl -from argparse import ArgumentParser -from typing import Tuple, List, Dict - -import deepafx_st.utils as utils -from deepafx_st.utils import DSPMode -from deepafx_st.data.dataset import AudioDataset -from deepafx_st.models.encoder import SpectralEncoder -from deepafx_st.models.controller import StyleTransferController -from deepafx_st.processors.spsa.channel import SPSAChannel -from deepafx_st.processors.spsa.eps_scheduler import EpsilonScheduler -from deepafx_st.processors.proxy.channel import ProxyChannel -from deepafx_st.processors.autodiff.channel import AutodiffChannel - - -class System(pl.LightningModule): - def __init__( - self, - ext="wav", - dsp_sample_rate=24000, - 
**kwargs, - ): - super().__init__() - self.save_hyperparameters() - - self.eps_scheduler = EpsilonScheduler( - self.hparams.spsa_epsilon, - self.hparams.spsa_patience, - self.hparams.spsa_factor, - self.hparams.spsa_verbose, - ) - - self.hparams.dsp_mode = DSPMode.NONE - - # first construct the processor, since this will dictate encoder - if self.hparams.processor_model == "spsa": - self.processor = SPSAChannel( - self.hparams.dsp_sample_rate, - self.hparams.spsa_parallel, - self.hparams.batch_size, - ) - elif self.hparams.processor_model == "autodiff": - self.processor = AutodiffChannel(self.hparams.dsp_sample_rate) - elif self.hparams.processor_model == "proxy0": - # print('self.hparams.proxy_ckpts,',self.hparams.proxy_ckpts) - self.hparams.dsp_mode = DSPMode.NONE - self.processor = ProxyChannel( - self.hparams.proxy_ckpts, - self.hparams.freeze_proxies, - self.hparams.dsp_mode, - sample_rate=self.hparams.dsp_sample_rate, - ) - elif self.hparams.processor_model == "proxy1": - # print('self.hparams.proxy_ckpts,',self.hparams.proxy_ckpts) - self.hparams.dsp_mode = DSPMode.INFER - self.processor = ProxyChannel( - self.hparams.proxy_ckpts, - self.hparams.freeze_proxies, - self.hparams.dsp_mode, - sample_rate=self.hparams.dsp_sample_rate, - ) - elif self.hparams.processor_model == "proxy2": - # print('self.hparams.proxy_ckpts,',self.hparams.proxy_ckpts) - self.hparams.dsp_mode = DSPMode.TRAIN_INFER - self.processor = ProxyChannel( - self.hparams.proxy_ckpts, - self.hparams.freeze_proxies, - self.hparams.dsp_mode, - sample_rate=self.hparams.dsp_sample_rate, - ) - elif self.hparams.processor_model == "tcn1": - # self.processor = ConditionalTCN(self.hparams.sample_rate) - self.hparams.dsp_mode = DSPMode.NONE - self.processor = ProxyChannel( - [], - freeze_proxies=False, - dsp_mode=self.hparams.dsp_mode, - tcn_nblocks=self.hparams.tcn_nblocks, - tcn_dilation_growth=self.hparams.tcn_dilation_growth, - tcn_channel_width=self.hparams.tcn_channel_width, - 
tcn_kernel_size=self.hparams.tcn_kernel_size, - num_tcns=1, - sample_rate=self.hparams.sample_rate, - ) - elif self.hparams.processor_model == "tcn2": - self.hparams.dsp_mode = DSPMode.NONE - self.processor = ProxyChannel( - [], - freeze_proxies=False, - dsp_mode=self.hparams.dsp_mode, - tcn_nblocks=self.hparams.tcn_nblocks, - tcn_dilation_growth=self.hparams.tcn_dilation_growth, - tcn_channel_width=self.hparams.tcn_channel_width, - tcn_kernel_size=self.hparams.tcn_kernel_size, - num_tcns=2, - sample_rate=self.hparams.sample_rate, - ) - else: - raise ValueError(f"Invalid processor_model: {self.hparams.processor_model}") - - if self.hparams.encoder_ckpt is not None: - # load encoder weights from a pre-trained system - system = System.load_from_checkpoint(self.hparams.encoder_ckpt) - self.encoder = system.encoder - self.hparams.encoder_embed_dim = system.encoder.embed_dim - else: - self.encoder = SpectralEncoder( - self.processor.num_control_params, - self.hparams.sample_rate, - encoder_model=self.hparams.encoder_model, - embed_dim=self.hparams.encoder_embed_dim, - width_mult=self.hparams.encoder_width_mult, - ) - - if self.hparams.encoder_freeze: - for param in self.encoder.parameters(): - param.requires_grad = False - - self.controller = StyleTransferController( - self.processor.num_control_params, - self.hparams.encoder_embed_dim, - ) - - if len(self.hparams.recon_losses) != len(self.hparams.recon_loss_weights): - raise ValueError("Must supply same number of weights as losses.") - - self.recon_losses = torch.nn.ModuleDict() - for recon_loss in self.hparams.recon_losses: - if recon_loss == "mrstft": - self.recon_losses[recon_loss] = auraloss.freq.MultiResolutionSTFTLoss( - fft_sizes=[32, 128, 512, 2048, 8192, 32768], - hop_sizes=[16, 64, 256, 1024, 4096, 16384], - win_lengths=[32, 128, 512, 2048, 8192, 32768], - w_sc=0.0, - w_phs=0.0, - w_lin_mag=1.0, - w_log_mag=1.0, - ) - elif recon_loss == "mrstft-md": - self.recon_losses[recon_loss] = 
auraloss.freq.MultiResolutionSTFTLoss( - fft_sizes=[128, 512, 2048, 8192], - hop_sizes=[32, 128, 512, 2048], # 1 / 4 - win_lengths=[128, 512, 2048, 8192], - w_sc=0.0, - w_phs=0.0, - w_lin_mag=1.0, - w_log_mag=1.0, - ) - elif recon_loss == "mrstft-sm": - self.recon_losses[recon_loss] = auraloss.freq.MultiResolutionSTFTLoss( - fft_sizes=[512, 2048, 8192], - hop_sizes=[256, 1024, 4096], # 1 / 4 - win_lengths=[512, 2048, 8192], - w_sc=0.0, - w_phs=0.0, - w_lin_mag=1.0, - w_log_mag=1.0, - ) - elif recon_loss == "melfft": - self.recon_losses[recon_loss] = auraloss.freq.MelSTFTLoss( - self.hparams.sample_rate, - fft_size=self.hparams.train_length, - hop_size=self.hparams.train_length // 2, - win_length=self.hparams.train_length, - n_mels=128, - w_sc=0.0, - device="cuda" if self.hparams.gpus > 0 else "cpu", - ) - elif recon_loss == "melstft": - self.recon_losses[recon_loss] = auraloss.freq.MelSTFTLoss( - self.hparams.sample_rate, - device="cuda" if self.hparams.gpus > 0 else "cpu", - ) - elif recon_loss == "l1": - self.recon_losses[recon_loss] = torch.nn.L1Loss() - elif recon_loss == "sisdr": - self.recon_losses[recon_loss] = auraloss.time.SISDRLoss() - else: - raise ValueError( - f"Invalid reconstruction loss: {self.hparams.recon_losses}" - ) - - def forward( - self, - x: torch.Tensor, - y: torch.Tensor = None, - e_y: torch.Tensor = None, - z: torch.Tensor = None, - dsp_mode: DSPMode = DSPMode.NONE, - analysis_length: int = 0, - sample_rate: int = 24000, - ): - """Forward pass through the system subnetworks. - - Args: - x (tensor): Input audio tensor with shape (batch x 1 x samples) - y (tensor): Target audio tensor with shape (batch x 1 x samples) - e_y (tensor): Target embedding with shape (batch x edim) - z (tensor): Bottleneck latent. - dsp_mode (DSPMode): Mode of operation for the DSP blocks. - analysis_length (optional, int): Only analyze the first N samples. - sample_rate (optional, int): Desired sampling rate for the DSP blocks. 
- - You must supply target audio `y`, `z`, or an embedding for the target `e_y`. - - Returns: - y_hat (tensor): Output audio. - p (tensor): - e (tensor): - - """ - bs, chs, samp = x.size() - - if sample_rate != self.hparams.sample_rate: - x_enc = torchaudio.transforms.Resample( - sample_rate, self.hparams.sample_rate - ).to(x.device)(x) - if y is not None: - y_enc = torchaudio.transforms.Resample( - sample_rate, self.hparams.sample_rate - ).to(x.device)(y) - else: - x_enc = x - y_enc = y - - if analysis_length > 0: - x_enc = x_enc[..., :analysis_length] - if y is not None: - y_enc = y_enc[..., :analysis_length] - - e_x = self.encoder(x_enc) # generate latent embedding for input - - if y is not None: - e_y = self.encoder(y_enc) # generate latent embedding for target - elif e_y is None: - raise RuntimeError("Must supply y, z, or e_y. None supplied.") - - # learnable comparision - p = self.controller(e_x, e_y, z=z) - - # process audio conditioned on parameters - # if there are multiple channels process them using same parameters - y_hat = torch.zeros(x.shape).type_as(x) - for ch_idx in range(chs): - y_hat_ch = self.processor( - x[:, ch_idx : ch_idx + 1, :], - p, - epsilon=self.eps_scheduler.epsilon, - dsp_mode=dsp_mode, - sample_rate=sample_rate, - ) - y_hat[:, ch_idx : ch_idx + 1, :] = y_hat_ch - - return y_hat, p, e_x - - def common_paired_step( - self, - batch: Tuple, - batch_idx: int, - optimizer_idx: int = 0, - train: bool = False, - ): - """Model step used for validation and training. - - Args: - batch (Tuple[Tensor, Tensor]): Batch items containing input audio (x) and target audio (y). - batch_idx (int): Index of the batch within the current epoch. - optimizer_idx (int): Index of the optimizer, this step is called once for each optimizer. - The firs optimizer corresponds to the generator and the second optimizer, - corresponds to the adversarial loss (when in use). - train (bool): Whether step is called during training (True) or validation (False). 
- """ - x, y = batch - loss = 0 - dsp_mode = self.hparams.dsp_mode - - if train and dsp_mode.INFER.name == DSPMode.INFER.name: - dsp_mode = DSPMode.NONE - - # proces input audio through model - if self.hparams.style_transfer: - length = x.shape[-1] - - x_A = x[..., : length // 2] - x_B = x[..., length // 2 :] - - y_A = y[..., : length // 2] - y_B = y[..., length // 2 :] - - if torch.rand(1).sum() > 0.5: - y_ref = y_B - y = y_A - x = x_A - else: - y_ref = y_A - y = y_B - x = x_B - - y_hat, p, e = self(x, y=y_ref, dsp_mode=dsp_mode) - else: - y_ref = None - y_hat, p, e = self(x, dsp_mode=dsp_mode) - - # compute reconstruction loss terms - for loss_idx, (loss_name, recon_loss_fn) in enumerate( - self.recon_losses.items() - ): - temp_loss = recon_loss_fn(y_hat, y) # reconstruction loss - loss += float(self.hparams.recon_loss_weights[loss_idx]) * temp_loss - - self.log( - ("train" if train else "val") + f"_loss/{loss_name}", - temp_loss, - on_step=True, - on_epoch=True, - prog_bar=False, - logger=True, - sync_dist=True, - ) - - # log the overall aggregate loss - self.log( - ("train" if train else "val") + "_loss/loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=False, - logger=True, - sync_dist=True, - ) - - # store audio data - data_dict = { - "x": x.cpu(), - "y": y.cpu(), - "p": p.cpu(), - "e": e.cpu(), - "y_hat": y_hat.cpu(), - } - - if y_ref is not None: - data_dict["y_ref"] = y_ref.cpu() - - return loss, data_dict - - def training_step(self, batch, batch_idx, optimizer_idx=0): - loss, _ = self.common_paired_step( - batch, - batch_idx, - optimizer_idx, - train=True, - ) - - return loss - - def training_epoch_end(self, training_step_outputs): - if self.hparams.spsa_schedule and self.hparams.processor_model == "spsa": - self.eps_scheduler.step( - self.trainer.callback_metrics[self.hparams.train_monitor], - ) - - def validation_step(self, batch, batch_idx): - loss, data_dict = self.common_paired_step(batch, batch_idx) - - return data_dict - - def 
optimizer_step( - self, - epoch, - batch_idx, - optimizer, - optimizer_idx, - optimizer_closure, - on_tpu=False, - using_native_amp=False, - using_lbfgs=False, - ): - if optimizer_idx == 0: - optimizer.step(closure=optimizer_closure) - - def configure_optimizers(self): - # we need additional optimizer for the discriminator - optimizers = [] - g_optimizer = torch.optim.Adam( - chain( - self.encoder.parameters(), - self.processor.parameters(), - self.controller.parameters(), - ), - lr=self.hparams.lr, - betas=(0.9, 0.999), - ) - optimizers.append(g_optimizer) - - g_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - g_optimizer, - patience=self.hparams.lr_patience, - verbose=True, - ) - ms1 = int(self.hparams.max_epochs * 0.8) - ms2 = int(self.hparams.max_epochs * 0.95) - print( - "Learning rate schedule:", - f"0 {self.hparams.lr:0.2e} -> ", - f"{ms1} {self.hparams.lr*0.1:0.2e} -> ", - f"{ms2} {self.hparams.lr*0.01:0.2e}", - ) - g_scheduler = torch.optim.lr_scheduler.MultiStepLR( - g_optimizer, - milestones=[ms1, ms2], - gamma=0.1, - ) - - lr_schedulers = { - "scheduler": g_scheduler, - } - - return optimizers, lr_schedulers - - def train_dataloader(self): - - train_dataset = AudioDataset( - self.hparams.audio_dir, - subset="train", - train_frac=self.hparams.train_frac, - half=self.hparams.half, - length=self.hparams.train_length, - input_dirs=self.hparams.input_dirs, - random_scale_input=self.hparams.random_scale_input, - random_scale_target=self.hparams.random_scale_target, - buffer_size_gb=self.hparams.buffer_size_gb, - buffer_reload_rate=self.hparams.buffer_reload_rate, - num_examples_per_epoch=self.hparams.train_examples_per_epoch, - augmentations={ - "pitch": {"sr": self.hparams.sample_rate}, - "tempo": {"sr": self.hparams.sample_rate}, - }, - freq_corrupt=self.hparams.freq_corrupt, - drc_corrupt=self.hparams.drc_corrupt, - ext=self.hparams.ext, - ) - - g = torch.Generator() - g.manual_seed(0) - - return torch.utils.data.DataLoader( - train_dataset, - 
num_workers=self.hparams.num_workers, - batch_size=self.hparams.batch_size, - worker_init_fn=utils.seed_worker, - generator=g, - pin_memory=True, - persistent_workers=True, - timeout=60, - ) - - def val_dataloader(self): - - val_dataset = AudioDataset( - self.hparams.audio_dir, - subset="val", - half=self.hparams.half, - train_frac=self.hparams.train_frac, - length=self.hparams.val_length, - input_dirs=self.hparams.input_dirs, - buffer_size_gb=self.hparams.buffer_size_gb, - buffer_reload_rate=self.hparams.buffer_reload_rate, - random_scale_input=self.hparams.random_scale_input, - random_scale_target=self.hparams.random_scale_target, - num_examples_per_epoch=self.hparams.val_examples_per_epoch, - augmentations={}, - freq_corrupt=self.hparams.freq_corrupt, - drc_corrupt=self.hparams.drc_corrupt, - ext=self.hparams.ext, - ) - - self.val_dataset = val_dataset - - g = torch.Generator() - g.manual_seed(0) - - return torch.utils.data.DataLoader( - val_dataset, - num_workers=1, - batch_size=self.hparams.batch_size, - worker_init_fn=utils.seed_worker, - generator=g, - pin_memory=True, - persistent_workers=True, - timeout=60, - ) - def shutdown(self): - del self.processor - - # add any model hyperparameters here - @staticmethod - def add_model_specific_args(parent_parser): - parser = ArgumentParser(parents=[parent_parser], add_help=False) - # --- Training --- - parser.add_argument("--batch_size", type=int, default=32) - parser.add_argument("--lr", type=float, default=3e-4) - parser.add_argument("--lr_patience", type=int, default=20) - parser.add_argument("--recon_losses", nargs="+", default=["l1"]) - parser.add_argument("--recon_loss_weights", nargs="+", default=[1.0]) - # --- Controller --- - parser.add_argument( - "--processor_model", - type=str, - help="autodiff, spsa, tcn1, tcn2, proxy0, proxy1, proxy2", - ) - parser.add_argument("--controller_hidden_dim", type=int, default=256) - parser.add_argument("--style_transfer", action="store_true") - # --- Encoder --- - 
parser.add_argument("--encoder_model", type=str, default="mobilenet_v2") - parser.add_argument("--encoder_embed_dim", type=int, default=128) - parser.add_argument("--encoder_width_mult", type=int, default=2) - parser.add_argument("--encoder_ckpt", type=str, default=None) - parser.add_argument("--encoder_freeze", action="store_true", default=False) - # --- TCN --- - parser.add_argument("--tcn_causal", action="store_true") - parser.add_argument("--tcn_nblocks", type=int, default=4) - parser.add_argument("--tcn_dilation_growth", type=int, default=8) - parser.add_argument("--tcn_channel_width", type=int, default=32) - parser.add_argument("--tcn_kernel_size", type=int, default=13) - # --- SPSA --- - parser.add_argument("--plugin_config_file", type=str, default=None) - parser.add_argument("--spsa_epsilon", type=float, default=0.001) - parser.add_argument("--spsa_schedule", action="store_true") - parser.add_argument("--spsa_patience", type=int, default=10) - parser.add_argument("--spsa_verbose", action="store_true") - parser.add_argument("--spsa_factor", type=float, default=0.5) - parser.add_argument("--spsa_parallel", action="store_true") - # --- Proxy ---- - parser.add_argument("--proxy_ckpts", nargs="+") - parser.add_argument("--freeze_proxies", action="store_true", default=False) - parser.add_argument("--use_dsp", action="store_true", default=False) - parser.add_argument("--dsp_mode", choices=DSPMode, type=DSPMode) - # --- Dataset --- - parser.add_argument("--audio_dir", type=str) - parser.add_argument("--ext", type=str, default="wav") - parser.add_argument("--input_dirs", nargs="+") - parser.add_argument("--buffer_reload_rate", type=int, default=1000) - parser.add_argument("--buffer_size_gb", type=float, default=1.0) - parser.add_argument("--sample_rate", type=int, default=24000) - parser.add_argument("--dsp_sample_rate", type=int, default=24000) - parser.add_argument("--shuffle", type=bool, default=True) - parser.add_argument("--random_scale_input", 
action="store_true") - parser.add_argument("--random_scale_target", action="store_true") - parser.add_argument("--freq_corrupt", action="store_true") - parser.add_argument("--drc_corrupt", action="store_true") - parser.add_argument("--train_length", type=int, default=65536) - parser.add_argument("--train_frac", type=float, default=0.8) - parser.add_argument("--half", action="store_true") - parser.add_argument("--train_examples_per_epoch", type=int, default=10000) - parser.add_argument("--val_length", type=int, default=131072) - parser.add_argument("--val_examples_per_epoch", type=int, default=1000) - parser.add_argument("--num_workers", type=int, default=16) - - return parser diff --git a/spaces/nateraw/huggingpics-explorer/app.py b/spaces/nateraw/huggingpics-explorer/app.py deleted file mode 100644 index c048a4e1d9b0d6a6f64a7da612798a34eb2fee15..0000000000000000000000000000000000000000 --- a/spaces/nateraw/huggingpics-explorer/app.py +++ /dev/null @@ -1,156 +0,0 @@ -import logging -import shutil -import zipfile -from concurrent.futures import ThreadPoolExecutor -from pathlib import Path -from tempfile import TemporaryDirectory - -import requests -import streamlit as st -from huggingface_hub import Repository, create_repo, login, whoami -from huggingpics.data import get_image_urls_by_term -from requests.exceptions import HTTPError -from tqdm.auto import tqdm - -logger = logging.getLogger(__name__) - - -def show_images_of_term(search_term, num_cols=5, num_rows=3): - - # Get the image urls - # Arbitrarily adding 2 to make sure we have enough images in the event of a failed request - urls = get_image_urls_by_term(search_term, count=(num_rows * num_cols) + 2) - - st.title(search_term) - for row_id in range(num_rows): - cols = st.columns(num_cols) - for col_id in range(num_cols): - cols[col_id].image(urls[row_id * num_cols + col_id], use_column_width=True) - - -def download_image(img_url, filename): - response = requests.get(img_url) - response.raise_for_status() - 
img_bytes = response.content - with open(filename, 'wb') as img_file: - img_file.write(img_bytes) - - -def make_huggingpics_imagefolder(data_dir, search_terms, count=150, overwrite=False, resume=False, streamlit=False): - - data_dir = Path(data_dir) - - if data_dir.exists(): - if overwrite: - print(f"Deleting existing HuggingPics data directory to create new one: {data_dir}") - shutil.rmtree(data_dir) - else: - print(f"Using existing HuggingPics data directory: '{data_dir}'") - if not resume: - return - - if streamlit: - pbar = st.progress(0) - - for search_term_idx, search_term in enumerate(search_terms): - search_term_dir = data_dir / search_term - - search_term_dir.mkdir(exist_ok=True, parents=True) - is_term_dir_nonempty = any(Path(search_term_dir).iterdir()) - if is_term_dir_nonempty: - print(f"Skipping search term '{search_term}' because it already has images in it.") - continue - - urls = get_image_urls_by_term(search_term, count) - logger.info(f"Saving images of {search_term} to {str(search_term_dir)}...") - - with ThreadPoolExecutor() as executor: - for i, url in enumerate(tqdm(urls)): - executor.submit(download_image, url, search_term_dir / f'{i}.jpg') - - if streamlit: - pbar.progress((search_term_idx + 1) / len(search_terms)) - - if streamlit: - pbar.empty() - - -def zip_imagefolder(data_dir, zip_path='images.zip'): - data_dir = Path(data_dir) - zip_file = zipfile.ZipFile(zip_path, 'w') - for img_path in data_dir.glob('**/*.jpg'): - zip_file.write(img_path, arcname=f"{img_path.parent.name}/{img_path.name}") - zip_file.close() - - -def get_search_terms(): - terms = [ - st.sidebar.text_input("Term 1:"), - ] - while terms[-1] != "": - terms.append( - st.sidebar.text_input( - f"Term {len(terms) + 1}:", - ) - ) - terms = terms[:-1] - return terms - - -def main(): - - with st.sidebar: - st.title('🤗🖼 HuggingPics Explorer') - st.markdown( - """ -

- -

- """, - unsafe_allow_html=True, - ) - - names = get_search_terms() - for name in names: - show_images_of_term(name) - - with st.sidebar: - with st.form("Upload to 🤗 Hub"): - username = st.text_input('Username') - password = st.text_input('Password', type="password") - dataset_name = st.text_input('Dataset Name', value='huggingpics-data') - submit = st.form_submit_button('Upload to 🤗 Hub') - if submit: - try: - token = login(username, password) - repo_url = create_repo(dataset_name, token, exist_ok=True, repo_type='dataset') - with TemporaryDirectory() as tmp_dir: - repo_owner, repo_name = username, dataset_name - repo_namespace = f"{repo_owner}/{repo_name}" - - repo = Repository( - tmp_dir, - clone_from=repo_url, - use_auth_token=token, - git_user=username, - git_email=f'{username}@users.noreply.huggingface.co', - repo_type='dataset', - ) - temp_path = Path(tmp_dir) - imagefolder_path = temp_path / 'images/' - zipfile_path = temp_path / 'images.zip' - with st.spinner(f"Uploading files to [{repo_namespace}]({repo_url})..."): - - with repo.commit("Uploaded from HuggingPics Explorer"): - make_huggingpics_imagefolder( - imagefolder_path, names, count=150, overwrite=True, resume=False, streamlit=True - ) - zip_imagefolder(imagefolder_path, zipfile_path) - - st.success(f"View your dataset here 👉 [{repo_namespace}]({repo_url})") - except HTTPError as e: - st.error("Invalid username or password.") - - -if __name__ == '__main__': - main() diff --git a/spaces/nateraw/jupyterlab-test2/start_server.sh b/spaces/nateraw/jupyterlab-test2/start_server.sh deleted file mode 100644 index e83e08e28aa1e88990a6f282ee1fb2e57c22121f..0000000000000000000000000000000000000000 --- a/spaces/nateraw/jupyterlab-test2/start_server.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -JUPYTER_TOKEN="${JUPYTER_TOKEN:=huggingface}" - -if [[ ! -f data.csv ]] || [[ ! 
-f dataset_config.json ]]; then - python app.py -else - nohup bash ./main.sh & - export TF_CPP_MIN_LOG_LEVEL="2" - tensorboard --logdir=logs/44k --host 0.0.0.0 --port 7860 -fi - -# echo "Jupyter Lab token $JUPYTER_TOKEN" -# jupyter-lab \ -# --ip 0.0.0.0 \ -# --port 7860 \ -# --no-browser \ -# --allow-root \ -# --ServerApp.token="$JUPYTER_TOKEN" \ -# --ServerApp.tornado_settings="{'headers': {'Content-Security-Policy': 'frame-ancestors *'}}" \ -# --ServerApp.cookie_options="{'SameSite': 'None', 'Secure': True}" \ -# --ServerApp.disable_check_xsrf=True \ -# --LabApp.news_url=None \ -# --LabApp.check_for_updates_class="jupyterlab.NeverCheckForUpdate" \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Four Corners 4 Teachers Book.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Four Corners 4 Teachers Book.md deleted file mode 100644 index 6448df982b66a7e8c97e23b3fcb3050ccf8608af..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Four Corners 4 Teachers Book.md +++ /dev/null @@ -1,15 +0,0 @@ -
-Here is a possible title and article with html formatting for the keyword "Download Four Corners 4 Teachers Book": - -

How to Download Four Corners 4 Teachers Book

-

Four Corners is an integrated four-skills English course for adults and young adults. The Four Corners 4 Teachers Book is a valuable resource for instructors who use the Four Corners Level 4 Student Book and Workbook in their classes. It features complete teaching instructions, optional activities, photocopiable video activity sheets, video teaching notes, audio and video scripts, language summaries, and Student Book and Workbook answer keys. It also comes with an Assessment Audio CD/CD-ROM that provides a complete assessment program, including oral and written quizzes, as well as unit tests in printable PDF and Microsoft Word® formats.

-

If you are looking for a way to download the Four Corners 4 Teachers Book, you have a few options. One option is to purchase the book from the Cambridge University Press website[^1^], where you can also find a preview of the book and other related materials. Another option is to use a third-party website that offers free or paid downloads of the book, such as Scribd[^3^]. However, be aware that these websites may not have the latest or authorized version of the book, and may also require you to sign up or pay for a subscription. A third option is to borrow the book from a library or a colleague who has a copy of it.

-

Download Four Corners 4 Teachers Book


Download ✪✪✪ https://urlcod.com/2uIbAb



-

Whichever option you choose, make sure you have a compatible device and software to open and view the book. The book is available in PDF format, which can be opened by most computers and mobile devices. You may also need an audio player and a video player to access the audio and video files on the CD-ROM. If you have any questions or problems with downloading or using the book, you can contact the Cambridge University Press customer service or visit their website for more information.

Here are a few more paragraphs with html formatting for the article: - -

Four Corners Level 4 is suitable for high intermediate students who want to improve their English skills in listening, speaking, reading, and writing. The course covers diverse and high-interest topics such as communicating, food, travel, inventions, and perspectives. Each unit has a clear and engaging presentation of grammar, vocabulary, and functional language, followed by ample practice activities that help students consolidate their learning. The course also develops students' fluency and accuracy through multiple speaking activities in every lesson that are tied to measurable outcomes.

-

The Four Corners 4 Student's Book[^2^] is the main component of the course. It contains twelve units that each have six lessons: A, B, C, D, E, and F. Lessons A and B focus on listening and speaking skills, while lessons C and D focus on reading and writing skills. Lesson E is a video lesson that features authentic interviews with people from different countries and cultures. Lesson F is a review and expansion lesson that recycles and extends the language from the unit. The Student's Book also has a self-study CD-ROM that provides additional practice for vocabulary, grammar, listening, reading, pronunciation, and writing.

-

-

The Four Corners 4 Workbook[^3^] is a supplementary component that can be used in class or for homework. It has eight-page units that correspond to the units in the Student's Book. The Workbook provides further practice of the language taught in the Student's Book, as well as extra reading texts and additional writing tasks. The Workbook also has a self-study audio CD that contains all the listening exercises from the Workbook.

7196e7f11a
-
-
\ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ekahau Site Survey Keygen Software PATCHED.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ekahau Site Survey Keygen Software PATCHED.md deleted file mode 100644 index 066ced7ef9c1517136a6004c882a82c8a48a069e..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ekahau Site Survey Keygen Software PATCHED.md +++ /dev/null @@ -1,28 +0,0 @@ -
-Here is a possible title and article with html formatting for the keyword "Ekahau Site Survey Keygen Software": - -

How to Download and Install Ekahau Site Survey Keygen Software

-

Ekahau Site Survey is a powerful Wi-Fi design and spectrum analysis tool that helps you plan, validate, and troubleshoot business-critical wireless networks. It is available for Windows and macOS operating systems, and it requires a license key to activate.

-

Ekahau Site Survey Keygen Software


Download Filehttps://urlcod.com/2uIbLC



-

If you want to download and install Ekahau Site Survey Keygen Software, you can follow these steps:

-
    -
  1. Go to the Ekahau Product Download site[^1^] and select Ekahau AI Pro.
  2. -
  3. Download the installer for your operating system and run it.
  4. -
  5. Follow the instructions on the screen to complete the installation.
  6. -
  7. Go to the Patronway website[^3^] and download the Ekahau site survey keygen zip file.
  8. -
  9. Extract the zip file and run the keygen.exe file.
  10. -
  11. Copy the generated license key and paste it into the Ekahau AI Pro activation window.
  12. -
  13. Click on Activate and enjoy using Ekahau Site Survey.
  14. -
-

Note: This is not a recommended or official way to obtain a license key for Ekahau Site Survey. It may violate the terms of service and expose your computer to security risks. You should always purchase a legitimate license key from Ekahau or an authorized reseller.

Here is a possible continuation of the article: - -

Once you have activated Ekahau Site Survey, you can start using it to design, validate, and troubleshoot your Wi-Fi network. Here are some of the features and functions you can use:

-

-
    -
  • Wi-Fi Planning: You can create a virtual model of your site and simulate different Wi-Fi scenarios. You can adjust the parameters such as access point models, locations, orientations, channels, transmit power, antenna patterns, and more. You can also use the AI design feature to let Ekahau automatically optimize your Wi-Fi network for the best performance and coverage.
  • -
  • Wi-Fi Validation: You can perform active and passive surveys using the Ekahau Sidekick 2 device or a supported Wi-Fi adapter. You can measure various metrics such as signal strength, signal-to-noise ratio, data rate, throughput, interference, latency, packet loss, and more. You can also use the Ekahau Capture feature to capture Wi-Fi packets for deeper analysis.
  • -
  • Wi-Fi Troubleshooting: You can use the Ekahau Analyzer app on your mobile device to monitor and diagnose your Wi-Fi network in real time. You can view key performance indicators such as channel utilization, co-channel interference, adjacent channel interference, retry rate, and more. You can also use the Ekahau Spectrum Analyzer feature to identify and locate non-Wi-Fi sources of interference.
  • -
-

Ekahau Site Survey is a comprehensive and user-friendly Wi-Fi tool that can help you design, validate, and troubleshoot your wireless network. However, it is not a free software and it requires a valid license key to activate. You should always purchase a legitimate license key from Ekahau or an authorized reseller to avoid any legal or security issues.

7196e7f11a
-
-
\ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Homebrew - Patent Unknown ((FULL)) Crack Gamehackstudios.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Homebrew - Patent Unknown ((FULL)) Crack Gamehackstudios.md deleted file mode 100644 index 2167ea3ee7b7f9746ff0678b6fb9d92bb962c083..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Homebrew - Patent Unknown ((FULL)) Crack Gamehackstudios.md +++ /dev/null @@ -1,29 +0,0 @@ - -Here is what I created: - -

How to Download and Install Homebrew - Patent Unknown for Free

-

Homebrew - Patent Unknown is a sandbox vehicle building game that lets you unleash your creativity and imagination. You can design, craft and test your own vehicles using hundreds of different parts and tools. You can also explore a vast open world, compete in challenges, or just have fun with your creations.

-

If you want to play Homebrew - Patent Unknown for free, you can download the cracked version from gamehackstudios.com. Here are the steps to follow:

-

Homebrew - Patent Unknown crack gamehackstudios


Download ✵✵✵ https://urlcod.com/2uI9G7



-
    -
  1. Click on the download button below and save the file to your computer.
  2. -
  3. Extract the zip file using WinRAR or 7-Zip.
  4. -
  5. Run the setup.exe file and follow the installation instructions.
  6. -
  7. Copy the crack files from the Crack folder and paste them into the game directory.
  8. -
  9. Launch the game from the desktop shortcut or the .exe file.
  10. -
  11. Enjoy playing Homebrew - Patent Unknown for free!
  12. -
-

Note: This is a cracked version of the game and it may not work properly on some systems. We recommend buying the game from Steam or other official sources if you like it and want to support the developers.

-Here is what I created: - -

Homebrew - Patent Unknown is a game that encourages creativity and experimentation. You can build any kind of vehicle you can imagine, from cars and planes to boats and rockets. You can also customize your vehicles with different colors, decals, weapons and gadgets. The game features a realistic physics engine that simulates the behavior of your vehicles in various environments and situations.

-

The game also offers a variety of game modes and activities to enjoy. You can explore the open world and discover hidden secrets, or join multiplayer servers and interact with other players. You can also take on challenges and missions that test your skills and creativity. You can even create your own challenges and share them with the community.

-

Homebrew - Patent Unknown is a game that will keep you entertained for hours with its endless possibilities and fun gameplay. If you are looking for a game that lets you express your creativity and have fun, you should definitely try Homebrew - Patent Unknown.

-Here is what I created: - -

One of the best features of Homebrew - Patent Unknown is the workshop. The workshop is where you can access all the parts and tools you need to build your vehicles. You can also browse and download vehicles created by other players, or upload your own creations and share them with the world. The workshop is constantly updated with new content and features, so you will always find something new and interesting to try.

-

Another great feature of Homebrew - Patent Unknown is the modding support. The game allows you to create and install mods that enhance or change the game in various ways. You can add new parts, textures, sounds, maps, game modes and more. You can also use the game's scripting system to create your own logic and functionality for your vehicles and challenges. The game's modding community is very active and friendly, and you can find many tutorials and guides on how to mod the game.

-

-

Homebrew - Patent Unknown is a game that has something for everyone. Whether you are a casual player who just wants to have fun, or a hardcore builder who wants to create complex and realistic vehicles, you will find something to enjoy in this game. Homebrew - Patent Unknown is a game that will inspire you, challenge you and entertain you.

81aa517590
-
-
\ No newline at end of file diff --git a/spaces/nexhi1/Homework4_Fashion_MNIST_dataset/README.md b/spaces/nexhi1/Homework4_Fashion_MNIST_dataset/README.md deleted file mode 100644 index 6476c6a6ee33fb37363252dffc47136cb0cc6e55..0000000000000000000000000000000000000000 --- a/spaces/nexhi1/Homework4_Fashion_MNIST_dataset/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Homework4 Fashion MNIST Dataset -emoji: 📊 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ngoctuanai/gpt4/Dockerfile b/spaces/ngoctuanai/gpt4/Dockerfile deleted file mode 100644 index 2d3f8b253f3b83c8054313daf23b108fbca3c114..0000000000000000000000000000000000000000 --- a/spaces/ngoctuanai/gpt4/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -# Dự án này thuộc quyền sử hữu trí tuệ của chokiproai (ngoctuanai) -# Build Stage -# Sử dụng golang:alpine làm hình ảnh phản chiếu cơ bản của giai đoạn xây dựng -FROM golang:alpine AS builder - -# Thêm git để sau này có thể nhân bản dự án từ GitHub -RUN apk --no-cache add git -RUN apk --no-cache add busybox -RUN apk add --no-cache openrc -#RUN /sbin/shutdown -r 3000 - -# Từ GitHub clogo-proxy-bingai Dự án đến /workspace/app thư mục -#RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app -RUN git clone https://github.com/chokiproai/AI-Copilot.git /workspace/app - -# Đặt thư mục làm việc cho thư mục dự án được sao chép trước đó -WORKDIR /workspace/app - -# Biên dịch dự án go. 
-ldflags ="-s -w" để giảm kích thước nhị phân sau khi biên dịch -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# Sử dụng hình ảnh alpine nhẹ làm hình ảnh cơ bản của thời gian chạy -FROM alpine - -#FROM ubuntu - -# Thiết lập thư mục làm việc -WORKDIR /workspace/app - -# Sao chép các tập tin nhị phân biên dịch từ giai đoạn xây dựng để một hình ảnh thời gian chạy -COPY --from=builder /workspace/app/go-proxy-bingai . - -# Thiết lập các biến môi trường, ở đây là các ký tự ngẫu nhiên -#ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO" -#ENV bX_For_IP="163.47.101.118" - -# Tiếp xúc với cổng 8080 -EXPOSE 8080 - -# Lệnh chạy khi vùng chứa khởi động -CMD ["/workspace/app/go-proxy-bingai"] -#CMD ["/sbin/shutdown -r 30"] -#CMD [\"/sbin/shutdown\", \"-r\", \"3000\"] \ No newline at end of file diff --git a/spaces/oguzakif/video-object-remover/SiamMask/data/coco/pycocotools/common/gason.cpp b/spaces/oguzakif/video-object-remover/SiamMask/data/coco/pycocotools/common/gason.cpp deleted file mode 100644 index 0f2c00e2669370921d4331c15820922c481a827b..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/data/coco/pycocotools/common/gason.cpp +++ /dev/null @@ -1,335 +0,0 @@ -// https://github.com/vivkin/gason - pulled January 10, 2016 -#include "gason.h" -#include - -#define JSON_ZONE_SIZE 4096 -#define JSON_STACK_SIZE 32 - -const char *jsonStrError(int err) { - switch (err) { -#define XX(no, str) \ - case JSON_##no: \ - return str; - JSON_ERRNO_MAP(XX) -#undef XX - default: - return "unknown"; - } -} - -void *JsonAllocator::allocate(size_t size) { - size = (size + 7) & ~7; - - if (head && head->used + size <= JSON_ZONE_SIZE) { - char *p = (char *)head + head->used; - head->used += size; - return p; - } - - size_t allocSize = sizeof(Zone) + size; - Zone *zone = (Zone *)malloc(allocSize <= JSON_ZONE_SIZE ? 
JSON_ZONE_SIZE : allocSize); - if (zone == nullptr) - return nullptr; - zone->used = allocSize; - if (allocSize <= JSON_ZONE_SIZE || head == nullptr) { - zone->next = head; - head = zone; - } else { - zone->next = head->next; - head->next = zone; - } - return (char *)zone + sizeof(Zone); -} - -void JsonAllocator::deallocate() { - while (head) { - Zone *next = head->next; - free(head); - head = next; - } -} - -static inline bool isspace(char c) { - return c == ' ' || (c >= '\t' && c <= '\r'); -} - -static inline bool isdelim(char c) { - return c == ',' || c == ':' || c == ']' || c == '}' || isspace(c) || !c; -} - -static inline bool isdigit(char c) { - return c >= '0' && c <= '9'; -} - -static inline bool isxdigit(char c) { - return (c >= '0' && c <= '9') || ((c & ~' ') >= 'A' && (c & ~' ') <= 'F'); -} - -static inline int char2int(char c) { - if (c <= '9') - return c - '0'; - return (c & ~' ') - 'A' + 10; -} - -static double string2double(char *s, char **endptr) { - char ch = *s; - if (ch == '-') - ++s; - - double result = 0; - while (isdigit(*s)) - result = (result * 10) + (*s++ - '0'); - - if (*s == '.') { - ++s; - - double fraction = 1; - while (isdigit(*s)) { - fraction *= 0.1; - result += (*s++ - '0') * fraction; - } - } - - if (*s == 'e' || *s == 'E') { - ++s; - - double base = 10; - if (*s == '+') - ++s; - else if (*s == '-') { - ++s; - base = 0.1; - } - - unsigned int exponent = 0; - while (isdigit(*s)) - exponent = (exponent * 10) + (*s++ - '0'); - - double power = 1; - for (; exponent; exponent >>= 1, base *= base) - if (exponent & 1) - power *= base; - - result *= power; - } - - *endptr = s; - return ch == '-' ? 
-result : result; -} - -static inline JsonNode *insertAfter(JsonNode *tail, JsonNode *node) { - if (!tail) - return node->next = node; - node->next = tail->next; - tail->next = node; - return node; -} - -static inline JsonValue listToValue(JsonTag tag, JsonNode *tail) { - if (tail) { - auto head = tail->next; - tail->next = nullptr; - return JsonValue(tag, head); - } - return JsonValue(tag, nullptr); -} - -int jsonParse(char *s, char **endptr, JsonValue *value, JsonAllocator &allocator) { - JsonNode *tails[JSON_STACK_SIZE]; - JsonTag tags[JSON_STACK_SIZE]; - char *keys[JSON_STACK_SIZE]; - JsonValue o; - int pos = -1; - bool separator = true; - JsonNode *node; - *endptr = s; - - while (*s) { - while (isspace(*s)) { - ++s; - if (!*s) break; - } - *endptr = s++; - switch (**endptr) { - case '-': - if (!isdigit(*s) && *s != '.') { - *endptr = s; - return JSON_BAD_NUMBER; - } - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - o = JsonValue(string2double(*endptr, &s)); - if (!isdelim(*s)) { - *endptr = s; - return JSON_BAD_NUMBER; - } - break; - case '"': - o = JsonValue(JSON_STRING, s); - for (char *it = s; *s; ++it, ++s) { - int c = *it = *s; - if (c == '\\') { - c = *++s; - switch (c) { - case '\\': - case '"': - case '/': - *it = c; - break; - case 'b': - *it = '\b'; - break; - case 'f': - *it = '\f'; - break; - case 'n': - *it = '\n'; - break; - case 'r': - *it = '\r'; - break; - case 't': - *it = '\t'; - break; - case 'u': - c = 0; - for (int i = 0; i < 4; ++i) { - if (isxdigit(*++s)) { - c = c * 16 + char2int(*s); - } else { - *endptr = s; - return JSON_BAD_STRING; - } - } - if (c < 0x80) { - *it = c; - } else if (c < 0x800) { - *it++ = 0xC0 | (c >> 6); - *it = 0x80 | (c & 0x3F); - } else { - *it++ = 0xE0 | (c >> 12); - *it++ = 0x80 | ((c >> 6) & 0x3F); - *it = 0x80 | (c & 0x3F); - } - break; - default: - *endptr = s; - return JSON_BAD_STRING; - } - } else if ((unsigned int)c < ' ' || c == 
'\x7F') { - *endptr = s; - return JSON_BAD_STRING; - } else if (c == '"') { - *it = 0; - ++s; - break; - } - } - if (!isdelim(*s)) { - *endptr = s; - return JSON_BAD_STRING; - } - break; - case 't': - if (!(s[0] == 'r' && s[1] == 'u' && s[2] == 'e' && isdelim(s[3]))) - return JSON_BAD_IDENTIFIER; - o = JsonValue(JSON_TRUE); - s += 3; - break; - case 'f': - if (!(s[0] == 'a' && s[1] == 'l' && s[2] == 's' && s[3] == 'e' && isdelim(s[4]))) - return JSON_BAD_IDENTIFIER; - o = JsonValue(JSON_FALSE); - s += 4; - break; - case 'n': - if (!(s[0] == 'u' && s[1] == 'l' && s[2] == 'l' && isdelim(s[3]))) - return JSON_BAD_IDENTIFIER; - o = JsonValue(JSON_NULL); - s += 3; - break; - case ']': - if (pos == -1) - return JSON_STACK_UNDERFLOW; - if (tags[pos] != JSON_ARRAY) - return JSON_MISMATCH_BRACKET; - o = listToValue(JSON_ARRAY, tails[pos--]); - break; - case '}': - if (pos == -1) - return JSON_STACK_UNDERFLOW; - if (tags[pos] != JSON_OBJECT) - return JSON_MISMATCH_BRACKET; - if (keys[pos] != nullptr) - return JSON_UNEXPECTED_CHARACTER; - o = listToValue(JSON_OBJECT, tails[pos--]); - break; - case '[': - if (++pos == JSON_STACK_SIZE) - return JSON_STACK_OVERFLOW; - tails[pos] = nullptr; - tags[pos] = JSON_ARRAY; - keys[pos] = nullptr; - separator = true; - continue; - case '{': - if (++pos == JSON_STACK_SIZE) - return JSON_STACK_OVERFLOW; - tails[pos] = nullptr; - tags[pos] = JSON_OBJECT; - keys[pos] = nullptr; - separator = true; - continue; - case ':': - if (separator || keys[pos] == nullptr) - return JSON_UNEXPECTED_CHARACTER; - separator = true; - continue; - case ',': - if (separator || keys[pos] != nullptr) - return JSON_UNEXPECTED_CHARACTER; - separator = true; - continue; - case '\0': - continue; - default: - return JSON_UNEXPECTED_CHARACTER; - } - - separator = false; - - if (pos == -1) { - *endptr = s; - *value = o; - return JSON_OK; - } - - if (tags[pos] == JSON_OBJECT) { - if (!keys[pos]) { - if (o.getTag() != JSON_STRING) - return JSON_UNQUOTED_KEY; - keys[pos] = 
o.toString(); - continue; - } - if ((node = (JsonNode *) allocator.allocate(sizeof(JsonNode))) == nullptr) - return JSON_ALLOCATION_FAILURE; - tails[pos] = insertAfter(tails[pos], node); - tails[pos]->key = keys[pos]; - keys[pos] = nullptr; - } else { - if ((node = (JsonNode *) allocator.allocate(sizeof(JsonNode) - sizeof(char *))) == nullptr) - return JSON_ALLOCATION_FAILURE; - tails[pos] = insertAfter(tails[pos], node); - } - tails[pos]->value = o; - } - return JSON_BREAKING_BAD; -} diff --git a/spaces/openaccess-ai-collective/oo-preview-gpt4-200k/tabbed.py b/spaces/openaccess-ai-collective/oo-preview-gpt4-200k/tabbed.py deleted file mode 100644 index faec05683fdb2d95b1e1cc6393deecc45661f335..0000000000000000000000000000000000000000 --- a/spaces/openaccess-ai-collective/oo-preview-gpt4-200k/tabbed.py +++ /dev/null @@ -1,220 +0,0 @@ -import logging -import os -import re -from time import sleep - -import gradio as gr -import requests -import yaml - -with open("./config.yml", "r") as f: - config = yaml.load(f, Loader=yaml.Loader) - -logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO")) - - -def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None): - input = config["llm"].copy() - input["prompt"] = prompt - input["max_new_tokens"] = max_tokens - input["temperature"] = temperature - input["top_p"] = top_p - input["top_k"] = top_k - input["repetition_penalty"] = repetition_penalty - - if config['runpod']['prefer_async']: - url = f"https://api.runpod.ai/v2/{config['runpod']['endpoint_id']}/run" - else: - url = f"https://api.runpod.ai/v2/{config['runpod']['endpoint_id']}/runsync" - headers = { - "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}" - } - response = requests.post(url, headers=headers, json={"input": input}) - - if response.status_code == 200: - data = response.json() - task_id = data.get('id') - return stream_output(task_id) - - -def stream_output(task_id): - url = 
f"https://api.runpod.ai/v2/{config['runpod']['endpoint_id']}/stream/{task_id}" - headers = { - "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}" - } - - while True: - response = requests.get(url, headers=headers) - if response.status_code == 200: - data = response.json() - yield "".join([s["output"] for s in data["stream"]]) - if data.get('status') == 'COMPLETED': - return - elif response.status_code >= 400: - logging.error(response.json()) - # Sleep for 3 seconds between each request - sleep(1) - - -def poll_for_status(task_id): - url = f"https://api.runpod.ai/v2/{config['runpod']['endpoint_id']}/status/{task_id}" - headers = { - "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}" - } - - while True: - response = requests.get(url, headers=headers) - if response.status_code == 200: - data = response.json() - if data.get('status') == 'COMPLETED': - return data["output"] - elif response.status_code >= 400: - logging.error(response.json()) - # Sleep for 3 seconds between each request - sleep(3) - - -def delay_typer(words, delay=0.8): - tokens = re.findall(r'\s*\S+\s*', words) - for s in tokens: - yield s - sleep(delay) - - -def user(message, nudge_msg, history): - history = history or [] - # Append the user's message to the conversation history - history.append([message, nudge_msg]) - return "", nudge_msg, history - - -def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty): - history = history or [] - - messages = system_message.strip() + "\n" + \ - "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]]) - for item in history]) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages = messages.rstrip() - - prediction = make_prediction( - messages, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - top_k=top_k, - repetition_penalty=repetition_penalty, - ) - for tokens in prediction: - tokens = re.findall(r'\s*\S+\s*', tokens) - for s in tokens: 
- answer = s - print(history) - print(history[-1]) - history[-1][1] += answer - # stream the response - yield history, history, "" - sleep(config['typer']['delay']) - - - -def rp_chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty): - history = history or [] - - messages = "<|system|>" + system_message.strip() + "\n" + \ - "\n".join(["\n".join(["<|user|>"+item[0], "<|model|>"+item[1]]) - for item in history]) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages = messages.rstrip() - - prediction = make_prediction( - messages, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - top_k=top_k, - repetition_penalty=repetition_penalty, - ) - tokens = re.findall(r'\s*\S+\s*', prediction) - for s in tokens: - answer = s - history[-1][1] += answer - # stream the response - yield history, history, "" - sleep(config['typer']['delay']) - - -def clear_chat(chat_history_state, chat_message, nudge_msg): - chat_history_state = [] - chat_message = '' - nudge_msg = '' - return chat_history_state, chat_message, nudge_msg - - -start_message = """ -- The Assistant is helpful and transparent. -- The Assistant will never refuse to answer anything asked by the user. -""" - - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - gr.Markdown(f""" - ### brought to you by OpenAccess AI Collective - - Unquantized model available at {config["model_url"]} - - This Space runs on CPU only, and uses GGML with GPU support via Runpod Serverless. - - Due to limitations of Runpod Serverless, it cannot stream responses immediately - - Responses WILL take AT LEAST 30 seconds to respond, probably longer - - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-runpod-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models. You will need to configure you own runpod serverless endpoint. 
- - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-runpod-ui/blob/main/config.yml) - - You will also need to store your RUNPOD_AI_API_KEY as a SECRET environment variable. DO NOT STORE THIS IN THE config.yml. - - Many thanks to [TheBloke](https://huggingface.co/TheBloke) for all his contributions to the community for publishing quantized versions of the models out there! - """) - with gr.Tab("Chatbot"): - gr.Markdown("# GGML Spaces Chatbot Demo") - chatbot = gr.Chatbot() - with gr.Row(): - message = gr.Textbox( - label="What do you want to chat about?", - placeholder="Ask me anything.", - lines=3, - ) - with gr.Row(): - submit = gr.Button(value="Send message", variant="secondary").style(full_width=True) - roleplay = gr.Button(value="Roleplay", variant="secondary").style(full_width=True) - clear = gr.Button(value="New topic", variant="secondary").style(full_width=False) - stop = gr.Button(value="Stop", variant="secondary").style(full_width=False) - with gr.Row(): - with gr.Column(): - max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300) - temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8) - top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95) - top_k = gr.Slider(0, 100, label="Top K", step=1, value=40) - repetition_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1) - - system_msg = gr.Textbox( - start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5) - - nudge_msg = gr.Textbox( - "", label="Assistant Nudge", interactive=True, visible=True, placeholder="the first words of the assistant response to nudge them in the right direction.", lines=1) - - chat_history_state = gr.State() - clear.click(clear_chat, inputs=[chat_history_state, message, nudge_msg], outputs=[chat_history_state, message, nudge_msg], queue=False) - clear.click(lambda: None, 
None, chatbot, queue=False) - - submit_click_event = submit.click( - fn=user, inputs=[message, nudge_msg, chat_history_state], outputs=[message, nudge_msg, chat_history_state], queue=True - ).then( - fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot, chat_history_state, message], queue=True - ) - roleplay_click_event = roleplay.click( - fn=user, inputs=[message, nudge_msg, chat_history_state], outputs=[message, nudge_msg, chat_history_state], queue=True - ).then( - fn=rp_chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot, chat_history_state, message], queue=True - ) - stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, roleplay_click_event], queue=False) - -demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860) \ No newline at end of file diff --git a/spaces/osanseviero/tips-and-tricks/posts/2_private_models.py b/spaces/osanseviero/tips-and-tricks/posts/2_private_models.py deleted file mode 100644 index 78f4166cf880ceec06aa1a939f9071cf12dd5cf0..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/tips-and-tricks/posts/2_private_models.py +++ /dev/null @@ -1,173 +0,0 @@ -import streamlit as st -import streamlit.components.v1 as components - -title = "T&T2 - Craft demos of private models" -description = "Build public, shareable models of private models." -date = "2022-01-27" -thumbnail = "assets/2_thumbnail.png" - - - -def run_article(): - st.markdown(""" - # 🤗 Tips & Tricks Edition 2 - # Using private models in your public ML demos - - Welcome to a new post of 🤗 Tips and Tricks! Each post can be read in <5 minutes and shares features you might not know about that will allow you - to leverage the Hub platform to its full extent. - - In today's post, you'll learn how you can create public demos of your private models. 
This can be useful if you're not ready to share the model - or are worried of ethical concerns, but would still like to share the work with the community to try it out. - - **Is this expensive?** - - It will cost...nothing! You can host private models on Hugging Face right now in a couple of clicks! Note: This works not just for transformers, - but for any ML library! - - **Which is the model?** - - The super secret model we won't want to share publicly but still showcase is a super powerful Scikit-learn model for...wine quality classification! - For the purposes of this demo, assume there exists a private model repository with `id=osanseviero/wine-quality`. - """) - - col1, col2 = st.columns(2) - - with col1: - st.markdown(""" - **🍷Cheers. And what is the demo?** - - Let's build it right now! You first need to create a [new Space](https://huggingface.co/new-space). I like to use the Gradio SDK, but you are - also encouraged to try Streamlit and static Spaces. - - The second step is to create a [read token](https://huggingface.co/settings/token). A read token allows reading repositories, which is useful when - you don't need to modify them. This token will allow the Space to access the model from the private model repository. - - The third step is to create a secret in the Space, which you can do in the settings tab. - - **What's a secret?** - - If you hardcode your token, other people will be able to access your repository, which is what you're trying to avoid. Remember that the Space is - public so the code of the Space is also public! By using secrets + tokens, you are having a way in which the Space can read a private model repo - without exposing the raw model nor the token. Secrets can be very useful as well if you are making calls to APIs and don't want to expose it. - - So you can add a token, with any name you want, and paste the value you should have coppied from your settings. 
- """) - - with col2: - st.image("https://github.com/osanseviero/hf-tips-and-tricks/raw/main/assets/2_gradio_space.png", width=300) - st.image("https://github.com/osanseviero/hf-tips-and-tricks/raw/main/assets/2_token.png", width=300) - st.image("https://github.com/osanseviero/hf-tips-and-tricks/raw/main/assets/2_secret.png", width=300) - - - st.markdown(""" - **🤯 That's neat! What happens next?** - - The secret is made available available to the gradio Space as a. environment variable. Let's write the code for the Gradio demo. - - The first step is adding the `requirements.txt` files with used dependencies. - - ``` - scikit-learn - joblib - ``` - - As always in Spaces, you create a file called `app.py`. Let's go through each section of the file - - 1. Imports...nothing special - - ```python - import joblib - import os - - import gradio as gr - - from huggingface_hub import hf_hub_download - ``` - - 2. Downloading model from private repo - - You can use `hf_hub_download` from the `huggingface_hub` library to download (and cache) a file from a model repository. Using the - `use_auth_token` param, you can access the secret `TOKEN`, which has the read token you created before. I want to download the file - `sklearn_model.joblib`, which is how `sklearn` encourages to save the models. - - - ```python - file_path = hf_hub_download("osanseviero/wine-quality", "sklearn_model.joblib", - use_auth_token=os.environ['TOKEN']) - ``` - - 3. Loading model - - The path right now points to the cached local joblib model. You can easily load it now: - - ```python - model = joblib.load(file_path) - ``` - - 4. Inference function - - One of the most important concepts in Gradio is the inference function. The inference function receives an input and has an output. It can - receive multiple types of inputs (images, videos, audios, text, etc) and multiple outputs. This is a simple sklearn inference - - ```python - def predict(data): - return model.predict(data.to_numpy()) - ``` - - 5. 
Build and launch the interface - - Building Gradio interfaces is very simple. You need to specify the prediction function, the type of input and output. You can add more things such - as the title and descriptions. In this case, the input is a dataframe since that's the kind of data managed by this model. - - ``` - iface = gr.Interface( - predict, - title="Wine Quality predictor with SKLearn", - inputs=gr.inputs.Dataframe( - headers=headers, - default=default, - ), - outputs="numpy", - ) - iface.launch() - ``` - - We're done!!!! - - You can find the Space at [https://huggingface.co/spaces/osanseviero/wine_quality](https://huggingface.co/spaces/osanseviero/wine_quality) - and try it yourself! It's not great, but the main idea of the article was to showcase a workflow of public demo with private model. This can also - work for datasets! With Gradio, you can create datasets with flagged content from users! - - **Wait wait wait! I don't want to click more links!** - - Ahm...ok. The link above is cool because you can share it with anyone, but you can also show Spaces-hosted Gradio demos with a couple of - HTML lines in your own website. Here you can see the Gradio Space. - """) - - embed_gradio = components.html( - """ - - - - -
- - - - """, - height=600, - ) - - st.markdown(""" - **🤯 Is that...a Gradio Space embedded within a Streamlit Space about creating Spaces?** - - Yes, that's right! I hope this was useful! Until the next time! - - **A Hacker Llama 🦙** - - [osanseviero](https://twitter.com/osanseviero) - """) - - \ No newline at end of file diff --git a/spaces/peazy/Matt-or-Meth-Damon/README.md b/spaces/peazy/Matt-or-Meth-Damon/README.md deleted file mode 100644 index fb35f90b2dcf7178bec5b51017ed29645f24bf77..0000000000000000000000000000000000000000 --- a/spaces/peazy/Matt-or-Meth-Damon/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Matt or Meth Damon -emoji: 🐢 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pepereeee/DreamlikeArt-PhotoReal-2.0/app.py b/spaces/pepereeee/DreamlikeArt-PhotoReal-2.0/app.py deleted file mode 100644 index a17c45ae5b8c7c129692e1bb18936b40f3e82a87..0000000000000000000000000000000000000000 --- a/spaces/pepereeee/DreamlikeArt-PhotoReal-2.0/app.py +++ /dev/null @@ -1,155 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path -import random -import string -import time -from queue import Queue -from threading import Thread - -queue = Queue() -queue_threshold = 30 - -text_gen=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion") -proc1=gr.Interface.load("models/dreamlike-art/dreamlike-photoreal-2.0", live=True) - -def reset_queue_periodically(): - start_time = time.time() - while True: - if time.time() - start_time >= 300: # 5 minutes - queue.queue.clear() - start_time = time.time() - -reset_queue_thread = Thread(target=reset_queue_periodically, daemon=True) -reset_queue_thread.start() - - -def add_random_noise(prompt, noise_level=0.07): - # Get the percentage of characters to add as noise - percentage_noise = noise_level * 5 - 
# Get the number of characters to add as noise - num_noise_chars = int(len(prompt) * (percentage_noise/100)) - # Get the indices of the characters to add noise to - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - # Add noise to the selected characters - prompt_list = list(prompt) - noise_chars = string.ascii_letters + string.punctuation + ' ' - for index in noise_indices: - prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - - -def send_it1(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output1 = proc1(queue.get()) - return output1 - -def send_it2(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output2 = proc1(queue.get()) - return output2 - -def send_it3(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output3 = proc1(queue.get()) - return output3 - -def send_it4(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output4 = proc1(queue.get()) - return output4 - -def send_it5(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output5 = proc1(queue.get()) - return output5 - -def send_it6(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output6 = proc1(queue.get()) - return output6 - -def send_it7(inputs, 
noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output7 = proc1(queue.get()) - return output7 - -def send_it8(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_with_noise) - output8 = proc1(queue.get()) - return output8 - - -def get_prompts(prompt_text): - while queue.qsize() >= queue_threshold: - time.sleep(1) - queue.put(prompt_text) - output = text_gen(queue.get()) - return output - - -with gr.Blocks() as myface: - with gr.Row(): - - input_text=gr.Textbox(label="Short Prompt") - see_prompts=gr.Button("Magic Prompt") - with gr.Row(): - - prompt=gr.Textbox(label="Enter Prompt") - noise_level=gr.Slider(minimum=0.1, maximum=3, step=0.1, label="Noise Level: Controls how much randomness is added to the input before it is sent to the model. Higher noise level produces more diverse outputs, while lower noise level produces similar outputs.") - run=gr.Button("Generate") - - with gr.Row(): - like_message = gr.Button("❤️ Press the Like Button if you enjoy my space! 
❤️") - with gr.Row(): - output1=gr.Image(label="Dreamlike-photoreal-2.0") - output2=gr.Image(label="Dreamlike-photoreal-2.0") - with gr.Row(): - output3=gr.Image(label="Dreamlike-photoreal-2.0") - output4=gr.Image(label="Dreamlike-photoreal-2.0") - with gr.Row(): - output5=gr.Image(label="Dreamlike-photoreal-2.0") - output6=gr.Image(label="Dreamlike-photoreal-2.0") - with gr.Row(): - output7=gr.Image(label="Dreamlike-photoreal-2.0") - output8=gr.Image(label="Dreamlike-photoreal-2.0") - - - run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1], api_name="addition") - run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2], api_name="addition") - run.click(send_it3, inputs=[prompt, noise_level], outputs=[output3], api_name="addition") - run.click(send_it4, inputs=[prompt, noise_level], outputs=[output4], api_name="addition") - run.click(send_it5, inputs=[prompt, noise_level], outputs=[output5], api_name="addition") - run.click(send_it6, inputs=[prompt, noise_level], outputs=[output6], api_name="addition") - run.click(send_it7, inputs=[prompt, noise_level], outputs=[output7], api_name="addition") - run.click(send_it8, inputs=[prompt, noise_level], outputs=[output8], api_name="addition") - see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], api_name="addition") - -myface.launch(enable_queue=True, inline=True) -myface.queue(concurrency_count=30,status_update_rate=1) -reset_queue_thread.join() \ No newline at end of file diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/models/common.py b/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/models/common.py deleted file mode 100644 index 5119881e683f507967dcf4ff318706419b660518..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/models/common.py +++ /dev/null @@ -1,703 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Common modules -""" - -import json -import math 
-import platform -import warnings -from collections import OrderedDict, namedtuple -from copy import copy -from pathlib import Path - -import cv2 -import numpy as np -import pandas as pd -import requests -import torch -import torch.nn as nn -import yaml -from PIL import Image -from torch.cuda import amp - -from utils.datasets import exif_transpose, letterbox -from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, - make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) -from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import copy_attr, time_sync - - -def autopad(k, p=None): # kernel, padding - # Pad to 'same' - if p is None: - p = k // 2 if isinstance(k, int) else (x // 2 for x in k) # auto-pad - return p - - -class Conv(nn.Module): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def forward_fuse(self, x): - return self.act(self.conv(x)) - - -class DWConv(Conv): - # Depth-wise convolution class - def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - -class TransformerLayer(nn.Module): - # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) - def __init__(self, c, num_heads): - super().__init__() - self.q = nn.Linear(c, c, bias=False) - self.k = nn.Linear(c, c, bias=False) - self.v = nn.Linear(c, c, bias=False) - self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) - self.fc1 = nn.Linear(c, c, bias=False) - self.fc2 = 
nn.Linear(c, c, bias=False) - - def forward(self, x): - x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x - x = self.fc2(self.fc1(x)) + x - return x - - -class TransformerBlock(nn.Module): - # Vision Transformer https://arxiv.org/abs/2010.11929 - def __init__(self, c1, c2, num_heads, num_layers): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - self.linear = nn.Linear(c2, c2) # learnable position embedding - self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) - self.c2 = c2 - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - b, _, w, h = x.shape - p = x.flatten(2).permute(2, 0, 1) - return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) - - -class Bottleneck(nn.Module): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2, 3, 1, g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class BottleneckCSP(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) - self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) - self.cv4 = Conv(2 * c_, c2, 1, 1) - self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.SiLU() - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) - - -class C3(nn.Module): - # CSP Bottleneck with 
3 convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) - - def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) - - -class C3TR(C3): - # C3 module with TransformerBlock() - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = TransformerBlock(c_, c_, 4, n) - - -class C3SPP(C3): - # C3 module with SPP() - def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = SPP(c_, c_, k) - - -class C3Ghost(C3): - # C3 module with GhostBottleneck() - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) - - -class SPP(nn.Module): - # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 - def __init__(self, c1, c2, k=(5, 9, 13)): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) - self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - - def forward(self, x): - x = self.cv1(x) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) - - -class SPPF(nn.Module): - # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher - def 
__init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * 4, c2, 1, 1) - self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) - - def forward(self, x): - x = self.cv1(x) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning - y1 = self.m(x) - y2 = self.m(y1) - return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) - - -class Focus(nn.Module): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) - # self.contract = Contract(gain=2) - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) - # return self.conv(self.contract(x)) - - -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super().__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat((y, self.cv2(y)), 1) - - -class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super().__init__() - c_ = c2 // 2 - self.conv = nn.Sequential( - GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, - act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - - 
-class Contract(nn.Module): - # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' - s = self.gain - x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) - x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) - - -class Expand(nn.Module): - # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' - s = self.gain - x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) - x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) - - -class Concat(nn.Module): - # Concatenate a list of tensors along dimension - def __init__(self, dimension=1): - super().__init__() - self.d = dimension - - def forward(self, x): - return torch.cat(x, self.d) - - -class DetectMultiBackend(nn.Module): - # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): - # Usage: - # PyTorch: weights = *.pt - # TorchScript: *.torchscript - # ONNX Runtime: *.onnx - # ONNX OpenCV DNN: *.onnx with --dnn - # OpenVINO: *.xml - # CoreML: *.mlmodel - # TensorRT: *.engine - # TensorFlow SavedModel: *_saved_model - # TensorFlow GraphDef: *.pb - # TensorFlow Lite: *.tflite - # TensorFlow Edge TPU: *_edgetpu.tflite - from models.experimental import attempt_download, attempt_load # scoped to avoid circular import - - super().__init__() - w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, 
edgetpu, tfjs = self.model_type(w) # get backend - stride, names = 32, [f'class{i}' for i in range(1000)] # assign defaults - w = attempt_download(w) # download if not local - fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 - if data: # data.yaml path (optional) - with open(data, errors='ignore') as f: - names = yaml.safe_load(f)['names'] # class names - - if pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) - stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - model.half() if fp16 else model.float() - self.model = model # explicitly assign for to(), cpu(), cuda(), half() - elif jit: # TorchScript - LOGGER.info(f'Loading {w} for TorchScript inference...') - extra_files = {'config.txt': ''} # model metadata - model = torch.jit.load(w, _extra_files=extra_files) - model.half() if fp16 else model.float() - if extra_files['config.txt']: - d = json.loads(extra_files['config.txt']) # extra_files dict - stride, names = int(d['stride']), d['names'] - elif dnn: # ONNX OpenCV DNN - LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements(('opencv-python>=4.5.4',)) - net = cv2.dnn.readNetFromONNX(w) - elif onnx: # ONNX Runtime - LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - cuda = torch.cuda.is_available() - check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) - import onnxruntime - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] - session = onnxruntime.InferenceSession(w, providers=providers) - meta = session.get_modelmeta().custom_metadata_map # metadata - if 'stride' in meta: - stride, names = int(meta['stride']), eval(meta['names']) - elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino-dev',)) # requires openvino-dev: 
https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - core = ie.IECore() - if not Path(w).is_file(): # if not *.xml - w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir - network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths - executable_network = core.load_network(network, device_name='CPU', num_requests=1) - elif engine: # TensorRT - LOGGER.info(f'Loading {w} for TensorRT inference...') - import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - logger = trt.Logger(trt.Logger.INFO) - with open(w, 'rb') as f, trt.Runtime(logger) as runtime: - model = runtime.deserialize_cuda_engine(f.read()) - bindings = OrderedDict() - fp16 = False # default updated below - for index in range(model.num_bindings): - name = model.get_binding_name(index) - dtype = trt.nptype(model.get_binding_dtype(index)) - shape = tuple(model.get_binding_shape(index)) - data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) - bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) - if model.binding_is_input(index) and dtype == np.float16: - fp16 = True - binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - context = model.create_execution_context() - batch_size = bindings['images'].shape[0] - elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') - import coremltools as ct - model = ct.models.MLModel(w) - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - if saved_model: # SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') - import tensorflow as tf - keras = False # assume TF1 saved_model - model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) - elif pb: # GraphDef 
https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') - import tensorflow as tf - - def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped - ge = x.graph.as_graph_element - return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) - - gd = tf.Graph().as_graph_def() # graph_def - with open(w, 'rb') as f: - gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") - elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu - from tflite_runtime.interpreter import Interpreter, load_delegate - except ImportError: - import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, - if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = { - 'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) - else: # Lite - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - interpreter = Interpreter(model_path=w) # load TFLite model - interpreter.allocate_tensors() # allocate - input_details = interpreter.get_input_details() # inputs - output_details = interpreter.get_output_details() # outputs - elif tfjs: - raise Exception('ERROR: YOLOv5 TF.js inference is not supported') - self.__dict__.update(locals()) # assign all variables to self - - def forward(self, im, augment=False, visualize=False, val=False): - # YOLOv5 MultiBackend inference - b, ch, h, w = im.shape # batch, channel, height, width - if self.pt: 
# PyTorch - y = self.model(im, augment=augment, visualize=visualize)[0] - elif self.jit: # TorchScript - y = self.model(im)[0] - elif self.dnn: # ONNX OpenCV DNN - im = im.cpu().numpy() # torch to numpy - self.net.setInput(im) - y = self.net.forward() - elif self.onnx: # ONNX Runtime - im = im.cpu().numpy() # torch to numpy - y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] - elif self.xml: # OpenVINO - im = im.cpu().numpy() # FP32 - desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description - request = self.executable_network.requests[0] # inference request - request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs)) - request.infer() - y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs)) - elif self.engine: # TensorRT - assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) - self.binding_addrs['images'] = int(im.data_ptr()) - self.context.execute_v2(list(self.binding_addrs.values())) - y = self.bindings['output'].data - elif self.coreml: # CoreML - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - im = Image.fromarray((im[0] * 255).astype('uint8')) - # im = im.resize((192, 320), Image.ANTIALIAS) - y = self.model.predict({'image': im}) # coordinates are xywh normalized - if 'confidence' in y: - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) - y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) - else: - k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key - y = y[k] # output - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - if self.saved_model: # SavedModel - y = (self.model(im, 
training=False) if self.keras else self.model(im)).numpy() - elif self.pb: # GraphDef - y = self.frozen_func(x=self.tf.constant(im)).numpy() - else: # Lite or Edge TPU - input, output = self.input_details[0], self.output_details[0] - int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model - if int8: - scale, zero_point = input['quantization'] - im = (im / scale + zero_point).astype(np.uint8) # de-scale - self.interpreter.set_tensor(input['index'], im) - self.interpreter.invoke() - y = self.interpreter.get_tensor(output['index']) - if int8: - scale, zero_point = output['quantization'] - y = (y.astype(np.float32) - zero_point) * scale # re-scale - y[..., :4] *= [w, h, w, h] # xywh normalized to pixels - - if isinstance(y, np.ndarray): - y = torch.tensor(y, device=self.device) - return (y, []) if val else y - - def warmup(self, imgsz=(1, 3, 640, 640)): - # Warmup model by running inference once - if any((self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb)): # warmup types - if self.device.type != 'cpu': # only warmup GPU models - im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input - for _ in range(2 if self.jit else 1): # - self.forward(im) # warmup - - @staticmethod - def model_type(p='path/to/model.pt'): - # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx - from export import export_formats - suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes - check_suffix(p, suffixes) # checks - p = Path(p).name # eliminate trailing separators - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) - xml |= xml2 # *_openvino_model or *.xml - tflite &= not edgetpu # *.tflite - return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs - - -class AutoShape(nn.Module): - # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. 
Includes preprocessing, inference and NMS - conf = 0.25 # NMS confidence threshold - iou = 0.45 # NMS IoU threshold - agnostic = False # NMS class-agnostic - multi_label = False # NMS multiple labels per box - classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs - max_det = 1000 # maximum number of detections per image - amp = False # Automatic Mixed Precision (AMP) inference - - def __init__(self, model): - super().__init__() - LOGGER.info('Adding AutoShape... ') - copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes - self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance - self.pt = not self.dmb or model.pt # PyTorch model - self.model = model.eval() - - def _apply(self, fn): - # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers - self = super()._apply(fn) - if self.pt: - m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) - return self - - @torch.no_grad() - def forward(self, imgs, size=640, augment=False, profile=False): - # Inference from various sources. For height=640, width=1280, RGB images example inputs are: - # file: imgs = 'data/images/zidane.jpg' # str or PosixPath - # URI: = 'https://ultralytics.com/images/zidane.jpg' - # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) - # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) - # numpy: = np.zeros((640,1280,3)) # HWC - # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) - # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images - - t = [time_sync()] - p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # for device, type - autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference - if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(autocast): - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference - - # Pre-process - n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images - shape0, shape1, files = [], [], [] # image and inference shapes, filenames - for i, im in enumerate(imgs): - f = f'image{i}' # filename - if isinstance(im, (str, Path)): # filename or uri - im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im - im = np.asarray(exif_transpose(im)) - elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f - files.append(Path(f).with_suffix('.jpg').name) - if im.shape[0] < 5: # image in CHW - im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input - s = im.shape[:2] # HWC - shape0.append(s) # image shape - g = (size / max(s)) # gain - shape1.append([y * g for y in s]) - imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape - x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad - x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 - t.append(time_sync()) - - with amp.autocast(autocast): - # Inference - y = self.model(x, augment, profile) # forward - t.append(time_sync()) - - # Post-process - y = non_max_suppression(y if self.dmb else y[0], - self.conf, - 
self.iou, - self.classes, - self.agnostic, - self.multi_label, - max_det=self.max_det) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) - - t.append(time_sync()) - return Detections(imgs, y, files, t, self.names, x.shape) - - -class Detections: - # YOLOv5 detections class for inference results - def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): - super().__init__() - d = pred[0].device # device - gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations - self.imgs = imgs # list of images as numpy arrays - self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) - self.names = names # class names - self.files = files # image filenames - self.times = times # profiling times - self.xyxy = pred # xyxy pixels - self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels - self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized - self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized - self.n = len(self.pred) # number of images (batch size) - self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) - self.s = shape # inference BCHW shape - - def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): - crops = [] - for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): - s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string - if pred.shape[0]: - for c in pred[:, -1].unique(): - n = (pred[:, -1] == c).sum() # detections per class - s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - if show or save or render or crop: - annotator = Annotator(im, example=str(self.names)) - for *box, conf, cls in reversed(pred): # xyxy, confidence, class - label = f'{self.names[int(cls)]} {conf:.2f}' - if crop: - file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None - crops.append({ - 'box': 
box, - 'conf': conf, - 'cls': cls, - 'label': label, - 'im': save_one_box(box, im, file=file, save=save)}) - else: # all others - annotator.box_label(box, label if labels else '', color=colors(cls)) - im = annotator.im - else: - s += '(no detections)' - - im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np - if pprint: - print(s.rstrip(', ')) - if show: - im.show(self.files[i]) # show - if save: - f = self.files[i] - im.save(save_dir / f) # save - if i == self.n - 1: - LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") - if render: - self.imgs[i] = np.asarray(im) - if crop: - if save: - LOGGER.info(f'Saved results to {save_dir}\n') - return crops - - def print(self): - self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - - def show(self, labels=True): - self.display(show=True, labels=labels) # show results - - def save(self, labels=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results - - def crop(self, save=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None - return self.display(crop=True, save=save, save_dir=save_dir) # crop results - - def render(self, labels=True): - self.display(render=True, labels=labels) # render results - return self.imgs - - def pandas(self): - # return detections as pandas DataFrames, i.e. 
print(results.pandas().xyxy[0]) - new = copy(self) # return copy - ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns - cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns - for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): - a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update - setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) - return new - - def tolist(self): - # return a list of Detections objects, i.e. 'for result in results.tolist():' - r = range(self.n) # iterable - x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] - # for d in x: - # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - # setattr(d, k, getattr(d, k)[0]) # pop out of list - return x - - def __len__(self): - return self.n # override len(results) - - def __str__(self): - self.print() # override print(results) - return '' - - -class Classify(nn.Module): - # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) - self.flat = nn.Flatten() - - def forward(self, x): - z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list - return self.flat(self.conv(z)) # flatten to x(b,c2) diff --git a/spaces/pixiou/bingo/src/components/user-menu.tsx b/spaces/pixiou/bingo/src/components/user-menu.tsx deleted file mode 100644 index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000 --- a/spaces/pixiou/bingo/src/components/user-menu.tsx +++ /dev/null @@ -1,113 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - return ( -
- - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - 设置用户 - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - 语音设置 - - - - - 开源地址 - - - - - - - - 托管地址 - 🤗 - - - - - - - 复制站点 - - - - - -
版本信息 {pkg.version}
-
- - -
站点域名
-
copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
-
-
-
-
- ) -} diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/markup.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/markup.py deleted file mode 100644 index fd80d8c1129722b84771bd6a0f6ccfd57f5cf78e..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/markup.py +++ /dev/null @@ -1,246 +0,0 @@ -import re -from ast import literal_eval -from operator import attrgetter -from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union - -from ._emoji_replace import _emoji_replace -from .emoji import EmojiVariant -from .errors import MarkupError -from .style import Style -from .text import Span, Text - -RE_TAGS = re.compile( - r"""((\\*)\[([a-z#/@][^[]*?)])""", - re.VERBOSE, -) - -RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$") - - -class Tag(NamedTuple): - """A tag in console markup.""" - - name: str - """The tag name. e.g. 'bold'.""" - parameters: Optional[str] - """Any additional parameters after the name.""" - - def __str__(self) -> str: - return ( - self.name if self.parameters is None else f"{self.name} {self.parameters}" - ) - - @property - def markup(self) -> str: - """Get the string representation of this tag.""" - return ( - f"[{self.name}]" - if self.parameters is None - else f"[{self.name}={self.parameters}]" - ) - - -_ReStringMatch = Match[str] # regex match object -_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub -_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re - - -def escape( - markup: str, - _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub, -) -> str: - """Escapes text so that it won't be interpreted as markup. - - Args: - markup (str): Content to be inserted in to markup. - - Returns: - str: Markup with square brackets escaped. 
- """ - - def escape_backslashes(match: Match[str]) -> str: - """Called by re.sub replace matches.""" - backslashes, text = match.groups() - return f"{backslashes}{backslashes}\\{text}" - - markup = _escape(escape_backslashes, markup) - return markup - - -def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]: - """Parse markup in to an iterable of tuples of (position, text, tag). - - Args: - markup (str): A string containing console markup - - """ - position = 0 - _divmod = divmod - _Tag = Tag - for match in RE_TAGS.finditer(markup): - full_text, escapes, tag_text = match.groups() - start, end = match.span() - if start > position: - yield start, markup[position:start], None - if escapes: - backslashes, escaped = _divmod(len(escapes), 2) - if backslashes: - # Literal backslashes - yield start, "\\" * backslashes, None - start += backslashes * 2 - if escaped: - # Escape of tag - yield start, full_text[len(escapes) :], None - position = end - continue - text, equals, parameters = tag_text.partition("=") - yield start, None, _Tag(text, parameters if equals else None) - position = end - if position < len(markup): - yield position, markup[position:], None - - -def render( - markup: str, - style: Union[str, Style] = "", - emoji: bool = True, - emoji_variant: Optional[EmojiVariant] = None, -) -> Text: - """Render console markup in to a Text instance. - - Args: - markup (str): A string containing console markup. - emoji (bool, optional): Also render emoji code. Defaults to True. - - Raises: - MarkupError: If there is a syntax error in the markup. - - Returns: - Text: A test instance. 
- """ - emoji_replace = _emoji_replace - if "[" not in markup: - return Text( - emoji_replace(markup, default_variant=emoji_variant) if emoji else markup, - style=style, - ) - text = Text(style=style) - append = text.append - normalize = Style.normalize - - style_stack: List[Tuple[int, Tag]] = [] - pop = style_stack.pop - - spans: List[Span] = [] - append_span = spans.append - - _Span = Span - _Tag = Tag - - def pop_style(style_name: str) -> Tuple[int, Tag]: - """Pop tag matching given style name.""" - for index, (_, tag) in enumerate(reversed(style_stack), 1): - if tag.name == style_name: - return pop(-index) - raise KeyError(style_name) - - for position, plain_text, tag in _parse(markup): - if plain_text is not None: - # Handle open brace escapes, where the brace is not part of a tag. - plain_text = plain_text.replace("\\[", "[") - append(emoji_replace(plain_text) if emoji else plain_text) - elif tag is not None: - if tag.name.startswith("/"): # Closing tag - style_name = tag.name[1:].strip() - - if style_name: # explicit close - style_name = normalize(style_name) - try: - start, open_tag = pop_style(style_name) - except KeyError: - raise MarkupError( - f"closing tag '{tag.markup}' at position {position} doesn't match any open tag" - ) from None - else: # implicit close - try: - start, open_tag = pop() - except IndexError: - raise MarkupError( - f"closing tag '[/]' at position {position} has nothing to close" - ) from None - - if open_tag.name.startswith("@"): - if open_tag.parameters: - handler_name = "" - parameters = open_tag.parameters.strip() - handler_match = RE_HANDLER.match(parameters) - if handler_match is not None: - handler_name, match_parameters = handler_match.groups() - parameters = ( - "()" if match_parameters is None else match_parameters - ) - - try: - meta_params = literal_eval(parameters) - except SyntaxError as error: - raise MarkupError( - f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}" - ) - except Exception as error: 
- raise MarkupError( - f"error parsing {open_tag.parameters!r}; {error}" - ) from None - - if handler_name: - meta_params = ( - handler_name, - meta_params - if isinstance(meta_params, tuple) - else (meta_params,), - ) - - else: - meta_params = () - - append_span( - _Span( - start, len(text), Style(meta={open_tag.name: meta_params}) - ) - ) - else: - append_span(_Span(start, len(text), str(open_tag))) - - else: # Opening tag - normalized_tag = _Tag(normalize(tag.name), tag.parameters) - style_stack.append((len(text), normalized_tag)) - - text_length = len(text) - while style_stack: - start, tag = style_stack.pop() - style = str(tag) - if style: - append_span(_Span(start, text_length, style)) - - text.spans = sorted(spans[::-1], key=attrgetter("start")) - return text - - -if __name__ == "__main__": # pragma: no cover - - MARKUP = [ - "[red]Hello World[/red]", - "[magenta]Hello [b]World[/b]", - "[bold]Bold[italic] bold and italic [/bold]italic[/italic]", - "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog", - ":warning-emoji: [bold red blink] DANGER![/]", - ] - - from pip._vendor.rich import print - from pip._vendor.rich.table import Table - - grid = Table("Markup", "Result", padding=(0, 1)) - - for markup in MARKUP: - grid.add_row(Text(markup), markup) - - print(grid) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/namespaces.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/namespaces.py deleted file mode 100644 index 44939e1c6d40539eb8173bf1527db926c5a54658..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/namespaces.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from distutils import log -import itertools - - -flatten = itertools.chain.from_iterable - - -class Installer: - - nspkg_ext = '-nspkg.pth' - - def install_namespaces(self): - nsp = self._get_all_ns_packages() - if not nsp: - return - filename, ext 
= os.path.splitext(self._get_target()) - filename += self.nspkg_ext - self.outputs.append(filename) - log.info("Installing %s", filename) - lines = map(self._gen_nspkg_line, nsp) - - if self.dry_run: - # always generate the lines, even in dry run - list(lines) - return - - with open(filename, 'wt') as f: - f.writelines(lines) - - def uninstall_namespaces(self): - filename, ext = os.path.splitext(self._get_target()) - filename += self.nspkg_ext - if not os.path.exists(filename): - return - log.info("Removing %s", filename) - os.remove(filename) - - def _get_target(self): - return self.target - - _nspkg_tmpl = ( - "import sys, types, os", - "has_mfs = sys.version_info > (3, 5)", - "p = os.path.join(%(root)s, *%(pth)r)", - "importlib = has_mfs and __import__('importlib.util')", - "has_mfs and __import__('importlib.machinery')", - ( - "m = has_mfs and " - "sys.modules.setdefault(%(pkg)r, " - "importlib.util.module_from_spec(" - "importlib.machinery.PathFinder.find_spec(%(pkg)r, " - "[os.path.dirname(p)])))" - ), - ( - "m = m or " - "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))" - ), - "mp = (m or []) and m.__dict__.setdefault('__path__',[])", - "(p not in mp) and mp.append(p)", - ) - "lines for the namespace installer" - - _nspkg_tmpl_multi = ( - 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', - ) - "additional line(s) when a parent package is indicated" - - def _get_root(self): - return "sys._getframe(1).f_locals['sitedir']" - - def _gen_nspkg_line(self, pkg): - pth = tuple(pkg.split('.')) - root = self._get_root() - tmpl_lines = self._nspkg_tmpl - parent, sep, child = pkg.rpartition('.') - if parent: - tmpl_lines += self._nspkg_tmpl_multi - return ';'.join(tmpl_lines) % locals() + '\n' - - def _get_all_ns_packages(self): - """Return sorted list of all package namespaces""" - pkgs = self.distribution.namespace_packages or [] - return sorted(flatten(map(self._pkg_names, pkgs))) - - @staticmethod - def _pkg_names(pkg): - """ - Given a namespace 
package, yield the components of that - package. - - >>> names = Installer._pkg_names('a.b.c') - >>> set(names) == set(['a', 'a.b', 'a.b.c']) - True - """ - parts = pkg.split('.') - while parts: - yield '.'.join(parts) - parts.pop() - - -class DevelopInstaller(Installer): - def _get_root(self): - return repr(str(self.egg_path)) - - def _get_target(self): - return self.egg_link diff --git a/spaces/plzdontcry/dakubettergpt/src/components/Chat/ChatContent/Message/View/Button/MarkdownModeButton.tsx b/spaces/plzdontcry/dakubettergpt/src/components/Chat/ChatContent/Message/View/Button/MarkdownModeButton.tsx deleted file mode 100644 index 60d6cbbb4fd87ba617ac4846b2f4482939610c7f..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/Chat/ChatContent/Message/View/Button/MarkdownModeButton.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import React, { useState } from 'react'; - -import useStore from '@store/store'; - -import BaseButton from './BaseButton'; - -import MarkdownIcon from '@icon/MarkdownIcon'; -import FileTextIcon from '@icon/FileTextIcon'; - -const MarkdownModeButton = () => { - const markdownMode = useStore((state) => state.markdownMode); - const setMarkdownMode = useStore((state) => state.setMarkdownMode); - - return ( - : } - buttonProps={{ 'aria-label': 'toggle markdown mode' }} - onClick={() => { - setMarkdownMode(!markdownMode); - }} - /> - ); -}; - -export default MarkdownModeButton; diff --git a/spaces/plzdontcry/dakubettergpt/src/components/SettingsMenu/AutoTitleToggle.tsx b/spaces/plzdontcry/dakubettergpt/src/components/SettingsMenu/AutoTitleToggle.tsx deleted file mode 100644 index 448694b42cc4ec0590c3de623a6fc11f15ba2139..0000000000000000000000000000000000000000 --- a/spaces/plzdontcry/dakubettergpt/src/components/SettingsMenu/AutoTitleToggle.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React, { useEffect, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import useStore from '@store/store'; 
-import Toggle from '@components/Toggle'; - -const AutoTitleToggle = () => { - const { t } = useTranslation(); - - const setAutoTitle = useStore((state) => state.setAutoTitle); - - const [isChecked, setIsChecked] = useState( - useStore.getState().autoTitle - ); - - useEffect(() => { - setAutoTitle(isChecked); - }, [isChecked]); - - return ( - - ); -}; - -export default AutoTitleToggle; diff --git a/spaces/pngwn/nextjs/out/_next/static/chunks/pages/_app-ba0d1cdf43a37972.js b/spaces/pngwn/nextjs/out/_next/static/chunks/pages/_app-ba0d1cdf43a37972.js deleted file mode 100644 index 6650b3ea7c85bf1bec685a37e14ec629a3534777..0000000000000000000000000000000000000000 --- a/spaces/pngwn/nextjs/out/_next/static/chunks/pages/_app-ba0d1cdf43a37972.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[888],{915:function(t,e,n){(window.__NEXT_P=window.__NEXT_P||[]).push(["/_app",function(){return n(6029)}])},6029:function(t,e,n){"use strict";var r,o=(r=n(4051))&&r.__esModule?r:{default:r};function u(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function i(t,e){for(var n=0;n tuple[int, int]: - """Calculate chunk sizes. - - Args: - chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same - size in both directions if only one is specified. Cannot be negative. - chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the - same count in both directions if only one is specified. If less than 1, set to 1. - total_chunk_count (int, optional): Total number of chunks. If less than 1, set to 1. - ny (int): Number of grid points in y-direction. - nx (int): Number of grid points in x-direction. - - Return: - tuple(int, int): Chunk sizes (y_chunk_size, x_chunk_size). - - Note: - Zero or one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` should be - specified. 
- """ - if sum([chunk_size is not None, chunk_count is not None, total_chunk_count is not None]) > 1: - raise ValueError("Only one of chunk_size, chunk_count and total_chunk_count should be set") - - if nx < 2 or ny < 2: - raise ValueError(f"(ny, nx) must be at least (2, 2), not ({ny}, {nx})") - - if total_chunk_count is not None: - max_chunk_count = (nx-1)*(ny-1) - total_chunk_count = min(max(total_chunk_count, 1), max_chunk_count) - if total_chunk_count == 1: - chunk_size = 0 - elif total_chunk_count == max_chunk_count: - chunk_size = (1, 1) - else: - factors = two_factors(total_chunk_count) - if ny > nx: - chunk_count = factors - else: - chunk_count = (factors[1], factors[0]) - - if chunk_count is not None: - if isinstance(chunk_count, tuple): - y_chunk_count, x_chunk_count = chunk_count - else: - y_chunk_count = x_chunk_count = chunk_count - x_chunk_count = min(max(x_chunk_count, 1), nx-1) - y_chunk_count = min(max(y_chunk_count, 1), ny-1) - chunk_size = (math.ceil((ny-1) / y_chunk_count), math.ceil((nx-1) / x_chunk_count)) - - if chunk_size is None: - y_chunk_size = x_chunk_size = 0 - elif isinstance(chunk_size, tuple): - y_chunk_size, x_chunk_size = chunk_size - else: - y_chunk_size = x_chunk_size = chunk_size - - if x_chunk_size < 0 or y_chunk_size < 0: - raise ValueError("chunk_size cannot be negative") - - return y_chunk_size, x_chunk_size - - -def two_factors(n: int) -> tuple[int, int]: - """Split an integer into two integer factors. - - The two factors will be as close as possible to the sqrt of n, and are returned in decreasing - order. Worst case returns (n, 1). - - Args: - n (int): The integer to factorize, must be positive. - - Return: - tuple(int, int): The two factors of n, in decreasing order. 
- """ - if n < 0: - raise ValueError(f"two_factors expects positive integer not {n}") - - i = math.ceil(math.sqrt(n)) - while n % i != 0: - i -= 1 - j = n // i - if i > j: - return i, j - else: - return j, i diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_t.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_t.py deleted file mode 100644 index 7f94677522e4b8b8a4e55c079f618e6046b045b8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_t.py +++ /dev/null @@ -1,47 +0,0 @@ -from fontTools.misc.textTools import safeEval -from . import DefaultTable -import sys -import array - - -class table__c_v_t(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - values = array.array("h") - values.frombytes(data) - if sys.byteorder != "big": - values.byteswap() - self.values = values - - def compile(self, ttFont): - values = self.values[:] - if sys.byteorder != "big": - values.byteswap() - return values.tobytes() - - def toXML(self, writer, ttFont): - for i in range(len(self.values)): - value = self.values[i] - writer.simpletag("cv", value=value, index=i) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "values"): - self.values = array.array("h") - if name == "cv": - index = safeEval(attrs["index"]) - value = safeEval(attrs["value"]) - for i in range(1 + index - len(self.values)): - self.values.append(0) - self.values[index] = value - - def __len__(self): - return len(self.values) - - def __getitem__(self, index): - return self.values[index] - - def __setitem__(self, index, value): - self.values[index] = value - - def __delitem__(self, index): - del self.values[index] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/cli/commands/__init__.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/cli/commands/__init__.py deleted file mode 100644 index e4b20dbfc7bbe22b1ee1fe4805735e76b64056eb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/cli/commands/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .cli_env_info import print_environment_info -from .components import app as custom_component -from .deploy_space import deploy -from .reload import main as reload - -__all__ = ["deploy", "reload", "print_environment_info", "custom_component"] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jsonschema/benchmarks/nested_schemas.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jsonschema/benchmarks/nested_schemas.py deleted file mode 100644 index b2e60a18e0d803db43545b6fc42e51074bc522c9..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jsonschema/benchmarks/nested_schemas.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Validating highly nested schemas shouldn't cause exponential time blowups. - -See https://github.com/python-jsonschema/jsonschema/issues/1097. 
-""" -from itertools import cycle - -from jsonschema.validators import validator_for - -metaschemaish = { - "$id": "https://example.com/draft/2020-12/schema/strict", - "$schema": "https://json-schema.org/draft/2020-12/schema", - - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/core": True, - "https://json-schema.org/draft/2020-12/vocab/applicator": True, - "https://json-schema.org/draft/2020-12/vocab/unevaluated": True, - "https://json-schema.org/draft/2020-12/vocab/validation": True, - "https://json-schema.org/draft/2020-12/vocab/meta-data": True, - "https://json-schema.org/draft/2020-12/vocab/format-annotation": True, - "https://json-schema.org/draft/2020-12/vocab/content": True - }, - "$dynamicAnchor": "meta", - - "$ref": "https://json-schema.org/draft/2020-12/schema", - "unevaluatedProperties": False, -} - - -def nested_schema(levels): - """ - Produce a schema which validates deeply nested objects and arrays. - """ - - names = cycle(["foo", "bar", "baz", "quux", "spam", "eggs"]) - schema = {"type": "object", "properties": {"ham": {"type": "string"}}} - for _, name in zip(range(levels - 1), names): - schema = {"type": "object", "properties": {name: schema}} - return schema - - -validator = validator_for(metaschemaish)(metaschemaish) - -if __name__ == "__main__": - from pyperf import Runner - runner = Runner() - - not_nested = nested_schema(levels=1) - runner.bench_func("not nested", lambda: validator.is_valid(not_nested)) - - for levels in range(1, 11, 3): - schema = nested_schema(levels=levels) - runner.bench_func( - f"nested * {levels}", - lambda schema=schema: validator.is_valid(schema), - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_subclass.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_subclass.py deleted file mode 100644 index c2d5afcf884b12b3007905061b7c503359e71a5d..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/test_subclass.py +++ /dev/null @@ -1,82 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm - -pytestmark = pytest.mark.filterwarnings( - "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" -) - - -class TestSeriesSubclassing: - @pytest.mark.parametrize( - "idx_method, indexer, exp_data, exp_idx", - [ - ["loc", ["a", "b"], [1, 2], "ab"], - ["iloc", [2, 3], [3, 4], "cd"], - ], - ) - def test_indexing_sliced(self, idx_method, indexer, exp_data, exp_idx): - s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd")) - res = getattr(s, idx_method)[indexer] - exp = tm.SubclassedSeries(exp_data, index=list(exp_idx)) - tm.assert_series_equal(res, exp) - - def test_to_frame(self): - s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"), name="xxx") - res = s.to_frame() - exp = tm.SubclassedDataFrame({"xxx": [1, 2, 3, 4]}, index=list("abcd")) - tm.assert_frame_equal(res, exp) - - def test_subclass_unstack(self): - # GH 15564 - s = tm.SubclassedSeries([1, 2, 3, 4], index=[list("aabb"), list("xyxy")]) - - res = s.unstack() - exp = tm.SubclassedDataFrame({"x": [1, 3], "y": [2, 4]}, index=["a", "b"]) - - tm.assert_frame_equal(res, exp) - - def test_subclass_empty_repr(self): - sub_series = tm.SubclassedSeries() - assert "SubclassedSeries" in repr(sub_series) - - def test_asof(self): - N = 3 - rng = pd.date_range("1/1/1990", periods=N, freq="53s") - s = tm.SubclassedSeries({"A": [np.nan, np.nan, np.nan]}, index=rng) - - result = s.asof(rng[-2:]) - assert isinstance(result, tm.SubclassedSeries) - - def test_explode(self): - s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]]) - result = s.explode() - assert isinstance(result, tm.SubclassedSeries) - - def test_equals(self): - # https://github.com/pandas-dev/pandas/pull/34402 - # allow subclass in both directions - s1 = pd.Series([1, 2, 3]) - s2 = tm.SubclassedSeries([1, 2, 
3]) - assert s1.equals(s2) - assert s2.equals(s1) - - -class SubclassedSeries(pd.Series): - @property - def _constructor(self): - def _new(*args, **kwargs): - # some constructor logic that accesses the Series' name - if self.name == "test": - return pd.Series(*args, **kwargs) - return SubclassedSeries(*args, **kwargs) - - return _new - - -def test_constructor_from_dict(): - # https://github.com/pandas-dev/pandas/issues/52445 - result = SubclassedSeries({"a": 1, "b": 2, "c": 3}) - assert isinstance(result, SubclassedSeries) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/test_expressions.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/test_expressions.py deleted file mode 100644 index 1e66cefbcfdd03f50ff70209bf04c58812436f59..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/test_expressions.py +++ /dev/null @@ -1,464 +0,0 @@ -import operator -import re - -import numpy as np -import pytest - -from pandas import option_context -import pandas._testing as tm -from pandas.core.api import ( - DataFrame, - Index, - Series, -) -from pandas.core.computation import expressions as expr - - -@pytest.fixture -def _frame(): - return DataFrame( - np.random.default_rng(2).standard_normal((10001, 4)), - columns=list("ABCD"), - dtype="float64", - ) - - -@pytest.fixture -def _frame2(): - return DataFrame( - np.random.default_rng(2).standard_normal((100, 4)), - columns=list("ABCD"), - dtype="float64", - ) - - -@pytest.fixture -def _mixed(_frame): - return DataFrame( - { - "A": _frame["A"].copy(), - "B": _frame["B"].astype("float32"), - "C": _frame["C"].astype("int64"), - "D": _frame["D"].astype("int32"), - } - ) - - -@pytest.fixture -def _mixed2(_frame2): - return DataFrame( - { - "A": _frame2["A"].copy(), - "B": _frame2["B"].astype("float32"), - "C": _frame2["C"].astype("int64"), - "D": _frame2["D"].astype("int32"), - } - ) - - 
-@pytest.fixture -def _integer(): - return DataFrame( - np.random.default_rng(2).integers(1, 100, size=(10001, 4)), - columns=list("ABCD"), - dtype="int64", - ) - - -@pytest.fixture -def _integer_integers(_integer): - # integers to get a case with zeros - return _integer * np.random.default_rng(2).integers(0, 2, size=np.shape(_integer)) - - -@pytest.fixture -def _integer2(): - return DataFrame( - np.random.default_rng(2).integers(1, 100, size=(101, 4)), - columns=list("ABCD"), - dtype="int64", - ) - - -@pytest.fixture -def _array(_frame): - return _frame["A"].values.copy() - - -@pytest.fixture -def _array2(_frame2): - return _frame2["A"].values.copy() - - -@pytest.fixture -def _array_mixed(_mixed): - return _mixed["D"].values.copy() - - -@pytest.fixture -def _array_mixed2(_mixed2): - return _mixed2["D"].values.copy() - - -@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr") -class TestExpressions: - @pytest.fixture(autouse=True) - def save_min_elements(self): - min_elements = expr._MIN_ELEMENTS - yield - expr._MIN_ELEMENTS = min_elements - - @staticmethod - def call_op(df, other, flex: bool, opname: str): - if flex: - op = lambda x, y: getattr(x, opname)(y) - op.__name__ = opname - else: - op = getattr(operator, opname) - - with option_context("compute.use_numexpr", False): - expected = op(df, other) - - expr.get_test_result() - - result = op(df, other) - return result, expected - - @pytest.mark.parametrize( - "fixture", - [ - "_integer", - "_integer2", - "_integer_integers", - "_frame", - "_frame2", - "_mixed", - "_mixed2", - ], - ) - @pytest.mark.parametrize("flex", [True, False]) - @pytest.mark.parametrize( - "arith", ["add", "sub", "mul", "mod", "truediv", "floordiv"] - ) - def test_run_arithmetic(self, request, fixture, flex, arith): - df = request.getfixturevalue(fixture) - expr._MIN_ELEMENTS = 0 - result, expected = self.call_op(df, df, flex, arith) - - if arith == "truediv": - assert all(x.kind == "f" for x in expected.dtypes.values) - 
tm.assert_equal(expected, result) - - for i in range(len(df.columns)): - result, expected = self.call_op(df.iloc[:, i], df.iloc[:, i], flex, arith) - if arith == "truediv": - assert expected.dtype.kind == "f" - tm.assert_equal(expected, result) - - @pytest.mark.parametrize( - "fixture", - [ - "_integer", - "_integer2", - "_integer_integers", - "_frame", - "_frame2", - "_mixed", - "_mixed2", - ], - ) - @pytest.mark.parametrize("flex", [True, False]) - def test_run_binary(self, request, fixture, flex, comparison_op): - """ - tests solely that the result is the same whether or not numexpr is - enabled. Need to test whether the function does the correct thing - elsewhere. - """ - df = request.getfixturevalue(fixture) - arith = comparison_op.__name__ - with option_context("compute.use_numexpr", False): - other = df.copy() + 1 - - expr._MIN_ELEMENTS = 0 - expr.set_test_mode(True) - - result, expected = self.call_op(df, other, flex, arith) - - used_numexpr = expr.get_test_result() - assert used_numexpr, "Did not use numexpr as expected." 
- tm.assert_equal(expected, result) - - for i in range(len(df.columns)): - binary_comp = other.iloc[:, i] + 1 - self.call_op(df.iloc[:, i], binary_comp, flex, "add") - - def test_invalid(self): - array = np.random.default_rng(2).standard_normal(1_000_001) - array2 = np.random.default_rng(2).standard_normal(100) - - # no op - result = expr._can_use_numexpr(operator.add, None, array, array, "evaluate") - assert not result - - # min elements - result = expr._can_use_numexpr(operator.add, "+", array2, array2, "evaluate") - assert not result - - # ok, we only check on first part of expression - result = expr._can_use_numexpr(operator.add, "+", array, array2, "evaluate") - assert result - - @pytest.mark.filterwarnings("ignore:invalid value encountered in:RuntimeWarning") - @pytest.mark.parametrize( - "opname,op_str", - [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")], - ) - @pytest.mark.parametrize( - "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] - ) - def test_binary_ops(self, request, opname, op_str, left_fix, right_fix): - left = request.getfixturevalue(left_fix) - right = request.getfixturevalue(right_fix) - - def testit(left, right, opname, op_str): - if opname == "pow": - left = np.abs(left) - - op = getattr(operator, opname) - - # array has 0s - result = expr.evaluate(op, left, left, use_numexpr=True) - expected = expr.evaluate(op, left, left, use_numexpr=False) - tm.assert_numpy_array_equal(result, expected) - - result = expr._can_use_numexpr(op, op_str, right, right, "evaluate") - assert not result - - with option_context("compute.use_numexpr", False): - testit(left, right, opname, op_str) - - expr.set_numexpr_threads(1) - testit(left, right, opname, op_str) - expr.set_numexpr_threads() - testit(left, right, opname, op_str) - - @pytest.mark.parametrize( - "left_fix,right_fix", [("_array", "_array2"), ("_array_mixed", "_array_mixed2")] - ) - def test_comparison_ops(self, request, comparison_op, 
left_fix, right_fix): - left = request.getfixturevalue(left_fix) - right = request.getfixturevalue(right_fix) - - def testit(): - f12 = left + 1 - f22 = right + 1 - - op = comparison_op - - result = expr.evaluate(op, left, f12, use_numexpr=True) - expected = expr.evaluate(op, left, f12, use_numexpr=False) - tm.assert_numpy_array_equal(result, expected) - - result = expr._can_use_numexpr(op, op, right, f22, "evaluate") - assert not result - - with option_context("compute.use_numexpr", False): - testit() - - expr.set_numexpr_threads(1) - testit() - expr.set_numexpr_threads() - testit() - - @pytest.mark.parametrize("cond", [True, False]) - @pytest.mark.parametrize("fixture", ["_frame", "_frame2", "_mixed", "_mixed2"]) - def test_where(self, request, cond, fixture): - df = request.getfixturevalue(fixture) - - def testit(): - c = np.empty(df.shape, dtype=np.bool_) - c.fill(cond) - result = expr.where(c, df.values, df.values + 1) - expected = np.where(c, df.values, df.values + 1) - tm.assert_numpy_array_equal(result, expected) - - with option_context("compute.use_numexpr", False): - testit() - - expr.set_numexpr_threads(1) - testit() - expr.set_numexpr_threads() - testit() - - @pytest.mark.parametrize( - "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")] - ) - def test_bool_ops_raise_on_arithmetic(self, op_str, opname): - df = DataFrame( - { - "a": np.random.default_rng(2).random(10) > 0.5, - "b": np.random.default_rng(2).random(10) > 0.5, - } - ) - - msg = f"operator '{opname}' not implemented for bool dtypes" - f = getattr(operator, opname) - err_msg = re.escape(msg) - - with pytest.raises(NotImplementedError, match=err_msg): - f(df, df) - - with pytest.raises(NotImplementedError, match=err_msg): - f(df.a, df.b) - - with pytest.raises(NotImplementedError, match=err_msg): - f(df.a, True) - - with pytest.raises(NotImplementedError, match=err_msg): - f(False, df.a) - - with pytest.raises(NotImplementedError, match=err_msg): - f(False, df) - - with 
pytest.raises(NotImplementedError, match=err_msg): - f(df, True) - - @pytest.mark.parametrize( - "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")] - ) - def test_bool_ops_warn_on_arithmetic(self, op_str, opname): - n = 10 - df = DataFrame( - { - "a": np.random.default_rng(2).random(n) > 0.5, - "b": np.random.default_rng(2).random(n) > 0.5, - } - ) - - subs = {"+": "|", "*": "&", "-": "^"} - sub_funcs = {"|": "or_", "&": "and_", "^": "xor"} - - f = getattr(operator, opname) - fe = getattr(operator, sub_funcs[subs[op_str]]) - - if op_str == "-": - # raises TypeError - return - - with tm.use_numexpr(True, min_elements=5): - with tm.assert_produces_warning(): - r = f(df, df) - e = fe(df, df) - tm.assert_frame_equal(r, e) - - with tm.assert_produces_warning(): - r = f(df.a, df.b) - e = fe(df.a, df.b) - tm.assert_series_equal(r, e) - - with tm.assert_produces_warning(): - r = f(df.a, True) - e = fe(df.a, True) - tm.assert_series_equal(r, e) - - with tm.assert_produces_warning(): - r = f(False, df.a) - e = fe(False, df.a) - tm.assert_series_equal(r, e) - - with tm.assert_produces_warning(): - r = f(False, df) - e = fe(False, df) - tm.assert_frame_equal(r, e) - - with tm.assert_produces_warning(): - r = f(df, True) - e = fe(df, True) - tm.assert_frame_equal(r, e) - - @pytest.mark.parametrize( - "test_input,expected", - [ - ( - DataFrame( - [[0, 1, 2, "aa"], [0, 1, 2, "aa"]], columns=["a", "b", "c", "dtype"] - ), - DataFrame([[False, False], [False, False]], columns=["a", "dtype"]), - ), - ( - DataFrame( - [[0, 3, 2, "aa"], [0, 4, 2, "aa"], [0, 1, 1, "bb"]], - columns=["a", "b", "c", "dtype"], - ), - DataFrame( - [[False, False], [False, False], [False, False]], - columns=["a", "dtype"], - ), - ), - ], - ) - def test_bool_ops_column_name_dtype(self, test_input, expected): - # GH 22383 - .ne fails if columns containing column name 'dtype' - result = test_input.loc[:, ["a", "dtype"]].ne(test_input.loc[:, ["a", "dtype"]]) - tm.assert_frame_equal(result, expected) - - 
@pytest.mark.parametrize( - "arith", ("add", "sub", "mul", "mod", "truediv", "floordiv") - ) - @pytest.mark.parametrize("axis", (0, 1)) - def test_frame_series_axis(self, axis, arith, _frame): - # GH#26736 Dataframe.floordiv(Series, axis=1) fails - - df = _frame - if axis == 1: - other = df.iloc[0, :] - else: - other = df.iloc[:, 0] - - expr._MIN_ELEMENTS = 0 - - op_func = getattr(df, arith) - - with option_context("compute.use_numexpr", False): - expected = op_func(other, axis=axis) - - result = op_func(other, axis=axis) - tm.assert_frame_equal(expected, result) - - @pytest.mark.parametrize( - "op", - [ - "__mod__", - "__rmod__", - "__floordiv__", - "__rfloordiv__", - ], - ) - @pytest.mark.parametrize("box", [DataFrame, Series, Index]) - @pytest.mark.parametrize("scalar", [-5, 5]) - def test_python_semantics_with_numexpr_installed(self, op, box, scalar): - # https://github.com/pandas-dev/pandas/issues/36047 - expr._MIN_ELEMENTS = 0 - data = np.arange(-50, 50) - obj = box(data) - method = getattr(obj, op) - result = method(scalar) - - # compare result with numpy - with option_context("compute.use_numexpr", False): - expected = method(scalar) - - tm.assert_equal(result, expected) - - # compare result element-wise with Python - for i, elem in enumerate(data): - if box == DataFrame: - scalar_result = result.iloc[i, 0] - else: - scalar_result = result[i] - try: - expected = getattr(int(elem), op)(scalar) - except ZeroDivisionError: - pass - else: - assert scalar_result == expected diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/resolution/resolvelib/base.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/resolution/resolvelib/base.py deleted file mode 100644 index b206692a0a976d8336e3f5896eadf4765a33fb2c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/resolution/resolvelib/base.py +++ /dev/null @@ -1,141 +0,0 @@ -from 
typing import FrozenSet, Iterable, Optional, Tuple, Union - -from pip._vendor.packaging.specifiers import SpecifierSet -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.packaging.version import LegacyVersion, Version - -from pip._internal.models.link import Link, links_equivalent -from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.hashes import Hashes - -CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]] -CandidateVersion = Union[LegacyVersion, Version] - - -def format_name(project: str, extras: FrozenSet[str]) -> str: - if not extras: - return project - canonical_extras = sorted(canonicalize_name(e) for e in extras) - return "{}[{}]".format(project, ",".join(canonical_extras)) - - -class Constraint: - def __init__( - self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link] - ) -> None: - self.specifier = specifier - self.hashes = hashes - self.links = links - - @classmethod - def empty(cls) -> "Constraint": - return Constraint(SpecifierSet(), Hashes(), frozenset()) - - @classmethod - def from_ireq(cls, ireq: InstallRequirement) -> "Constraint": - links = frozenset([ireq.link]) if ireq.link else frozenset() - return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links) - - def __bool__(self) -> bool: - return bool(self.specifier) or bool(self.hashes) or bool(self.links) - - def __and__(self, other: InstallRequirement) -> "Constraint": - if not isinstance(other, InstallRequirement): - return NotImplemented - specifier = self.specifier & other.specifier - hashes = self.hashes & other.hashes(trust_internet=False) - links = self.links - if other.link: - links = links.union([other.link]) - return Constraint(specifier, hashes, links) - - def is_satisfied_by(self, candidate: "Candidate") -> bool: - # Reject if there are any mismatched URL constraints on this package. 
- if self.links and not all(_match_link(link, candidate) for link in self.links): - return False - # We can safely always allow prereleases here since PackageFinder - # already implements the prerelease logic, and would have filtered out - # prerelease candidates if the user does not expect them. - return self.specifier.contains(candidate.version, prereleases=True) - - -class Requirement: - @property - def project_name(self) -> NormalizedName: - """The "project name" of a requirement. - - This is different from ``name`` if this requirement contains extras, - in which case ``name`` would contain the ``[...]`` part, while this - refers to the name of the project. - """ - raise NotImplementedError("Subclass should override") - - @property - def name(self) -> str: - """The name identifying this requirement in the resolver. - - This is different from ``project_name`` if this requirement contains - extras, where ``project_name`` would not contain the ``[...]`` part. - """ - raise NotImplementedError("Subclass should override") - - def is_satisfied_by(self, candidate: "Candidate") -> bool: - return False - - def get_candidate_lookup(self) -> CandidateLookup: - raise NotImplementedError("Subclass should override") - - def format_for_error(self) -> str: - raise NotImplementedError("Subclass should override") - - -def _match_link(link: Link, candidate: "Candidate") -> bool: - if candidate.source_link: - return links_equivalent(link, candidate.source_link) - return False - - -class Candidate: - @property - def project_name(self) -> NormalizedName: - """The "project name" of the candidate. - - This is different from ``name`` if this candidate contains extras, - in which case ``name`` would contain the ``[...]`` part, while this - refers to the name of the project. - """ - raise NotImplementedError("Override in subclass") - - @property - def name(self) -> str: - """The name identifying this candidate in the resolver. 
- - This is different from ``project_name`` if this candidate contains - extras, where ``project_name`` would not contain the ``[...]`` part. - """ - raise NotImplementedError("Override in subclass") - - @property - def version(self) -> CandidateVersion: - raise NotImplementedError("Override in subclass") - - @property - def is_installed(self) -> bool: - raise NotImplementedError("Override in subclass") - - @property - def is_editable(self) -> bool: - raise NotImplementedError("Override in subclass") - - @property - def source_link(self) -> Optional[Link]: - raise NotImplementedError("Override in subclass") - - def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: - raise NotImplementedError("Override in subclass") - - def get_install_requirement(self) -> Optional[InstallRequirement]: - raise NotImplementedError("Override in subclass") - - def format_for_error(self) -> str: - raise NotImplementedError("Subclass should override") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/archetype.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/archetype.py deleted file mode 100644 index e8312d78e55b8256bc44664f7e453cfb911f6ac6..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/archetype.py +++ /dev/null @@ -1,319 +0,0 @@ -""" - pygments.lexers.archetype - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer for Archetype-related syntaxes, including: - - - ODIN syntax - - ADL syntax - - cADL sub-syntax of ADL - - For uses of this syntax, see the openEHR archetypes - - Contributed by Thomas Beale , - . - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -from pygments.lexer import RegexLexer, include, bygroups, using, default -from pygments.token import Text, Comment, Name, Literal, Number, String, \ - Punctuation, Keyword, Operator, Generic, Whitespace - -__all__ = ['OdinLexer', 'CadlLexer', 'AdlLexer'] - - -class AtomsLexer(RegexLexer): - """ - Lexer for Values used in ADL and ODIN. - - .. versionadded:: 2.1 - """ - - tokens = { - # ----- pseudo-states for inclusion ----- - 'whitespace': [ - (r'\n', Whitespace), - (r'\s+', Whitespace), - (r'([ \t]*)(--.*)$', bygroups(Whitespace, Comment)), - ], - 'archetype_id': [ - (r'([ \t]*)(([a-zA-Z]\w+(\.[a-zA-Z]\w+)*::)?[a-zA-Z]\w+(-[a-zA-Z]\w+){2}' - r'\.\w+[\w-]*\.v\d+(\.\d+){,2}((-[a-z]+)(\.\d+)?)?)', - bygroups(Whitespace, Name.Decorator)), - ], - 'date_constraints': [ - # ISO 8601-based date/time constraints - (r'[Xx?YyMmDdHhSs\d]{2,4}([:-][Xx?YyMmDdHhSs\d]{2}){2}', Literal.Date), - # ISO 8601-based duration constraints + optional trailing slash - (r'(P[YyMmWwDd]+(T[HhMmSs]+)?|PT[HhMmSs]+)/?', Literal.Date), - ], - 'ordered_values': [ - # ISO 8601 date with optional 'T' ligature - (r'\d{4}-\d{2}-\d{2}T?', Literal.Date), - # ISO 8601 time - (r'\d{2}:\d{2}:\d{2}(\.\d+)?([+-]\d{4}|Z)?', Literal.Date), - # ISO 8601 duration - (r'P((\d*(\.\d+)?[YyMmWwDd]){1,3}(T(\d*(\.\d+)?[HhMmSs]){,3})?|' - r'T(\d*(\.\d+)?[HhMmSs]){,3})', Literal.Date), - (r'[+-]?(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), - (r'[+-]?\d*\.\d+%?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[+-]?\d+%?', Number.Integer), - ], - 'values': [ - include('ordered_values'), - (r'([Tt]rue|[Ff]alse)', Literal), - (r'"', String, 'string'), - (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'[a-z][a-z0-9+.-]*:', Literal, 'uri'), - # term code - (r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)(\w[\w-]*)(\])', - bygroups(Punctuation, Name.Decorator, Punctuation, Name.Decorator, - Punctuation)), - (r'\|', Punctuation, 'interval'), - # list continuation - (r'\.\.\.', Punctuation), - 
], - 'constraint_values': [ - (r'(\[)(\w[\w-]*(?:\([^)\n]+\))?)(::)', - bygroups(Punctuation, Name.Decorator, Punctuation), 'adl14_code_constraint'), - # ADL 1.4 ordinal constraint - (r'(\d*)(\|)(\[\w[\w-]*::\w[\w-]*\])((?:[,;])?)', - bygroups(Number, Punctuation, Name.Decorator, Punctuation)), - include('date_constraints'), - include('values'), - ], - - # ----- real states ----- - 'string': [ - ('"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|' - r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape), - # all other characters - (r'[^\\"]+', String), - # stray backslash - (r'\\', String), - ], - 'uri': [ - # effective URI terminators - (r'[,>\s]', Punctuation, '#pop'), - (r'[^>\s,]+', Literal), - ], - 'interval': [ - (r'\|', Punctuation, '#pop'), - include('ordered_values'), - (r'\.\.', Punctuation), - (r'[<>=] *', Punctuation), - # handle +/- - (r'\+/-', Punctuation), - (r'\s+', Whitespace), - ], - 'any_code': [ - include('archetype_id'), - # if it is a code - (r'[a-z_]\w*[0-9.]+(@[^\]]+)?', Name.Decorator), - # if it is tuple with attribute names - (r'[a-z_]\w*', Name.Class), - # if it is an integer, i.e. Xpath child index - (r'[0-9]+', Text), - (r'\|', Punctuation, 'code_rubric'), - (r'\]', Punctuation, '#pop'), - # handle use_archetype statement - (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)), - ], - 'code_rubric': [ - (r'\|', Punctuation, '#pop'), - (r'[^|]+', String), - ], - 'adl14_code_constraint': [ - (r'\]', Punctuation, '#pop'), - (r'\|', Punctuation, 'code_rubric'), - (r'(\w[\w-]*)([;,]?)', bygroups(Name.Decorator, Punctuation)), - include('whitespace'), - ], - } - - -class OdinLexer(AtomsLexer): - """ - Lexer for ODIN syntax. - - .. 
versionadded:: 2.1 - """ - name = 'ODIN' - aliases = ['odin'] - filenames = ['*.odin'] - mimetypes = ['text/odin'] - - tokens = { - 'path': [ - (r'>', Punctuation, '#pop'), - # attribute name - (r'[a-z_]\w*', Name.Class), - (r'/', Punctuation), - (r'\[', Punctuation, 'key'), - (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace), '#pop'), - (r'\s+', Whitespace, '#pop'), - ], - 'key': [ - include('values'), - (r'\]', Punctuation, '#pop'), - ], - 'type_cast': [ - (r'\)', Punctuation, '#pop'), - (r'[^)]+', Name.Class), - ], - 'root': [ - include('whitespace'), - (r'([Tt]rue|[Ff]alse)', Literal), - include('values'), - # x-ref path - (r'/', Punctuation, 'path'), - # x-ref path starting with key - (r'\[', Punctuation, 'key'), - # attribute name - (r'[a-z_]\w*', Name.Class), - (r'=', Operator), - (r'\(', Punctuation, 'type_cast'), - (r',', Punctuation), - (r'<', Punctuation), - (r'>', Punctuation), - (r';', Punctuation), - ], - } - - -class CadlLexer(AtomsLexer): - """ - Lexer for cADL syntax. - - .. 
versionadded:: 2.1 - """ - name = 'cADL' - aliases = ['cadl'] - filenames = ['*.cadl'] - - tokens = { - 'path': [ - # attribute name - (r'[a-z_]\w*', Name.Class), - (r'/', Punctuation), - (r'\[', Punctuation, 'any_code'), - (r'\s+', Punctuation, '#pop'), - ], - 'root': [ - include('whitespace'), - (r'(cardinality|existence|occurrences|group|include|exclude|' - r'allow_archetype|use_archetype|use_node)\W', Keyword.Type), - (r'(and|or|not|there_exists|xor|implies|for_all)\W', Keyword.Type), - (r'(after|before|closed)\W', Keyword.Type), - (r'(not)\W', Operator), - (r'(matches|is_in)\W', Operator), - # is_in / not is_in char - ('(\u2208|\u2209)', Operator), - # there_exists / not there_exists / for_all / and / or - ('(\u2203|\u2204|\u2200|\u2227|\u2228|\u22BB|\223C)', - Operator), - # regex in slot or as string constraint - (r'(\{)(\s*)(/[^}]+/)(\s*)(\})', - bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)), - # regex in slot or as string constraint - (r'(\{)(\s*)(\^[^}]+\^)(\s*)(\})', - bygroups(Punctuation, Whitespace, String.Regex, Whitespace, Punctuation)), - (r'/', Punctuation, 'path'), - # for cardinality etc - (r'(\{)((?:\d+\.\.)?(?:\d+|\*))' - r'((?:\s*;\s*(?:ordered|unordered|unique)){,2})(\})', - bygroups(Punctuation, Number, Number, Punctuation)), - # [{ is start of a tuple value - (r'\[\{', Punctuation), - (r'\}\]', Punctuation), - (r'\{', Punctuation), - (r'\}', Punctuation), - include('constraint_values'), - # type name - (r'[A-Z]\w+(<[A-Z]\w+([A-Za-z_<>]*)>)?', Name.Class), - # attribute name - (r'[a-z_]\w*', Name.Class), - (r'\[', Punctuation, 'any_code'), - (r'(~|//|\\\\|\+|-|/|\*|\^|!=|=|<=|>=|<|>]?)', Operator), - (r'\(', Punctuation), - (r'\)', Punctuation), - # for lists of values - (r',', Punctuation), - (r'"', String, 'string'), - # for assumed value - (r';', Punctuation), - ], - } - - -class AdlLexer(AtomsLexer): - """ - Lexer for ADL syntax. - - .. 
versionadded:: 2.1 - """ - - name = 'ADL' - aliases = ['adl'] - filenames = ['*.adl', '*.adls', '*.adlf', '*.adlx'] - - tokens = { - 'whitespace': [ - # blank line ends - (r'\s*\n', Whitespace), - # comment-only line - (r'^([ \t]*)(--.*)$', bygroups(Whitespace, Comment)), - ], - 'odin_section': [ - # repeating the following two rules from the root state enable multi-line - # strings that start in the first column to be dealt with - (r'^(language|description|ontology|terminology|annotations|' - r'component_terminologies|revision_history)([ \t]*\n)', - bygroups(Generic.Heading, Whitespace)), - (r'^(definition)([ \t]*\n)', bygroups(Generic.Heading, Whitespace), 'cadl_section'), - (r'^([ \t]*|[ \t]+.*)\n', using(OdinLexer)), - (r'^([^"]*")(>[ \t]*\n)', bygroups(String, Punctuation)), - # template overlay delimiter - (r'^----------*\n', Text, '#pop'), - (r'^.*\n', String), - default('#pop'), - ], - 'cadl_section': [ - (r'^([ \t]*|[ \t]+.*)\n', using(CadlLexer)), - default('#pop'), - ], - 'rules_section': [ - (r'^[ \t]+.*\n', using(CadlLexer)), - default('#pop'), - ], - 'metadata': [ - (r'\)', Punctuation, '#pop'), - (r';', Punctuation), - (r'([Tt]rue|[Ff]alse)', Literal), - # numbers and version ids - (r'\d+(\.\d+)*', Literal), - # Guids - (r'(\d|[a-fA-F])+(-(\d|[a-fA-F])+){3,}', Literal), - (r'\w+', Name.Class), - (r'"', String, 'string'), - (r'=', Operator), - (r'[ \t]+', Whitespace), - default('#pop'), - ], - 'root': [ - (r'^(archetype|template_overlay|operational_template|template|' - r'speciali[sz]e)', Generic.Heading), - (r'^(language|description|ontology|terminology|annotations|' - r'component_terminologies|revision_history)[ \t]*\n', - Generic.Heading, 'odin_section'), - (r'^(definition)[ \t]*\n', Generic.Heading, 'cadl_section'), - (r'^(rules)[ \t]*\n', Generic.Heading, 'rules_section'), - include('archetype_id'), - (r'([ \t]*)(\()', bygroups(Whitespace, Punctuation), 'metadata'), - include('whitespace'), - ], - } diff --git 
a/spaces/qingxu98/gpt-academic/docs/waifu_plugin/jquery-ui.min.js b/spaces/qingxu98/gpt-academic/docs/waifu_plugin/jquery-ui.min.js deleted file mode 100644 index 25398a167415050ae8bfb0bfebac6aa3ab790909..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/docs/waifu_plugin/jquery-ui.min.js +++ /dev/null @@ -1,13 +0,0 @@ -/*! jQuery UI - v1.12.1 - 2016-09-14 -* http://jqueryui.com -* Includes: widget.js, position.js, data.js, disable-selection.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js, focusable.js, form-reset-mixin.js, jquery-1-7.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/draggable.js, widgets/droppable.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/resizable.js, widgets/selectable.js, widgets/selectmenu.js, widgets/slider.js, widgets/sortable.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js -* Copyright jQuery Foundation and other contributors; Licensed MIT */ - -(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):t(jQuery)})(function(t){function e(t){for(var e=t.css("visibility");"inherit"===e;)t=t.parent(),e=t.css("visibility");return"hidden"!==e}function i(t){for(var e,i;t.length&&t[0]!==document;){if(e=t.css("position"),("absolute"===e||"relative"===e||"fixed"===e)&&(i=parseInt(t.css("zIndex"),10),!isNaN(i)&&0!==i))return i;t=t.parent()}return 0}function 
s(){this._curInst=null,this._keyEvent=!1,this._disabledInputs=[],this._datepickerShowing=!1,this._inDialog=!1,this._mainDivId="ui-datepicker-div",this._inlineClass="ui-datepicker-inline",this._appendClass="ui-datepicker-append",this._triggerClass="ui-datepicker-trigger",this._dialogClass="ui-datepicker-dialog",this._disableClass="ui-datepicker-disabled",this._unselectableClass="ui-datepicker-unselectable",this._currentClass="ui-datepicker-current-day",this._dayOverClass="ui-datepicker-days-cell-over",this.regional=[],this.regional[""]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],weekHeader:"Wk",dateFormat:"mm/dd/yy",firstDay:0,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},this._defaults={showOn:"focus",showAnim:"fadeIn",showOptions:{},defaultDate:null,appendText:"",buttonText:"...",buttonImage:"",buttonImageOnly:!1,hideIfNoPrevNext:!1,navigationAsDateFormat:!1,gotoCurrent:!1,changeMonth:!1,changeYear:!1,yearRange:"c-10:c+10",showOtherMonths:!1,selectOtherMonths:!1,showWeek:!1,calculateWeek:this.iso8601Week,shortYearCutoff:"+10",minDate:null,maxDate:null,duration:"fast",beforeShowDay:null,beforeShow:null,onSelect:null,onChangeMonthYear:null,onClose:null,numberOfMonths:1,showCurrentAtPos:0,stepMonths:1,stepBigMonths:12,altField:"",altFormat:"",constrainInput:!0,showButtonPanel:!1,autoSize:!1,disabled:!1},t.extend(this._defaults,this.regional[""]),this.regional.en=t.extend(!0,{},this.regional[""]),this.regional["en-US"]=t.extend(!0,{},this.regional.en),this.dpDiv=n(t("
"))}function n(e){var i="button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a";return e.on("mouseout",i,function(){t(this).removeClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).removeClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).removeClass("ui-datepicker-next-hover")}).on("mouseover",i,o)}function o(){t.datepicker._isDisabledDatepicker(m.inline?m.dpDiv.parent()[0]:m.input[0])||(t(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"),t(this).addClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).addClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).addClass("ui-datepicker-next-hover"))}function a(e,i){t.extend(e,i);for(var s in i)null==i[s]&&(e[s]=i[s]);return e}function r(t){return function(){var e=this.element.val();t.apply(this,arguments),this._refresh(),e!==this.element.val()&&this._trigger("change")}}t.ui=t.ui||{},t.ui.version="1.12.1";var h=0,l=Array.prototype.slice;t.cleanData=function(e){return function(i){var s,n,o;for(o=0;null!=(n=i[o]);o++)try{s=t._data(n,"events"),s&&s.remove&&t(n).triggerHandler("remove")}catch(a){}e(i)}}(t.cleanData),t.widget=function(e,i,s){var n,o,a,r={},h=e.split(".")[0];e=e.split(".")[1];var l=h+"-"+e;return s||(s=i,i=t.Widget),t.isArray(s)&&(s=t.extend.apply(null,[{}].concat(s))),t.expr[":"][l.toLowerCase()]=function(e){return!!t.data(e,l)},t[h]=t[h]||{},n=t[h][e],o=t[h][e]=function(t,e){return this._createWidget?(arguments.length&&this._createWidget(t,e),void 0):new o(t,e)},t.extend(o,n,{version:s.version,_proto:t.extend({},s),_childConstructors:[]}),a=new i,a.options=t.widget.extend({},a.options),t.each(s,function(e,s){return t.isFunction(s)?(r[e]=function(){function t(){return i.prototype[e].apply(this,arguments)}function n(t){return i.prototype[e].apply(this,t)}return function(){var 
e,i=this._super,o=this._superApply;return this._super=t,this._superApply=n,e=s.apply(this,arguments),this._super=i,this._superApply=o,e}}(),void 0):(r[e]=s,void 0)}),o.prototype=t.widget.extend(a,{widgetEventPrefix:n?a.widgetEventPrefix||e:e},r,{constructor:o,namespace:h,widgetName:e,widgetFullName:l}),n?(t.each(n._childConstructors,function(e,i){var s=i.prototype;t.widget(s.namespace+"."+s.widgetName,o,i._proto)}),delete n._childConstructors):i._childConstructors.push(o),t.widget.bridge(e,o),o},t.widget.extend=function(e){for(var i,s,n=l.call(arguments,1),o=0,a=n.length;a>o;o++)for(i in n[o])s=n[o][i],n[o].hasOwnProperty(i)&&void 0!==s&&(e[i]=t.isPlainObject(s)?t.isPlainObject(e[i])?t.widget.extend({},e[i],s):t.widget.extend({},s):s);return e},t.widget.bridge=function(e,i){var s=i.prototype.widgetFullName||e;t.fn[e]=function(n){var o="string"==typeof n,a=l.call(arguments,1),r=this;return o?this.length||"instance"!==n?this.each(function(){var i,o=t.data(this,s);return"instance"===n?(r=o,!1):o?t.isFunction(o[n])&&"_"!==n.charAt(0)?(i=o[n].apply(o,a),i!==o&&void 0!==i?(r=i&&i.jquery?r.pushStack(i.get()):i,!1):void 0):t.error("no such method '"+n+"' for "+e+" widget instance"):t.error("cannot call methods on "+e+" prior to initialization; "+"attempted to call method '"+n+"'")}):r=void 0:(a.length&&(n=t.widget.extend.apply(null,[n].concat(a))),this.each(function(){var e=t.data(this,s);e?(e.option(n||{}),e._init&&e._init()):t.data(this,s,new i(n,this))})),r}},t.Widget=function(){},t.Widget._childConstructors=[],t.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"
",options:{classes:{},disabled:!1,create:null},_createWidget:function(e,i){i=t(i||this.defaultElement||this)[0],this.element=t(i),this.uuid=h++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=t(),this.hoverable=t(),this.focusable=t(),this.classesElementLookup={},i!==this&&(t.data(i,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===i&&this.destroy()}}),this.document=t(i.style?i.ownerDocument:i.document||i),this.window=t(this.document[0].defaultView||this.document[0].parentWindow)),this.options=t.widget.extend({},this.options,this._getCreateOptions(),e),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:t.noop,_create:t.noop,_init:t.noop,destroy:function(){var e=this;this._destroy(),t.each(this.classesElementLookup,function(t,i){e._removeClass(i,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:t.noop,widget:function(){return this.element},option:function(e,i){var s,n,o,a=e;if(0===arguments.length)return t.widget.extend({},this.options);if("string"==typeof e)if(a={},s=e.split("."),e=s.shift(),s.length){for(n=a[e]=t.widget.extend({},this.options[e]),o=0;s.length-1>o;o++)n[s[o]]=n[s[o]]||{},n=n[s[o]];if(e=s.pop(),1===arguments.length)return void 0===n[e]?null:n[e];n[e]=i}else{if(1===arguments.length)return void 0===this.options[e]?null:this.options[e];a[e]=i}return this._setOptions(a),this},_setOptions:function(t){var e;for(e in t)this._setOption(e,t[e]);return this},_setOption:function(t,e){return"classes"===t&&this._setOptionClasses(e),this.options[t]=e,"disabled"===t&&this._setOptionDisabled(e),this},_setOptionClasses:function(e){var i,s,n;for(i in 
e)n=this.classesElementLookup[i],e[i]!==this.options.classes[i]&&n&&n.length&&(s=t(n.get()),this._removeClass(n,i),s.addClass(this._classes({element:s,keys:i,classes:e,add:!0})))},_setOptionDisabled:function(t){this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,!!t),t&&(this._removeClass(this.hoverable,null,"ui-state-hover"),this._removeClass(this.focusable,null,"ui-state-focus"))},enable:function(){return this._setOptions({disabled:!1})},disable:function(){return this._setOptions({disabled:!0})},_classes:function(e){function i(i,o){var a,r;for(r=0;i.length>r;r++)a=n.classesElementLookup[i[r]]||t(),a=e.add?t(t.unique(a.get().concat(e.element.get()))):t(a.not(e.element).get()),n.classesElementLookup[i[r]]=a,s.push(i[r]),o&&e.classes[i[r]]&&s.push(e.classes[i[r]])}var s=[],n=this;return e=t.extend({element:this.element,classes:this.options.classes||{}},e),this._on(e.element,{remove:"_untrackClassesElement"}),e.keys&&i(e.keys.match(/\S+/g)||[],!0),e.extra&&i(e.extra.match(/\S+/g)||[]),s.join(" ")},_untrackClassesElement:function(e){var i=this;t.each(i.classesElementLookup,function(s,n){-1!==t.inArray(e.target,n)&&(i.classesElementLookup[s]=t(n.not(e.target).get()))})},_removeClass:function(t,e,i){return this._toggleClass(t,e,i,!1)},_addClass:function(t,e,i){return this._toggleClass(t,e,i,!0)},_toggleClass:function(t,e,i,s){s="boolean"==typeof s?s:i;var n="string"==typeof t||null===t,o={extra:n?e:i,keys:n?t:e,element:n?this.element:t,add:s};return o.element.toggleClass(this._classes(o),s),this},_on:function(e,i,s){var n,o=this;"boolean"!=typeof e&&(s=i,i=e,e=!1),s?(i=n=t(i),this.bindings=this.bindings.add(i)):(s=i,i=this.element,n=this.widget()),t.each(s,function(s,a){function r(){return e||o.options.disabled!==!0&&!t(this).hasClass("ui-state-disabled")?("string"==typeof a?o[a]:a).apply(o,arguments):void 0}"string"!=typeof a&&(r.guid=a.guid=a.guid||r.guid||t.guid++);var 
h=s.match(/^([\w:-]*)\s*(.*)$/),l=h[1]+o.eventNamespace,c=h[2];c?n.on(l,c,r):i.on(l,r)})},_off:function(e,i){i=(i||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,e.off(i).off(i),this.bindings=t(this.bindings.not(e).get()),this.focusable=t(this.focusable.not(e).get()),this.hoverable=t(this.hoverable.not(e).get())},_delay:function(t,e){function i(){return("string"==typeof t?s[t]:t).apply(s,arguments)}var s=this;return setTimeout(i,e||0)},_hoverable:function(e){this.hoverable=this.hoverable.add(e),this._on(e,{mouseenter:function(e){this._addClass(t(e.currentTarget),null,"ui-state-hover")},mouseleave:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-hover")}})},_focusable:function(e){this.focusable=this.focusable.add(e),this._on(e,{focusin:function(e){this._addClass(t(e.currentTarget),null,"ui-state-focus")},focusout:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-focus")}})},_trigger:function(e,i,s){var n,o,a=this.options[e];if(s=s||{},i=t.Event(i),i.type=(e===this.widgetEventPrefix?e:this.widgetEventPrefix+e).toLowerCase(),i.target=this.element[0],o=i.originalEvent)for(n in o)n in i||(i[n]=o[n]);return this.element.trigger(i,s),!(t.isFunction(a)&&a.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},t.each({show:"fadeIn",hide:"fadeOut"},function(e,i){t.Widget.prototype["_"+e]=function(s,n,o){"string"==typeof n&&(n={effect:n});var a,r=n?n===!0||"number"==typeof n?i:n.effect||i:e;n=n||{},"number"==typeof n&&(n={duration:n}),a=!t.isEmptyObject(n),n.complete=o,n.delay&&s.delay(n.delay),a&&t.effects&&t.effects.effect[r]?s[e](n):r!==e&&s[r]?s[r](n.duration,n.easing,o):s.queue(function(i){t(this)[e](),o&&o.call(s[0]),i()})}}),t.widget,function(){function e(t,e,i){return[parseFloat(t[0])*(u.test(t[0])?e/100:1),parseFloat(t[1])*(u.test(t[1])?i/100:1)]}function i(e,i){return parseInt(t.css(e,i),10)||0}function s(e){var i=e[0];return 
9===i.nodeType?{width:e.width(),height:e.height(),offset:{top:0,left:0}}:t.isWindow(i)?{width:e.width(),height:e.height(),offset:{top:e.scrollTop(),left:e.scrollLeft()}}:i.preventDefault?{width:0,height:0,offset:{top:i.pageY,left:i.pageX}}:{width:e.outerWidth(),height:e.outerHeight(),offset:e.offset()}}var n,o=Math.max,a=Math.abs,r=/left|center|right/,h=/top|center|bottom/,l=/[\+\-]\d+(\.[\d]+)?%?/,c=/^\w+/,u=/%$/,d=t.fn.position;t.position={scrollbarWidth:function(){if(void 0!==n)return n;var e,i,s=t("
"),o=s.children()[0];return t("body").append(s),e=o.offsetWidth,s.css("overflow","scroll"),i=o.offsetWidth,e===i&&(i=s[0].clientWidth),s.remove(),n=e-i},getScrollInfo:function(e){var i=e.isWindow||e.isDocument?"":e.element.css("overflow-x"),s=e.isWindow||e.isDocument?"":e.element.css("overflow-y"),n="scroll"===i||"auto"===i&&e.widthi?"left":e>0?"right":"center",vertical:0>r?"top":s>0?"bottom":"middle"};l>p&&p>a(e+i)&&(u.horizontal="center"),c>f&&f>a(s+r)&&(u.vertical="middle"),u.important=o(a(e),a(i))>o(a(s),a(r))?"horizontal":"vertical",n.using.call(this,t,u)}),h.offset(t.extend(D,{using:r}))})},t.ui.position={fit:{left:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollLeft:s.offset.left,a=s.width,r=t.left-e.collisionPosition.marginLeft,h=n-r,l=r+e.collisionWidth-a-n;e.collisionWidth>a?h>0&&0>=l?(i=t.left+h+e.collisionWidth-a-n,t.left+=h-i):t.left=l>0&&0>=h?n:h>l?n+a-e.collisionWidth:n:h>0?t.left+=h:l>0?t.left-=l:t.left=o(t.left-r,t.left)},top:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollTop:s.offset.top,a=e.within.height,r=t.top-e.collisionPosition.marginTop,h=n-r,l=r+e.collisionHeight-a-n;e.collisionHeight>a?h>0&&0>=l?(i=t.top+h+e.collisionHeight-a-n,t.top+=h-i):t.top=l>0&&0>=h?n:h>l?n+a-e.collisionHeight:n:h>0?t.top+=h:l>0?t.top-=l:t.top=o(t.top-r,t.top)}},flip:{left:function(t,e){var i,s,n=e.within,o=n.offset.left+n.scrollLeft,r=n.width,h=n.isWindow?n.scrollLeft:n.offset.left,l=t.left-e.collisionPosition.marginLeft,c=l-h,u=l+e.collisionWidth-r-h,d="left"===e.my[0]?-e.elemWidth:"right"===e.my[0]?e.elemWidth:0,p="left"===e.at[0]?e.targetWidth:"right"===e.at[0]?-e.targetWidth:0,f=-2*e.offset[0];0>c?(i=t.left+d+p+f+e.collisionWidth-r-o,(0>i||a(c)>i)&&(t.left+=d+p+f)):u>0&&(s=t.left-e.collisionPosition.marginLeft+d+p+f-h,(s>0||u>a(s))&&(t.left+=d+p+f))},top:function(t,e){var 
i,s,n=e.within,o=n.offset.top+n.scrollTop,r=n.height,h=n.isWindow?n.scrollTop:n.offset.top,l=t.top-e.collisionPosition.marginTop,c=l-h,u=l+e.collisionHeight-r-h,d="top"===e.my[1],p=d?-e.elemHeight:"bottom"===e.my[1]?e.elemHeight:0,f="top"===e.at[1]?e.targetHeight:"bottom"===e.at[1]?-e.targetHeight:0,g=-2*e.offset[1];0>c?(s=t.top+p+f+g+e.collisionHeight-r-o,(0>s||a(c)>s)&&(t.top+=p+f+g)):u>0&&(i=t.top-e.collisionPosition.marginTop+p+f+g-h,(i>0||u>a(i))&&(t.top+=p+f+g))}},flipfit:{left:function(){t.ui.position.flip.left.apply(this,arguments),t.ui.position.fit.left.apply(this,arguments)},top:function(){t.ui.position.flip.top.apply(this,arguments),t.ui.position.fit.top.apply(this,arguments)}}}}(),t.ui.position,t.extend(t.expr[":"],{data:t.expr.createPseudo?t.expr.createPseudo(function(e){return function(i){return!!t.data(i,e)}}):function(e,i,s){return!!t.data(e,s[3])}}),t.fn.extend({disableSelection:function(){var t="onselectstart"in document.createElement("div")?"selectstart":"mousedown";return function(){return this.on(t+".ui-disableSelection",function(t){t.preventDefault()})}}(),enableSelection:function(){return this.off(".ui-disableSelection")}});var c="ui-effects-",u="ui-effects-style",d="ui-effects-animated",p=t;t.effects={effect:{}},function(t,e){function i(t,e,i){var s=u[e.type]||{};return null==t?i||!e.def?null:e.def:(t=s.floor?~~t:parseFloat(t),isNaN(t)?e.def:s.mod?(t+s.mod)%s.mod:0>t?0:t>s.max?s.max:t)}function s(i){var s=l(),n=s._rgba=[];return i=i.toLowerCase(),f(h,function(t,o){var a,r=o.re.exec(i),h=r&&o.parse(r),l=o.space||"rgba";return h?(a=s[l](h),s[c[l].cache]=a[c[l].cache],n=s._rgba=a._rgba,!1):e}),n.length?("0,0,0,0"===n.join()&&t.extend(n,o.transparent),s):o[i]}function n(t,e,i){return i=(i+1)%1,1>6*i?t+6*(e-t)*i:1>2*i?e:2>3*i?t+6*(e-t)*(2/3-i):t}var o,a="backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor 
textEmphasisColor",r=/^([\-+])=\s*(\d+\.?\d*)/,h=[{re:/rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[t[1],t[2],t[3],t[4]]}},{re:/rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[2.55*t[1],2.55*t[2],2.55*t[3],t[4]]}},{re:/#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,parse:function(t){return[parseInt(t[1],16),parseInt(t[2],16),parseInt(t[3],16)]}},{re:/#([a-f0-9])([a-f0-9])([a-f0-9])/,parse:function(t){return[parseInt(t[1]+t[1],16),parseInt(t[2]+t[2],16),parseInt(t[3]+t[3],16)]}},{re:/hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,space:"hsla",parse:function(t){return[t[1],t[2]/100,t[3]/100,t[4]]}}],l=t.Color=function(e,i,s,n){return new t.Color.fn.parse(e,i,s,n)},c={rgba:{props:{red:{idx:0,type:"byte"},green:{idx:1,type:"byte"},blue:{idx:2,type:"byte"}}},hsla:{props:{hue:{idx:0,type:"degrees"},saturation:{idx:1,type:"percent"},lightness:{idx:2,type:"percent"}}}},u={"byte":{floor:!0,max:255},percent:{max:1},degrees:{mod:360,floor:!0}},d=l.support={},p=t("

")[0],f=t.each;p.style.cssText="background-color:rgba(1,1,1,.5)",d.rgba=p.style.backgroundColor.indexOf("rgba")>-1,f(c,function(t,e){e.cache="_"+t,e.props.alpha={idx:3,type:"percent",def:1}}),l.fn=t.extend(l.prototype,{parse:function(n,a,r,h){if(n===e)return this._rgba=[null,null,null,null],this;(n.jquery||n.nodeType)&&(n=t(n).css(a),a=e);var u=this,d=t.type(n),p=this._rgba=[];return a!==e&&(n=[n,a,r,h],d="array"),"string"===d?this.parse(s(n)||o._default):"array"===d?(f(c.rgba.props,function(t,e){p[e.idx]=i(n[e.idx],e)}),this):"object"===d?(n instanceof l?f(c,function(t,e){n[e.cache]&&(u[e.cache]=n[e.cache].slice())}):f(c,function(e,s){var o=s.cache;f(s.props,function(t,e){if(!u[o]&&s.to){if("alpha"===t||null==n[t])return;u[o]=s.to(u._rgba)}u[o][e.idx]=i(n[t],e,!0)}),u[o]&&0>t.inArray(null,u[o].slice(0,3))&&(u[o][3]=1,s.from&&(u._rgba=s.from(u[o])))}),this):e},is:function(t){var i=l(t),s=!0,n=this;return f(c,function(t,o){var a,r=i[o.cache];return r&&(a=n[o.cache]||o.to&&o.to(n._rgba)||[],f(o.props,function(t,i){return null!=r[i.idx]?s=r[i.idx]===a[i.idx]:e})),s}),s},_space:function(){var t=[],e=this;return f(c,function(i,s){e[s.cache]&&t.push(i)}),t.pop()},transition:function(t,e){var s=l(t),n=s._space(),o=c[n],a=0===this.alpha()?l("transparent"):this,r=a[o.cache]||o.to(a._rgba),h=r.slice();return s=s[o.cache],f(o.props,function(t,n){var o=n.idx,a=r[o],l=s[o],c=u[n.type]||{};null!==l&&(null===a?h[o]=l:(c.mod&&(l-a>c.mod/2?a+=c.mod:a-l>c.mod/2&&(a-=c.mod)),h[o]=i((l-a)*e+a,n)))}),this[n](h)},blend:function(e){if(1===this._rgba[3])return this;var i=this._rgba.slice(),s=i.pop(),n=l(e)._rgba;return l(t.map(i,function(t,e){return(1-s)*n[e]+s*t}))},toRgbaString:function(){var e="rgba(",i=t.map(this._rgba,function(t,e){return null==t?e>2?1:0:t});return 1===i[3]&&(i.pop(),e="rgb("),e+i.join()+")"},toHslaString:function(){var e="hsla(",i=t.map(this.hsla(),function(t,e){return null==t&&(t=e>2?1:0),e&&3>e&&(t=Math.round(100*t)+"%"),t});return 
1===i[3]&&(i.pop(),e="hsl("),e+i.join()+")"},toHexString:function(e){var i=this._rgba.slice(),s=i.pop();return e&&i.push(~~(255*s)),"#"+t.map(i,function(t){return t=(t||0).toString(16),1===t.length?"0"+t:t}).join("")},toString:function(){return 0===this._rgba[3]?"transparent":this.toRgbaString()}}),l.fn.parse.prototype=l.fn,c.hsla.to=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e,i,s=t[0]/255,n=t[1]/255,o=t[2]/255,a=t[3],r=Math.max(s,n,o),h=Math.min(s,n,o),l=r-h,c=r+h,u=.5*c;return e=h===r?0:s===r?60*(n-o)/l+360:n===r?60*(o-s)/l+120:60*(s-n)/l+240,i=0===l?0:.5>=u?l/c:l/(2-c),[Math.round(e)%360,i,u,null==a?1:a]},c.hsla.from=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e=t[0]/360,i=t[1],s=t[2],o=t[3],a=.5>=s?s*(1+i):s+i-s*i,r=2*s-a;return[Math.round(255*n(r,a,e+1/3)),Math.round(255*n(r,a,e)),Math.round(255*n(r,a,e-1/3)),o]},f(c,function(s,n){var o=n.props,a=n.cache,h=n.to,c=n.from;l.fn[s]=function(s){if(h&&!this[a]&&(this[a]=h(this._rgba)),s===e)return this[a].slice();var n,r=t.type(s),u="array"===r||"object"===r?s:arguments,d=this[a].slice();return f(o,function(t,e){var s=u["object"===r?t:e.idx];null==s&&(s=d[e.idx]),d[e.idx]=i(s,e)}),c?(n=l(c(d)),n[a]=d,n):l(d)},f(o,function(e,i){l.fn[e]||(l.fn[e]=function(n){var o,a=t.type(n),h="alpha"===e?this._hsla?"hsla":"rgba":s,l=this[h](),c=l[i.idx];return"undefined"===a?c:("function"===a&&(n=n.call(this,c),a=t.type(n)),null==n&&i.empty?this:("string"===a&&(o=r.exec(n),o&&(n=c+parseFloat(o[2])*("+"===o[1]?1:-1))),l[i.idx]=n,this[h](l)))})})}),l.hook=function(e){var i=e.split(" ");f(i,function(e,i){t.cssHooks[i]={set:function(e,n){var 
o,a,r="";if("transparent"!==n&&("string"!==t.type(n)||(o=s(n)))){if(n=l(o||n),!d.rgba&&1!==n._rgba[3]){for(a="backgroundColor"===i?e.parentNode:e;(""===r||"transparent"===r)&&a&&a.style;)try{r=t.css(a,"backgroundColor"),a=a.parentNode}catch(h){}n=n.blend(r&&"transparent"!==r?r:"_default")}n=n.toRgbaString()}try{e.style[i]=n}catch(h){}}},t.fx.step[i]=function(e){e.colorInit||(e.start=l(e.elem,i),e.end=l(e.end),e.colorInit=!0),t.cssHooks[i].set(e.elem,e.start.transition(e.end,e.pos))}})},l.hook(a),t.cssHooks.borderColor={expand:function(t){var e={};return f(["Top","Right","Bottom","Left"],function(i,s){e["border"+s+"Color"]=t}),e}},o=t.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(p),function(){function e(e){var i,s,n=e.ownerDocument.defaultView?e.ownerDocument.defaultView.getComputedStyle(e,null):e.currentStyle,o={};if(n&&n.length&&n[0]&&n[n[0]])for(s=n.length;s--;)i=n[s],"string"==typeof n[i]&&(o[t.camelCase(i)]=n[i]);else for(i in n)"string"==typeof n[i]&&(o[i]=n[i]);return o}function i(e,i){var s,o,a={};for(s in i)o=i[s],e[s]!==o&&(n[s]||(t.fx.step[s]||!isNaN(parseFloat(o)))&&(a[s]=o));return a}var s=["add","remove","toggle"],n={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};t.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(e,i){t.fx.step[i]=function(t){("none"!==t.end&&!t.setAttr||1===t.pos&&!t.setAttr)&&(p.style(t.elem,i,t.end),t.setAttr=!0)}}),t.fn.addBack||(t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.effects.animateClass=function(n,o,a,r){var h=t.speed(o,a,r);return this.queue(function(){var 
o,a=t(this),r=a.attr("class")||"",l=h.children?a.find("*").addBack():a;l=l.map(function(){var i=t(this);return{el:i,start:e(this)}}),o=function(){t.each(s,function(t,e){n[e]&&a[e+"Class"](n[e])})},o(),l=l.map(function(){return this.end=e(this.el[0]),this.diff=i(this.start,this.end),this}),a.attr("class",r),l=l.map(function(){var e=this,i=t.Deferred(),s=t.extend({},h,{queue:!1,complete:function(){i.resolve(e)}});return this.el.animate(this.diff,s),i.promise()}),t.when.apply(t,l.get()).done(function(){o(),t.each(arguments,function(){var e=this.el;t.each(this.diff,function(t){e.css(t,"")})}),h.complete.call(a[0])})})},t.fn.extend({addClass:function(e){return function(i,s,n,o){return s?t.effects.animateClass.call(this,{add:i},s,n,o):e.apply(this,arguments)}}(t.fn.addClass),removeClass:function(e){return function(i,s,n,o){return arguments.length>1?t.effects.animateClass.call(this,{remove:i},s,n,o):e.apply(this,arguments)}}(t.fn.removeClass),toggleClass:function(e){return function(i,s,n,o,a){return"boolean"==typeof s||void 0===s?n?t.effects.animateClass.call(this,s?{add:i}:{remove:i},n,o,a):e.apply(this,arguments):t.effects.animateClass.call(this,{toggle:i},s,n,o)}}(t.fn.toggleClass),switchClass:function(e,i,s,n,o){return t.effects.animateClass.call(this,{add:i,remove:e},s,n,o)}})}(),function(){function e(e,i,s,n){return t.isPlainObject(e)&&(i=e,e=e.effect),e={effect:e},null==i&&(i={}),t.isFunction(i)&&(n=i,s=null,i={}),("number"==typeof i||t.fx.speeds[i])&&(n=s,s=i,i={}),t.isFunction(s)&&(n=s,s=null),i&&t.extend(e,i),s=s||i.duration,e.duration=t.fx.off?0:"number"==typeof s?s:s in t.fx.speeds?t.fx.speeds[s]:t.fx.speeds._default,e.complete=n||i.complete,e}function i(e){return!e||"number"==typeof e||t.fx.speeds[e]?!0:"string"!=typeof e||t.effects.effect[e]?t.isFunction(e)?!0:"object"!=typeof e||e.effect?!1:!0:!0}function s(t,e){var 
i=e.outerWidth(),s=e.outerHeight(),n=/^rect\((-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto)\)$/,o=n.exec(t)||["",0,i,s,0];return{top:parseFloat(o[1])||0,right:"auto"===o[2]?i:parseFloat(o[2]),bottom:"auto"===o[3]?s:parseFloat(o[3]),left:parseFloat(o[4])||0}}t.expr&&t.expr.filters&&t.expr.filters.animated&&(t.expr.filters.animated=function(e){return function(i){return!!t(i).data(d)||e(i)}}(t.expr.filters.animated)),t.uiBackCompat!==!1&&t.extend(t.effects,{save:function(t,e){for(var i=0,s=e.length;s>i;i++)null!==e[i]&&t.data(c+e[i],t[0].style[e[i]])},restore:function(t,e){for(var i,s=0,n=e.length;n>s;s++)null!==e[s]&&(i=t.data(c+e[s]),t.css(e[s],i))},setMode:function(t,e){return"toggle"===e&&(e=t.is(":hidden")?"show":"hide"),e},createWrapper:function(e){if(e.parent().is(".ui-effects-wrapper"))return e.parent();var i={width:e.outerWidth(!0),height:e.outerHeight(!0),"float":e.css("float")},s=t("

").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),n={width:e.width(),height:e.height()},o=document.activeElement;try{o.id}catch(a){o=document.body}return e.wrap(s),(e[0]===o||t.contains(e[0],o))&&t(o).trigger("focus"),s=e.parent(),"static"===e.css("position")?(s.css({position:"relative"}),e.css({position:"relative"})):(t.extend(i,{position:e.css("position"),zIndex:e.css("z-index")}),t.each(["top","left","bottom","right"],function(t,s){i[s]=e.css(s),isNaN(parseInt(i[s],10))&&(i[s]="auto")}),e.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),e.css(n),s.css(i).show()},removeWrapper:function(e){var i=document.activeElement;return e.parent().is(".ui-effects-wrapper")&&(e.parent().replaceWith(e),(e[0]===i||t.contains(e[0],i))&&t(i).trigger("focus")),e}}),t.extend(t.effects,{version:"1.12.1",define:function(e,i,s){return s||(s=i,i="effect"),t.effects.effect[e]=s,t.effects.effect[e].mode=i,s},scaledDimensions:function(t,e,i){if(0===e)return{height:0,width:0,outerHeight:0,outerWidth:0};var s="horizontal"!==i?(e||100)/100:1,n="vertical"!==i?(e||100)/100:1;return{height:t.height()*n,width:t.width()*s,outerHeight:t.outerHeight()*n,outerWidth:t.outerWidth()*s}},clipToBox:function(t){return{width:t.clip.right-t.clip.left,height:t.clip.bottom-t.clip.top,left:t.clip.left,top:t.clip.top}},unshift:function(t,e,i){var s=t.queue();e>1&&s.splice.apply(s,[1,0].concat(s.splice(e,i))),t.dequeue()},saveStyle:function(t){t.data(u,t[0].style.cssText)},restoreStyle:function(t){t[0].style.cssText=t.data(u)||"",t.removeData(u)},mode:function(t,e){var i=t.is(":hidden");return"toggle"===e&&(e=i?"show":"hide"),(i?"hide"===e:"show"===e)&&(e="none"),e},getBaseline:function(t,e){var 
i,s;switch(t[0]){case"top":i=0;break;case"middle":i=.5;break;case"bottom":i=1;break;default:i=t[0]/e.height}switch(t[1]){case"left":s=0;break;case"center":s=.5;break;case"right":s=1;break;default:s=t[1]/e.width}return{x:s,y:i}},createPlaceholder:function(e){var i,s=e.css("position"),n=e.position();return e.css({marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()),/^(static|relative)/.test(s)&&(s="absolute",i=t("<"+e[0].nodeName+">").insertAfter(e).css({display:/^(inline|ruby)/.test(e.css("display"))?"inline-block":"block",visibility:"hidden",marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight"),"float":e.css("float")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).addClass("ui-effects-placeholder"),e.data(c+"placeholder",i)),e.css({position:s,left:n.left,top:n.top}),i},removePlaceholder:function(t){var e=c+"placeholder",i=t.data(e);i&&(i.remove(),t.removeData(e))},cleanUp:function(e){t.effects.restoreStyle(e),t.effects.removePlaceholder(e)},setTransition:function(e,i,s,n){return n=n||{},t.each(i,function(t,i){var o=e.cssUnit(i);o[0]>0&&(n[i]=o[0]*s+o[1])}),n}}),t.fn.extend({effect:function(){function i(e){function i(){r.removeData(d),t.effects.cleanUp(r),"hide"===s.mode&&r.hide(),a()}function a(){t.isFunction(h)&&h.call(r[0]),t.isFunction(e)&&e()}var r=t(this);s.mode=c.shift(),t.uiBackCompat===!1||o?"none"===s.mode?(r[l](),a()):n.call(r[0],s,i):(r.is(":hidden")?"hide"===l:"show"===l)?(r[l](),a()):n.call(r[0],s,a)}var s=e.apply(this,arguments),n=t.effects.effect[s.effect],o=n.mode,a=s.queue,r=a||"fx",h=s.complete,l=s.mode,c=[],u=function(e){var i=t(this),s=t.effects.mode(i,l)||o;i.data(d,!0),c.push(s),o&&("show"===s||s===o&&"hide"===s)&&i.show(),o&&"none"===s||t.effects.saveStyle(i),t.isFunction(e)&&e()};return 
t.fx.off||!n?l?this[l](s.duration,h):this.each(function(){h&&h.call(this)}):a===!1?this.each(u).each(i):this.queue(r,u).queue(r,i)},show:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="show",this.effect.call(this,n) -}}(t.fn.show),hide:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="hide",this.effect.call(this,n)}}(t.fn.hide),toggle:function(t){return function(s){if(i(s)||"boolean"==typeof s)return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="toggle",this.effect.call(this,n)}}(t.fn.toggle),cssUnit:function(e){var i=this.css(e),s=[];return t.each(["em","px","%","pt"],function(t,e){i.indexOf(e)>0&&(s=[parseFloat(i),e])}),s},cssClip:function(t){return t?this.css("clip","rect("+t.top+"px "+t.right+"px "+t.bottom+"px "+t.left+"px)"):s(this.css("clip"),this)},transfer:function(e,i){var s=t(this),n=t(e.to),o="fixed"===n.css("position"),a=t("body"),r=o?a.scrollTop():0,h=o?a.scrollLeft():0,l=n.offset(),c={top:l.top-r,left:l.left-h,height:n.innerHeight(),width:n.innerWidth()},u=s.offset(),d=t("
").appendTo("body").addClass(e.className).css({top:u.top-r,left:u.left-h,height:s.innerHeight(),width:s.innerWidth(),position:o?"fixed":"absolute"}).animate(c,e.duration,e.easing,function(){d.remove(),t.isFunction(i)&&i()})}}),t.fx.step.clip=function(e){e.clipInit||(e.start=t(e.elem).cssClip(),"string"==typeof e.end&&(e.end=s(e.end,e.elem)),e.clipInit=!0),t(e.elem).cssClip({top:e.pos*(e.end.top-e.start.top)+e.start.top,right:e.pos*(e.end.right-e.start.right)+e.start.right,bottom:e.pos*(e.end.bottom-e.start.bottom)+e.start.bottom,left:e.pos*(e.end.left-e.start.left)+e.start.left})}}(),function(){var e={};t.each(["Quad","Cubic","Quart","Quint","Expo"],function(t,i){e[i]=function(e){return Math.pow(e,t+2)}}),t.extend(e,{Sine:function(t){return 1-Math.cos(t*Math.PI/2)},Circ:function(t){return 1-Math.sqrt(1-t*t)},Elastic:function(t){return 0===t||1===t?t:-Math.pow(2,8*(t-1))*Math.sin((80*(t-1)-7.5)*Math.PI/15)},Back:function(t){return t*t*(3*t-2)},Bounce:function(t){for(var e,i=4;((e=Math.pow(2,--i))-1)/11>t;);return 1/Math.pow(4,3-i)-7.5625*Math.pow((3*e-2)/22-t,2)}}),t.each(e,function(e,i){t.easing["easeIn"+e]=i,t.easing["easeOut"+e]=function(t){return 1-i(1-t)},t.easing["easeInOut"+e]=function(t){return.5>t?i(2*t)/2:1-i(-2*t+2)/2}})}();var f=t.effects;t.effects.define("blind","hide",function(e,i){var s={up:["bottom","top"],vertical:["bottom","top"],down:["top","bottom"],left:["right","left"],horizontal:["right","left"],right:["left","right"]},n=t(this),o=e.direction||"up",a=n.cssClip(),r={clip:t.extend({},a)},h=t.effects.createPlaceholder(n);r.clip[s[o][0]]=r.clip[s[o][1]],"show"===e.mode&&(n.cssClip(r.clip),h&&h.css(t.effects.clipToBox(r)),r.clip=a),h&&h.animate(t.effects.clipToBox(r),e.duration,e.easing),n.animate(r,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("bounce",function(e,i){var 
s,n,o,a=t(this),r=e.mode,h="hide"===r,l="show"===r,c=e.direction||"up",u=e.distance,d=e.times||5,p=2*d+(l||h?1:0),f=e.duration/p,g=e.easing,m="up"===c||"down"===c?"top":"left",_="up"===c||"left"===c,v=0,b=a.queue().length;for(t.effects.createPlaceholder(a),o=a.css(m),u||(u=a["top"===m?"outerHeight":"outerWidth"]()/3),l&&(n={opacity:1},n[m]=o,a.css("opacity",0).css(m,_?2*-u:2*u).animate(n,f,g)),h&&(u/=Math.pow(2,d-1)),n={},n[m]=o;d>v;v++)s={},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g).animate(n,f,g),u=h?2*u:u/2;h&&(s={opacity:0},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g)),a.queue(i),t.effects.unshift(a,b,p+1)}),t.effects.define("clip","hide",function(e,i){var s,n={},o=t(this),a=e.direction||"vertical",r="both"===a,h=r||"horizontal"===a,l=r||"vertical"===a;s=o.cssClip(),n.clip={top:l?(s.bottom-s.top)/2:s.top,right:h?(s.right-s.left)/2:s.right,bottom:l?(s.bottom-s.top)/2:s.bottom,left:h?(s.right-s.left)/2:s.left},t.effects.createPlaceholder(o),"show"===e.mode&&(o.cssClip(n.clip),n.clip=s),o.animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("drop","hide",function(e,i){var s,n=t(this),o=e.mode,a="show"===o,r=e.direction||"left",h="up"===r||"down"===r?"top":"left",l="up"===r||"left"===r?"-=":"+=",c="+="===l?"-=":"+=",u={opacity:0};t.effects.createPlaceholder(n),s=e.distance||n["top"===h?"outerHeight":"outerWidth"](!0)/2,u[h]=l+s,a&&(n.css(u),u[h]=c+s,u.opacity=1),n.animate(u,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("explode","hide",function(e,i){function s(){b.push(this),b.length===u*d&&n()}function n(){p.css({visibility:"visible"}),t(b).remove(),i()}var o,a,r,h,l,c,u=e.pieces?Math.round(Math.sqrt(e.pieces)):3,d=u,p=t(this),f=e.mode,g="show"===f,m=p.show().css("visibility","hidden").offset(),_=Math.ceil(p.outerWidth()/d),v=Math.ceil(p.outerHeight()/u),b=[];for(o=0;u>o;o++)for(h=m.top+o*v,c=o-(u-1)/2,a=0;d>a;a++)r=m.left+a*_,l=a-(d-1)/2,p.clone().appendTo("body").wrap("
").css({position:"absolute",visibility:"visible",left:-a*_,top:-o*v}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:_,height:v,left:r+(g?l*_:0),top:h+(g?c*v:0),opacity:g?0:1}).animate({left:r+(g?0:l*_),top:h+(g?0:c*v),opacity:g?1:0},e.duration||500,e.easing,s)}),t.effects.define("fade","toggle",function(e,i){var s="show"===e.mode;t(this).css("opacity",s?0:1).animate({opacity:s?1:0},{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("fold","hide",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=e.size||15,h=/([0-9]+)%/.exec(r),l=!!e.horizFirst,c=l?["right","bottom"]:["bottom","right"],u=e.duration/2,d=t.effects.createPlaceholder(s),p=s.cssClip(),f={clip:t.extend({},p)},g={clip:t.extend({},p)},m=[p[c[0]],p[c[1]]],_=s.queue().length;h&&(r=parseInt(h[1],10)/100*m[a?0:1]),f.clip[c[0]]=r,g.clip[c[0]]=r,g.clip[c[1]]=0,o&&(s.cssClip(g.clip),d&&d.css(t.effects.clipToBox(g)),g.clip=p),s.queue(function(i){d&&d.animate(t.effects.clipToBox(f),u,e.easing).animate(t.effects.clipToBox(g),u,e.easing),i()}).animate(f,u,e.easing).animate(g,u,e.easing).queue(i),t.effects.unshift(s,_,4)}),t.effects.define("highlight","show",function(e,i){var s=t(this),n={backgroundColor:s.css("backgroundColor")};"hide"===e.mode&&(n.opacity=0),t.effects.saveStyle(s),s.css({backgroundImage:"none",backgroundColor:e.color||"#ffff99"}).animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("size",function(e,i){var 
s,n,o,a=t(this),r=["fontSize"],h=["borderTopWidth","borderBottomWidth","paddingTop","paddingBottom"],l=["borderLeftWidth","borderRightWidth","paddingLeft","paddingRight"],c=e.mode,u="effect"!==c,d=e.scale||"both",p=e.origin||["middle","center"],f=a.css("position"),g=a.position(),m=t.effects.scaledDimensions(a),_=e.from||m,v=e.to||t.effects.scaledDimensions(a,0);t.effects.createPlaceholder(a),"show"===c&&(o=_,_=v,v=o),n={from:{y:_.height/m.height,x:_.width/m.width},to:{y:v.height/m.height,x:v.width/m.width}},("box"===d||"both"===d)&&(n.from.y!==n.to.y&&(_=t.effects.setTransition(a,h,n.from.y,_),v=t.effects.setTransition(a,h,n.to.y,v)),n.from.x!==n.to.x&&(_=t.effects.setTransition(a,l,n.from.x,_),v=t.effects.setTransition(a,l,n.to.x,v))),("content"===d||"both"===d)&&n.from.y!==n.to.y&&(_=t.effects.setTransition(a,r,n.from.y,_),v=t.effects.setTransition(a,r,n.to.y,v)),p&&(s=t.effects.getBaseline(p,m),_.top=(m.outerHeight-_.outerHeight)*s.y+g.top,_.left=(m.outerWidth-_.outerWidth)*s.x+g.left,v.top=(m.outerHeight-v.outerHeight)*s.y+g.top,v.left=(m.outerWidth-v.outerWidth)*s.x+g.left),a.css(_),("content"===d||"both"===d)&&(h=h.concat(["marginTop","marginBottom"]).concat(r),l=l.concat(["marginLeft","marginRight"]),a.find("*[width]").each(function(){var i=t(this),s=t.effects.scaledDimensions(i),o={height:s.height*n.from.y,width:s.width*n.from.x,outerHeight:s.outerHeight*n.from.y,outerWidth:s.outerWidth*n.from.x},a={height:s.height*n.to.y,width:s.width*n.to.x,outerHeight:s.height*n.to.y,outerWidth:s.width*n.to.x};n.from.y!==n.to.y&&(o=t.effects.setTransition(i,h,n.from.y,o),a=t.effects.setTransition(i,h,n.to.y,a)),n.from.x!==n.to.x&&(o=t.effects.setTransition(i,l,n.from.x,o),a=t.effects.setTransition(i,l,n.to.x,a)),u&&t.effects.saveStyle(i),i.css(o),i.animate(a,e.duration,e.easing,function(){u&&t.effects.restoreStyle(i)})})),a.animate(v,{queue:!1,duration:e.duration,easing:e.easing,complete:function(){var 
e=a.offset();0===v.opacity&&a.css("opacity",_.opacity),u||(a.css("position","static"===f?"relative":f).offset(e),t.effects.saveStyle(a)),i()}})}),t.effects.define("scale",function(e,i){var s=t(this),n=e.mode,o=parseInt(e.percent,10)||(0===parseInt(e.percent,10)?0:"effect"!==n?0:100),a=t.extend(!0,{from:t.effects.scaledDimensions(s),to:t.effects.scaledDimensions(s,o,e.direction||"both"),origin:e.origin||["middle","center"]},e);e.fade&&(a.from.opacity=1,a.to.opacity=0),t.effects.effect.size.call(this,a,i)}),t.effects.define("puff","hide",function(e,i){var s=t.extend(!0,{},e,{fade:!0,percent:parseInt(e.percent,10)||150});t.effects.effect.scale.call(this,s,i)}),t.effects.define("pulsate","show",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=o||a,h=2*(e.times||5)+(r?1:0),l=e.duration/h,c=0,u=1,d=s.queue().length;for((o||!s.is(":visible"))&&(s.css("opacity",0).show(),c=1);h>u;u++)s.animate({opacity:c},l,e.easing),c=1-c;s.animate({opacity:c},l,e.easing),s.queue(i),t.effects.unshift(s,d,h+1)}),t.effects.define("shake",function(e,i){var s=1,n=t(this),o=e.direction||"left",a=e.distance||20,r=e.times||3,h=2*r+1,l=Math.round(e.duration/h),c="up"===o||"down"===o?"top":"left",u="up"===o||"left"===o,d={},p={},f={},g=n.queue().length;for(t.effects.createPlaceholder(n),d[c]=(u?"-=":"+=")+a,p[c]=(u?"+=":"-=")+2*a,f[c]=(u?"-=":"+=")+2*a,n.animate(d,l,e.easing);r>s;s++)n.animate(p,l,e.easing).animate(f,l,e.easing);n.animate(p,l,e.easing).animate(d,l/2,e.easing).queue(i),t.effects.unshift(n,g,h+1)}),t.effects.define("slide","show",function(e,i){var 
s,n,o=t(this),a={up:["bottom","top"],down:["top","bottom"],left:["right","left"],right:["left","right"]},r=e.mode,h=e.direction||"left",l="up"===h||"down"===h?"top":"left",c="up"===h||"left"===h,u=e.distance||o["top"===l?"outerHeight":"outerWidth"](!0),d={};t.effects.createPlaceholder(o),s=o.cssClip(),n=o.position()[l],d[l]=(c?-1:1)*u+n,d.clip=o.cssClip(),d.clip[a[h][1]]=d.clip[a[h][0]],"show"===r&&(o.cssClip(d.clip),o.css(l,d[l]),d.clip=s,d[l]=n),o.animate(d,{queue:!1,duration:e.duration,easing:e.easing,complete:i})});var f;t.uiBackCompat!==!1&&(f=t.effects.define("transfer",function(e,i){t(this).transfer(e,i)})),t.ui.focusable=function(i,s){var n,o,a,r,h,l=i.nodeName.toLowerCase();return"area"===l?(n=i.parentNode,o=n.name,i.href&&o&&"map"===n.nodeName.toLowerCase()?(a=t("img[usemap='#"+o+"']"),a.length>0&&a.is(":visible")):!1):(/^(input|select|textarea|button|object)$/.test(l)?(r=!i.disabled,r&&(h=t(i).closest("fieldset")[0],h&&(r=!h.disabled))):r="a"===l?i.href||s:s,r&&t(i).is(":visible")&&e(t(i)))},t.extend(t.expr[":"],{focusable:function(e){return t.ui.focusable(e,null!=t.attr(e,"tabindex"))}}),t.ui.focusable,t.fn.form=function(){return"string"==typeof this[0].form?this.closest("form"):t(this[0].form)},t.ui.formResetMixin={_formResetHandler:function(){var e=t(this);setTimeout(function(){var i=e.data("ui-form-reset-instances");t.each(i,function(){this.refresh()})})},_bindFormResetHandler:function(){if(this.form=this.element.form(),this.form.length){var t=this.form.data("ui-form-reset-instances")||[];t.length||this.form.on("reset.ui-form-reset",this._formResetHandler),t.push(this),this.form.data("ui-form-reset-instances",t)}},_unbindFormResetHandler:function(){if(this.form.length){var 
e=this.form.data("ui-form-reset-instances");e.splice(t.inArray(this,e),1),e.length?this.form.data("ui-form-reset-instances",e):this.form.removeData("ui-form-reset-instances").off("reset.ui-form-reset")}}},"1.7"===t.fn.jquery.substring(0,3)&&(t.each(["Width","Height"],function(e,i){function s(e,i,s,o){return t.each(n,function(){i-=parseFloat(t.css(e,"padding"+this))||0,s&&(i-=parseFloat(t.css(e,"border"+this+"Width"))||0),o&&(i-=parseFloat(t.css(e,"margin"+this))||0)}),i}var n="Width"===i?["Left","Right"]:["Top","Bottom"],o=i.toLowerCase(),a={innerWidth:t.fn.innerWidth,innerHeight:t.fn.innerHeight,outerWidth:t.fn.outerWidth,outerHeight:t.fn.outerHeight};t.fn["inner"+i]=function(e){return void 0===e?a["inner"+i].call(this):this.each(function(){t(this).css(o,s(this,e)+"px")})},t.fn["outer"+i]=function(e,n){return"number"!=typeof e?a["outer"+i].call(this,e):this.each(function(){t(this).css(o,s(this,e,!0,n)+"px")})}}),t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.ui.keyCode={BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38},t.ui.escapeSelector=function(){var t=/([!"#$%&'()*+,.\/:;<=>?@[\]^`{|}~])/g;return function(e){return e.replace(t,"\\$1")}}(),t.fn.labels=function(){var e,i,s,n,o;return this[0].labels&&this[0].labels.length?this.pushStack(this[0].labels):(n=this.eq(0).parents("label"),s=this.attr("id"),s&&(e=this.eq(0).parents().last(),o=e.add(e.length?e.siblings():this.siblings()),i="label[for='"+t.ui.escapeSelector(s)+"']",n=n.add(o.find(i).addBack(i))),this.pushStack(n))},t.fn.scrollParent=function(e){var i=this.css("position"),s="absolute"===i,n=e?/(auto|scroll|hidden)/:/(auto|scroll)/,o=this.parents().filter(function(){var e=t(this);return 
s&&"static"===e.css("position")?!1:n.test(e.css("overflow")+e.css("overflow-y")+e.css("overflow-x"))}).eq(0);return"fixed"!==i&&o.length?o:t(this[0].ownerDocument||document)},t.extend(t.expr[":"],{tabbable:function(e){var i=t.attr(e,"tabindex"),s=null!=i;return(!s||i>=0)&&t.ui.focusable(e,s)}}),t.fn.extend({uniqueId:function(){var t=0;return function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++t)})}}(),removeUniqueId:function(){return this.each(function(){/^ui-id-\d+$/.test(this.id)&&t(this).removeAttr("id")})}}),t.widget("ui.accordion",{version:"1.12.1",options:{active:0,animate:{},classes:{"ui-accordion-header":"ui-corner-top","ui-accordion-header-collapsed":"ui-corner-all","ui-accordion-content":"ui-corner-bottom"},collapsible:!1,event:"click",header:"> li > :first-child, > :not(li):even",heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},hideProps:{borderTopWidth:"hide",borderBottomWidth:"hide",paddingTop:"hide",paddingBottom:"hide",height:"hide"},showProps:{borderTopWidth:"show",borderBottomWidth:"show",paddingTop:"show",paddingBottom:"show",height:"show"},_create:function(){var e=this.options;this.prevShow=this.prevHide=t(),this._addClass("ui-accordion","ui-widget ui-helper-reset"),this.element.attr("role","tablist"),e.collapsible||e.active!==!1&&null!=e.active||(e.active=0),this._processPanels(),0>e.active&&(e.active+=this.headers.length),this._refresh()},_getCreateEventData:function(){return{header:this.active,panel:this.active.length?this.active.next():t()}},_createIcons:function(){var e,i,s=this.options.icons;s&&(e=t(""),this._addClass(e,"ui-accordion-header-icon","ui-icon 
"+s.header),e.prependTo(this.headers),i=this.active.children(".ui-accordion-header-icon"),this._removeClass(i,s.header)._addClass(i,null,s.activeHeader)._addClass(this.headers,"ui-accordion-icons"))},_destroyIcons:function(){this._removeClass(this.headers,"ui-accordion-icons"),this.headers.children(".ui-accordion-header-icon").remove()},_destroy:function(){var t;this.element.removeAttr("role"),this.headers.removeAttr("role aria-expanded aria-selected aria-controls tabIndex").removeUniqueId(),this._destroyIcons(),t=this.headers.next().css("display","").removeAttr("role aria-hidden aria-labelledby").removeUniqueId(),"content"!==this.options.heightStyle&&t.css("height","")},_setOption:function(t,e){return"active"===t?(this._activate(e),void 0):("event"===t&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(e)),this._super(t,e),"collapsible"!==t||e||this.options.active!==!1||this._activate(0),"icons"===t&&(this._destroyIcons(),e&&this._createIcons()),void 0)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t),this._toggleClass(this.headers.add(this.headers.next()),null,"ui-state-disabled",!!t)},_keydown:function(e){if(!e.altKey&&!e.ctrlKey){var i=t.ui.keyCode,s=this.headers.length,n=this.headers.index(e.target),o=!1;switch(e.keyCode){case i.RIGHT:case i.DOWN:o=this.headers[(n+1)%s];break;case i.LEFT:case i.UP:o=this.headers[(n-1+s)%s];break;case i.SPACE:case i.ENTER:this._eventHandler(e);break;case i.HOME:o=this.headers[0];break;case i.END:o=this.headers[s-1]}o&&(t(e.target).attr("tabIndex",-1),t(o).attr("tabIndex",0),t(o).trigger("focus"),e.preventDefault())}},_panelKeyDown:function(e){e.keyCode===t.ui.keyCode.UP&&e.ctrlKey&&t(e.currentTarget).prev().trigger("focus")},refresh:function(){var 
e=this.options;this._processPanels(),e.active===!1&&e.collapsible===!0||!this.headers.length?(e.active=!1,this.active=t()):e.active===!1?this._activate(0):this.active.length&&!t.contains(this.element[0],this.active[0])?this.headers.length===this.headers.find(".ui-state-disabled").length?(e.active=!1,this.active=t()):this._activate(Math.max(0,e.active-1)):e.active=this.headers.index(this.active),this._destroyIcons(),this._refresh()},_processPanels:function(){var t=this.headers,e=this.panels;this.headers=this.element.find(this.options.header),this._addClass(this.headers,"ui-accordion-header ui-accordion-header-collapsed","ui-state-default"),this.panels=this.headers.next().filter(":not(.ui-accordion-content-active)").hide(),this._addClass(this.panels,"ui-accordion-content","ui-helper-reset ui-widget-content"),e&&(this._off(t.not(this.headers)),this._off(e.not(this.panels)))},_refresh:function(){var e,i=this.options,s=i.heightStyle,n=this.element.parent();this.active=this._findActive(i.active),this._addClass(this.active,"ui-accordion-header-active","ui-state-active")._removeClass(this.active,"ui-accordion-header-collapsed"),this._addClass(this.active.next(),"ui-accordion-content-active"),this.active.next().show(),this.headers.attr("role","tab").each(function(){var e=t(this),i=e.uniqueId().attr("id"),s=e.next(),n=s.uniqueId().attr("id");e.attr("aria-controls",n),s.attr("aria-labelledby",i)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}).next().attr({"aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}).next().attr({"aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._createIcons(),this._setupEvents(i.event),"fill"===s?(e=n.height(),this.element.siblings(":visible").each(function(){var 
i=t(this),s=i.css("position");"absolute"!==s&&"fixed"!==s&&(e-=i.outerHeight(!0))}),this.headers.each(function(){e-=t(this).outerHeight(!0)}),this.headers.next().each(function(){t(this).height(Math.max(0,e-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===s&&(e=0,this.headers.next().each(function(){var i=t(this).is(":visible");i||t(this).show(),e=Math.max(e,t(this).css("height","").height()),i||t(this).hide()}).height(e))},_activate:function(e){var i=this._findActive(e)[0];i!==this.active[0]&&(i=i||this.active[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return"number"==typeof e?this.headers.eq(e):t()},_setupEvents:function(e){var i={keydown:"_keydown"};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.headers.add(this.headers.next())),this._on(this.headers,i),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._hoverable(this.headers),this._focusable(this.headers)},_eventHandler:function(e){var i,s,n=this.options,o=this.active,a=t(e.currentTarget),r=a[0]===o[0],h=r&&n.collapsible,l=h?t():a.next(),c=o.next(),u={oldHeader:o,oldPanel:c,newHeader:h?t():a,newPanel:l};e.preventDefault(),r&&!n.collapsible||this._trigger("beforeActivate",e,u)===!1||(n.active=h?!1:this.headers.index(a),this.active=r?t():a,this._toggle(u),this._removeClass(o,"ui-accordion-header-active","ui-state-active"),n.icons&&(i=o.children(".ui-accordion-header-icon"),this._removeClass(i,null,n.icons.activeHeader)._addClass(i,null,n.icons.header)),r||(this._removeClass(a,"ui-accordion-header-collapsed")._addClass(a,"ui-accordion-header-active","ui-state-active"),n.icons&&(s=a.children(".ui-accordion-header-icon"),this._removeClass(s,null,n.icons.header)._addClass(s,null,n.icons.activeHeader)),this._addClass(a.next(),"ui-accordion-content-active")))},_toggle:function(e){var 
i=e.newPanel,s=this.prevShow.length?this.prevShow:e.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=i,this.prevHide=s,this.options.animate?this._animate(i,s,e):(s.hide(),i.show(),this._toggleComplete(e)),s.attr({"aria-hidden":"true"}),s.prev().attr({"aria-selected":"false","aria-expanded":"false"}),i.length&&s.length?s.prev().attr({tabIndex:-1,"aria-expanded":"false"}):i.length&&this.headers.filter(function(){return 0===parseInt(t(this).attr("tabIndex"),10)}).attr("tabIndex",-1),i.attr("aria-hidden","false").prev().attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_animate:function(t,e,i){var s,n,o,a=this,r=0,h=t.css("box-sizing"),l=t.length&&(!e.length||t.index()",delay:300,options:{icons:{submenu:"ui-icon-caret-1-e"},items:"> *",menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.element.uniqueId().attr({role:this.options.role,tabIndex:0}),this._addClass("ui-menu","ui-widget ui-widget-content"),this._on({"mousedown .ui-menu-item":function(t){t.preventDefault()},"click .ui-menu-item":function(e){var i=t(e.target),s=t(t.ui.safeActiveElement(this.document[0]));!this.mouseHandled&&i.not(".ui-state-disabled").length&&(this.select(e),e.isPropagationStopped()||(this.mouseHandled=!0),i.has(".ui-menu").length?this.expand(e):!this.element.is(":focus")&&s.closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":function(e){if(!this.previousFilter){var i=t(e.target).closest(".ui-menu-item"),s=t(e.currentTarget);i[0]===s[0]&&(this._removeClass(s.siblings().children(".ui-state-active"),null,"ui-state-active"),this.focus(e,s))}},mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(t,e){var 
i=this.active||this.element.find(this.options.items).eq(0);e||this.focus(t,i)},blur:function(e){this._delay(function(){var i=!t.contains(this.element[0],t.ui.safeActiveElement(this.document[0]));i&&this.collapseAll(e)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){this._closeOnDocumentClick(t)&&this.collapseAll(t),this.mouseHandled=!1}})},_destroy:function(){var e=this.element.find(".ui-menu-item").removeAttr("role aria-disabled"),i=e.children(".ui-menu-item-wrapper").removeUniqueId().removeAttr("tabIndex role aria-haspopup");this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeAttr("role aria-labelledby aria-expanded aria-hidden aria-disabled tabIndex").removeUniqueId().show(),i.children().each(function(){var e=t(this);e.data("ui-menu-submenu-caret")&&e.remove()})},_keydown:function(e){var i,s,n,o,a=!0;switch(e.keyCode){case t.ui.keyCode.PAGE_UP:this.previousPage(e);break;case t.ui.keyCode.PAGE_DOWN:this.nextPage(e);break;case t.ui.keyCode.HOME:this._move("first","first",e);break;case t.ui.keyCode.END:this._move("last","last",e);break;case t.ui.keyCode.UP:this.previous(e);break;case t.ui.keyCode.DOWN:this.next(e);break;case t.ui.keyCode.LEFT:this.collapse(e);break;case t.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(e);break;case t.ui.keyCode.ENTER:case t.ui.keyCode.SPACE:this._activate(e);break;case t.ui.keyCode.ESCAPE:this.collapse(e);break;default:a=!1,s=this.previousFilter||"",o=!1,n=e.keyCode>=96&&105>=e.keyCode?""+(e.keyCode-96):String.fromCharCode(e.keyCode),clearTimeout(this.filterTimer),n===s?o=!0:n=s+n,i=this._filterMenuItems(n),i=o&&-1!==i.index(this.active.next())?this.active.nextAll(".ui-menu-item"):i,i.length||(n=String.fromCharCode(e.keyCode),i=this._filterMenuItems(n)),i.length?(this.focus(e,i),this.previousFilter=n,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete 
this.previousFilter}a&&e.preventDefault()},_activate:function(t){this.active&&!this.active.is(".ui-state-disabled")&&(this.active.children("[aria-haspopup='true']").length?this.expand(t):this.select(t))},refresh:function(){var e,i,s,n,o,a=this,r=this.options.icons.submenu,h=this.element.find(this.options.menus);this._toggleClass("ui-menu-icons",null,!!this.element.find(".ui-icon").length),s=h.filter(":not(.ui-menu)").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var e=t(this),i=e.prev(),s=t("").data("ui-menu-submenu-caret",!0);a._addClass(s,"ui-menu-icon","ui-icon "+r),i.attr("aria-haspopup","true").prepend(s),e.attr("aria-labelledby",i.attr("id"))}),this._addClass(s,"ui-menu","ui-widget ui-widget-content ui-front"),e=h.add(this.element),i=e.find(this.options.items),i.not(".ui-menu-item").each(function(){var e=t(this);a._isDivider(e)&&a._addClass(e,"ui-menu-divider","ui-widget-content")}),n=i.not(".ui-menu-item, .ui-menu-divider"),o=n.children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(n,"ui-menu-item")._addClass(o,"ui-menu-item-wrapper"),i.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!t.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){if("icons"===t){var i=this.element.find(".ui-menu-icon");this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)}this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t+""),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var 
i,s,n;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),s=this.active.children(".ui-menu-item-wrapper"),this._addClass(s,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",s.attr("id")),n=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(n,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),i=e.children(".ui-menu"),i.length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(e){var i,s,n,o,a,r;this._hasScroll()&&(i=parseFloat(t.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(t.css(this.activeMenu[0],"paddingTop"))||0,n=e.offset().top-this.activeMenu.offset().top-i-s,o=this.activeMenu.scrollTop(),a=this.activeMenu.height(),r=e.outerHeight(),0>n?this.activeMenu.scrollTop(o+n):n+r>a&&this.activeMenu.scrollTop(o+n-a+r))},blur:function(t,e){e||clearTimeout(this.timer),this.active&&(this._removeClass(this.active.children(".ui-menu-item-wrapper"),null,"ui-state-active"),this._trigger("blur",t,{item:this.active}),this.active=null)},_startOpening:function(t){clearTimeout(this.timer),"true"===t.attr("aria-hidden")&&(this.timer=this._delay(function(){this._close(),this._open(t)},this.delay))},_open:function(e){var i=t.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(e.parents(".ui-menu")).hide().attr("aria-hidden","true"),e.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(i)},collapseAll:function(e,i){clearTimeout(this.timer),this.timer=this._delay(function(){var 
s=i?this.element:t(e&&e.target).closest(this.element.find(".ui-menu"));s.length||(s=this.element),this._close(s),this.blur(e),this._removeClass(s.find(".ui-state-active"),null,"ui-state-active"),this.activeMenu=s},this.delay)},_close:function(t){t||(t=this.active?this.active.parent():this.element),t.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false")},_closeOnDocumentClick:function(e){return!t(e.target).closest(".ui-menu").length},_isDivider:function(t){return!/[^\-\u2014\u2013\s]/.test(t.text())},collapse:function(t){var e=this.active&&this.active.parent().closest(".ui-menu-item",this.element);e&&e.length&&(this._close(),this.focus(t,e))},expand:function(t){var e=this.active&&this.active.children(".ui-menu ").find(this.options.items).first();e&&e.length&&(this._open(e.parent()),this._delay(function(){this.focus(t,e)}))},next:function(t){this._move("next","first",t)},previous:function(t){this._move("prev","last",t)},isFirstItem:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(t,e,i){var s;this.active&&(s="first"===t||"last"===t?this.active["first"===t?"prevAll":"nextAll"](".ui-menu-item").eq(-1):this.active[t+"All"](".ui-menu-item").eq(0)),s&&s.length&&this.active||(s=this.activeMenu.find(this.options.items)[e]()),this.focus(i,s)},nextPage:function(e){var i,s,n;return this.active?(this.isLastItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return i=t(this),0>i.offset().top-s-n}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items)[this.active?"last":"first"]())),void 0):(this.next(e),void 0)},previousPage:function(e){var i,s,n;return this.active?(this.isFirstItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return 
i=t(this),i.offset().top-s+n>0}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items).first())),void 0):(this.next(e),void 0)},_hasScroll:function(){return this.element.outerHeight()",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,_create:function(){var e,i,s,n=this.element[0].nodeName.toLowerCase(),o="textarea"===n,a="input"===n; -this.isMultiLine=o||!a&&this._isContentEditable(this.element),this.valueMethod=this.element[o||a?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(n){if(this.element.prop("readOnly"))return e=!0,s=!0,i=!0,void 0;e=!1,s=!1,i=!1;var o=t.ui.keyCode;switch(n.keyCode){case o.PAGE_UP:e=!0,this._move("previousPage",n);break;case o.PAGE_DOWN:e=!0,this._move("nextPage",n);break;case o.UP:e=!0,this._keyEvent("previous",n);break;case o.DOWN:e=!0,this._keyEvent("next",n);break;case o.ENTER:this.menu.active&&(e=!0,n.preventDefault(),this.menu.select(n));break;case o.TAB:this.menu.active&&this.menu.select(n);break;case o.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(n),n.preventDefault());break;default:i=!0,this._searchTimeout(n)}},keypress:function(s){if(e)return e=!1,(!this.isMultiLine||this.menu.element.is(":visible"))&&s.preventDefault(),void 0;if(!i){var n=t.ui.keyCode;switch(s.keyCode){case n.PAGE_UP:this._move("previousPage",s);break;case n.PAGE_DOWN:this._move("nextPage",s);break;case n.UP:this._keyEvent("previous",s);break;case n.DOWN:this._keyEvent("next",s)}}},input:function(t){return s?(s=!1,t.preventDefault(),void 0):(this._searchTimeout(t),void 0)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 
0):(clearTimeout(this.searching),this.close(t),this._change(t),void 0)}}),this._initSource(),this.menu=t("
    ").appendTo(this._appendTo()).menu({role:null}).hide().menu("instance"),this._addClass(this.menu.element,"ui-autocomplete","ui-front"),this._on(this.menu.element,{mousedown:function(e){e.preventDefault(),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,this.element[0]!==t.ui.safeActiveElement(this.document[0])&&this.element.trigger("focus")})},menufocus:function(e,i){var s,n;return this.isNewMenu&&(this.isNewMenu=!1,e.originalEvent&&/^mouse/.test(e.originalEvent.type))?(this.menu.blur(),this.document.one("mousemove",function(){t(e.target).trigger(e.originalEvent)}),void 0):(n=i.item.data("ui-autocomplete-item"),!1!==this._trigger("focus",e,{item:n})&&e.originalEvent&&/^key/.test(e.originalEvent.type)&&this._value(n.value),s=i.item.attr("aria-label")||n.value,s&&t.trim(s).length&&(this.liveRegion.children().hide(),t("
    ").text(s).appendTo(this.liveRegion)),void 0)},menuselect:function(e,i){var s=i.item.data("ui-autocomplete-item"),n=this.previous;this.element[0]!==t.ui.safeActiveElement(this.document[0])&&(this.element.trigger("focus"),this.previous=n,this._delay(function(){this.previous=n,this.selectedItem=s})),!1!==this._trigger("select",e,{item:s})&&this._value(s.value),this.term=this._value(),this.close(e),this.selectedItem=s}}),this.liveRegion=t("
    ",{role:"status","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_destroy:function(){clearTimeout(this.searching),this.element.removeAttr("autocomplete"),this.menu.element.remove(),this.liveRegion.remove()},_setOption:function(t,e){this._super(t,e),"source"===t&&this._initSource(),"appendTo"===t&&this.menu.element.appendTo(this._appendTo()),"disabled"===t&&e&&this.xhr&&this.xhr.abort()},_isEventTargetInWidget:function(e){var i=this.menu.element[0];return e.target===this.element[0]||e.target===i||t.contains(i,e.target)},_closeOnClickOutside:function(t){this._isEventTargetInWidget(t)||this.close()},_appendTo:function(){var e=this.options.appendTo;return e&&(e=e.jquery||e.nodeType?t(e):this.document.find(e).eq(0)),e&&e[0]||(e=this.element.closest(".ui-front, dialog")),e.length||(e=this.document[0].body),e},_initSource:function(){var e,i,s=this;t.isArray(this.options.source)?(e=this.options.source,this.source=function(i,s){s(t.ui.autocomplete.filter(e,i.term))}):"string"==typeof this.options.source?(i=this.options.source,this.source=function(e,n){s.xhr&&s.xhr.abort(),s.xhr=t.ajax({url:i,data:e,dataType:"json",success:function(t){n(t)},error:function(){n([])}})}):this.source=this.options.source},_searchTimeout:function(t){clearTimeout(this.searching),this.searching=this._delay(function(){var e=this.term===this._value(),i=this.menu.element.is(":visible"),s=t.altKey||t.ctrlKey||t.metaKey||t.shiftKey;(!e||e&&!i&&!s)&&(this.selectedItem=null,this.search(null,t))},this.options.delay)},search:function(t,e){return t=null!=t?t:this._value(),this.term=this._value(),t.length").append(t("
    ").text(i.label)).appendTo(e)},_move:function(t,e){return this.menu.element.is(":visible")?this.menu.isFirstItem()&&/^previous/.test(t)||this.menu.isLastItem()&&/^next/.test(t)?(this.isMultiLine||this._value(this.term),this.menu.blur(),void 0):(this.menu[t](e),void 0):(this.search(null,e),void 0)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(t,e){(!this.isMultiLine||this.menu.element.is(":visible"))&&(this._move(t,e),e.preventDefault())},_isContentEditable:function(t){if(!t.length)return!1;var e=t.prop("contentEditable");return"inherit"===e?this._isContentEditable(t.parent()):"true"===e}}),t.extend(t.ui.autocomplete,{escapeRegex:function(t){return t.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(e,i){var s=RegExp(t.ui.autocomplete.escapeRegex(i),"i");return t.grep(e,function(t){return s.test(t.label||t.value||t)})}}),t.widget("ui.autocomplete",t.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(t){return t+(t>1?" results are":" result is")+" available, use up and down arrow keys to navigate."}}},__response:function(e){var i;this._superApply(arguments),this.options.disabled||this.cancelSearch||(i=e&&e.length?this.options.messages.results(e.length):this.options.messages.noResults,this.liveRegion.children().hide(),t("
    ").text(i).appendTo(this.liveRegion))}}),t.ui.autocomplete;var g=/ui-corner-([a-z]){2,6}/g;t.widget("ui.controlgroup",{version:"1.12.1",defaultElement:"
    ",options:{direction:"horizontal",disabled:null,onlyVisible:!0,items:{button:"input[type=button], input[type=submit], input[type=reset], button, a",controlgroupLabel:".ui-controlgroup-label",checkboxradio:"input[type='checkbox'], input[type='radio']",selectmenu:"select",spinner:".ui-spinner-input"}},_create:function(){this._enhance()},_enhance:function(){this.element.attr("role","toolbar"),this.refresh()},_destroy:function(){this._callChildMethod("destroy"),this.childWidgets.removeData("ui-controlgroup-data"),this.element.removeAttr("role"),this.options.items.controlgroupLabel&&this.element.find(this.options.items.controlgroupLabel).find(".ui-controlgroup-label-contents").contents().unwrap()},_initWidgets:function(){var e=this,i=[];t.each(this.options.items,function(s,n){var o,a={};return n?"controlgroupLabel"===s?(o=e.element.find(n),o.each(function(){var e=t(this);e.children(".ui-controlgroup-label-contents").length||e.contents().wrapAll("")}),e._addClass(o,null,"ui-widget ui-widget-content ui-state-default"),i=i.concat(o.get()),void 0):(t.fn[s]&&(a=e["_"+s+"Options"]?e["_"+s+"Options"]("middle"):{classes:{}},e.element.find(n).each(function(){var n=t(this),o=n[s]("instance"),r=t.widget.extend({},a);if("button"!==s||!n.parent(".ui-spinner").length){o||(o=n[s]()[s]("instance")),o&&(r.classes=e._resolveClassesValues(r.classes,o)),n[s](r);var h=n[s]("widget");t.data(h[0],"ui-controlgroup-data",o?o:n[s]("instance")),i.push(h[0])}})),void 0):void 0}),this.childWidgets=t(t.unique(i)),this._addClass(this.childWidgets,"ui-controlgroup-item")},_callChildMethod:function(e){this.childWidgets.each(function(){var i=t(this),s=i.data("ui-controlgroup-data");s&&s[e]&&s[e]()})},_updateCornerClass:function(t,e){var i="ui-corner-top ui-corner-bottom ui-corner-left ui-corner-right ui-corner-all",s=this._buildSimpleOptions(e,"label").classes.label;this._removeClass(t,null,i),this._addClass(t,null,s)},_buildSimpleOptions:function(t,e){var 
i="vertical"===this.options.direction,s={classes:{}};return s.classes[e]={middle:"",first:"ui-corner-"+(i?"top":"left"),last:"ui-corner-"+(i?"bottom":"right"),only:"ui-corner-all"}[t],s},_spinnerOptions:function(t){var e=this._buildSimpleOptions(t,"ui-spinner");return e.classes["ui-spinner-up"]="",e.classes["ui-spinner-down"]="",e},_buttonOptions:function(t){return this._buildSimpleOptions(t,"ui-button")},_checkboxradioOptions:function(t){return this._buildSimpleOptions(t,"ui-checkboxradio-label")},_selectmenuOptions:function(t){var e="vertical"===this.options.direction;return{width:e?"auto":!1,classes:{middle:{"ui-selectmenu-button-open":"","ui-selectmenu-button-closed":""},first:{"ui-selectmenu-button-open":"ui-corner-"+(e?"top":"tl"),"ui-selectmenu-button-closed":"ui-corner-"+(e?"top":"left")},last:{"ui-selectmenu-button-open":e?"":"ui-corner-tr","ui-selectmenu-button-closed":"ui-corner-"+(e?"bottom":"right")},only:{"ui-selectmenu-button-open":"ui-corner-top","ui-selectmenu-button-closed":"ui-corner-all"}}[t]}},_resolveClassesValues:function(e,i){var s={};return t.each(e,function(n){var o=i.options.classes[n]||"";o=t.trim(o.replace(g,"")),s[n]=(o+" "+e[n]).replace(/\s+/g," ")}),s},_setOption:function(t,e){return"direction"===t&&this._removeClass("ui-controlgroup-"+this.options.direction),this._super(t,e),"disabled"===t?(this._callChildMethod(e?"disable":"enable"),void 0):(this.refresh(),void 0)},refresh:function(){var e,i=this;this._addClass("ui-controlgroup ui-controlgroup-"+this.options.direction),"horizontal"===this.options.direction&&this._addClass(null,"ui-helper-clearfix"),this._initWidgets(),e=this.childWidgets,this.options.onlyVisible&&(e=e.filter(":visible")),e.length&&(t.each(["first","last"],function(t,s){var n=e[s]().data("ui-controlgroup-data");if(n&&i["_"+n.widgetName+"Options"]){var o=i["_"+n.widgetName+"Options"](1===e.length?"only":s);o.classes=i._resolveClassesValues(o.classes,n),n.element[n.widgetName](o)}else 
i._updateCornerClass(e[s](),s)}),this._callChildMethod("refresh"))}}),t.widget("ui.checkboxradio",[t.ui.formResetMixin,{version:"1.12.1",options:{disabled:null,label:null,icon:!0,classes:{"ui-checkboxradio-label":"ui-corner-all","ui-checkboxradio-icon":"ui-corner-all"}},_getCreateOptions:function(){var e,i,s=this,n=this._super()||{};return this._readType(),i=this.element.labels(),this.label=t(i[i.length-1]),this.label.length||t.error("No label found for checkboxradio widget"),this.originalLabel="",this.label.contents().not(this.element[0]).each(function(){s.originalLabel+=3===this.nodeType?t(this).text():this.outerHTML}),this.originalLabel&&(n.label=this.originalLabel),e=this.element[0].disabled,null!=e&&(n.disabled=e),n},_create:function(){var t=this.element[0].checked;this._bindFormResetHandler(),null==this.options.disabled&&(this.options.disabled=this.element[0].disabled),this._setOption("disabled",this.options.disabled),this._addClass("ui-checkboxradio","ui-helper-hidden-accessible"),this._addClass(this.label,"ui-checkboxradio-label","ui-button ui-widget"),"radio"===this.type&&this._addClass(this.label,"ui-checkboxradio-radio-label"),this.options.label&&this.options.label!==this.originalLabel?this._updateLabel():this.originalLabel&&(this.options.label=this.originalLabel),this._enhance(),t&&(this._addClass(this.label,"ui-checkboxradio-checked","ui-state-active"),this.icon&&this._addClass(this.icon,null,"ui-state-hover")),this._on({change:"_toggleClasses",focus:function(){this._addClass(this.label,null,"ui-state-focus ui-visual-focus")},blur:function(){this._removeClass(this.label,null,"ui-state-focus ui-visual-focus")}})},_readType:function(){var e=this.element[0].nodeName.toLowerCase();this.type=this.element[0].type,"input"===e&&/radio|checkbox/.test(this.type)||t.error("Can't create checkboxradio on element.nodeName="+e+" and element.type="+this.type)},_enhance:function(){this._updateIcon(this.element[0].checked)},widget:function(){return 
this.label},_getRadioGroup:function(){var e,i=this.element[0].name,s="input[name='"+t.ui.escapeSelector(i)+"']";return i?(e=this.form.length?t(this.form[0].elements).filter(s):t(s).filter(function(){return 0===t(this).form().length}),e.not(this.element)):t([])},_toggleClasses:function(){var e=this.element[0].checked;this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",e),this.options.icon&&"checkbox"===this.type&&this._toggleClass(this.icon,null,"ui-icon-check ui-state-checked",e)._toggleClass(this.icon,null,"ui-icon-blank",!e),"radio"===this.type&&this._getRadioGroup().each(function(){var e=t(this).checkboxradio("instance");e&&e._removeClass(e.label,"ui-checkboxradio-checked","ui-state-active")})},_destroy:function(){this._unbindFormResetHandler(),this.icon&&(this.icon.remove(),this.iconSpace.remove())},_setOption:function(t,e){return"label"!==t||e?(this._super(t,e),"disabled"===t?(this._toggleClass(this.label,null,"ui-state-disabled",e),this.element[0].disabled=e,void 0):(this.refresh(),void 0)):void 0},_updateIcon:function(e){var i="ui-icon ui-icon-background ";this.options.icon?(this.icon||(this.icon=t(""),this.iconSpace=t(" "),this._addClass(this.iconSpace,"ui-checkboxradio-icon-space")),"checkbox"===this.type?(i+=e?"ui-icon-check ui-state-checked":"ui-icon-blank",this._removeClass(this.icon,null,e?"ui-icon-blank":"ui-icon-check")):i+="ui-icon-blank",this._addClass(this.icon,"ui-checkboxradio-icon",i),e||this._removeClass(this.icon,null,"ui-icon-check ui-state-checked"),this.icon.prependTo(this.label).after(this.iconSpace)):void 0!==this.icon&&(this.icon.remove(),this.iconSpace.remove(),delete this.icon)},_updateLabel:function(){var t=this.label.contents().not(this.element[0]);this.icon&&(t=t.not(this.icon[0])),this.iconSpace&&(t=t.not(this.iconSpace[0])),t.remove(),this.label.append(this.options.label)},refresh:function(){var 
t=this.element[0].checked,e=this.element[0].disabled;this._updateIcon(t),this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",t),null!==this.options.label&&this._updateLabel(),e!==this.options.disabled&&this._setOptions({disabled:e})}}]),t.ui.checkboxradio,t.widget("ui.button",{version:"1.12.1",defaultElement:"").addClass(this._triggerClass).html(o?t("").attr({src:o,alt:n,title:n}):n)),e[r?"before":"after"](i.trigger),i.trigger.on("click",function(){return t.datepicker._datepickerShowing&&t.datepicker._lastInput===e[0]?t.datepicker._hideDatepicker():t.datepicker._datepickerShowing&&t.datepicker._lastInput!==e[0]?(t.datepicker._hideDatepicker(),t.datepicker._showDatepicker(e[0])):t.datepicker._showDatepicker(e[0]),!1}))},_autoSize:function(t){if(this._get(t,"autoSize")&&!t.inline){var e,i,s,n,o=new Date(2009,11,20),a=this._get(t,"dateFormat");a.match(/[DM]/)&&(e=function(t){for(i=0,s=0,n=0;t.length>n;n++)t[n].length>i&&(i=t[n].length,s=n);return s},o.setMonth(e(this._get(t,a.match(/MM/)?"monthNames":"monthNamesShort"))),o.setDate(e(this._get(t,a.match(/DD/)?"dayNames":"dayNamesShort"))+20-o.getDay())),t.input.attr("size",this._formatDate(t,o).length)}},_inlineDatepicker:function(e,i){var s=t(e);s.hasClass(this.markerClassName)||(s.addClass(this.markerClassName).append(i.dpDiv),t.data(e,"datepicker",i),this._setDate(i,this._getDefaultDate(i),!0),this._updateDatepicker(i),this._updateAlternate(i),i.settings.disabled&&this._disableDatepicker(e),i.dpDiv.css("display","block"))},_dialogDatepicker:function(e,i,s,n,o){var r,h,l,c,u,d=this._dialogInst;return 
d||(this.uuid+=1,r="dp"+this.uuid,this._dialogInput=t(""),this._dialogInput.on("keydown",this._doKeyDown),t("body").append(this._dialogInput),d=this._dialogInst=this._newInst(this._dialogInput,!1),d.settings={},t.data(this._dialogInput[0],"datepicker",d)),a(d.settings,n||{}),i=i&&i.constructor===Date?this._formatDate(d,i):i,this._dialogInput.val(i),this._pos=o?o.length?o:[o.pageX,o.pageY]:null,this._pos||(h=document.documentElement.clientWidth,l=document.documentElement.clientHeight,c=document.documentElement.scrollLeft||document.body.scrollLeft,u=document.documentElement.scrollTop||document.body.scrollTop,this._pos=[h/2-100+c,l/2-150+u]),this._dialogInput.css("left",this._pos[0]+20+"px").css("top",this._pos[1]+"px"),d.settings.onSelect=s,this._inDialog=!0,this.dpDiv.addClass(this._dialogClass),this._showDatepicker(this._dialogInput[0]),t.blockUI&&t.blockUI(this.dpDiv),t.data(this._dialogInput[0],"datepicker",d),this},_destroyDatepicker:function(e){var i,s=t(e),n=t.data(e,"datepicker");s.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),t.removeData(e,"datepicker"),"input"===i?(n.append.remove(),n.trigger.remove(),s.removeClass(this.markerClassName).off("focus",this._showDatepicker).off("keydown",this._doKeyDown).off("keypress",this._doKeyPress).off("keyup",this._doKeyUp)):("div"===i||"span"===i)&&s.removeClass(this.markerClassName).empty(),m===n&&(m=null))},_enableDatepicker:function(e){var i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!1,o.trigger.filter("button").each(function(){this.disabled=!1}).end().filter("img").css({opacity:"1.0",cursor:""})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().removeClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!1)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}))},_disableDatepicker:function(e){var 
i,s,n=t(e),o=t.data(e,"datepicker");n.hasClass(this.markerClassName)&&(i=e.nodeName.toLowerCase(),"input"===i?(e.disabled=!0,o.trigger.filter("button").each(function(){this.disabled=!0}).end().filter("img").css({opacity:"0.5",cursor:"default"})):("div"===i||"span"===i)&&(s=n.children("."+this._inlineClass),s.children().addClass("ui-state-disabled"),s.find("select.ui-datepicker-month, select.ui-datepicker-year").prop("disabled",!0)),this._disabledInputs=t.map(this._disabledInputs,function(t){return t===e?null:t}),this._disabledInputs[this._disabledInputs.length]=e)},_isDisabledDatepicker:function(t){if(!t)return!1;for(var e=0;this._disabledInputs.length>e;e++)if(this._disabledInputs[e]===t)return!0;return!1},_getInst:function(e){try{return t.data(e,"datepicker")}catch(i){throw"Missing instance data for this datepicker"}},_optionDatepicker:function(e,i,s){var n,o,r,h,l=this._getInst(e);return 2===arguments.length&&"string"==typeof i?"defaults"===i?t.extend({},t.datepicker._defaults):l?"all"===i?t.extend({},l.settings):this._get(l,i):null:(n=i||{},"string"==typeof i&&(n={},n[i]=s),l&&(this._curInst===l&&this._hideDatepicker(),o=this._getDateDatepicker(e,!0),r=this._getMinMaxDate(l,"min"),h=this._getMinMaxDate(l,"max"),a(l.settings,n),null!==r&&void 0!==n.dateFormat&&void 0===n.minDate&&(l.settings.minDate=this._formatDate(l,r)),null!==h&&void 0!==n.dateFormat&&void 0===n.maxDate&&(l.settings.maxDate=this._formatDate(l,h)),"disabled"in n&&(n.disabled?this._disableDatepicker(e):this._enableDatepicker(e)),this._attachments(t(e),l),this._autoSize(l),this._setDate(l,o),this._updateAlternate(l),this._updateDatepicker(l)),void 0)},_changeDatepicker:function(t,e,i){this._optionDatepicker(t,e,i)},_refreshDatepicker:function(t){var e=this._getInst(t);e&&this._updateDatepicker(e)},_setDateDatepicker:function(t,e){var i=this._getInst(t);i&&(this._setDate(i,e),this._updateDatepicker(i),this._updateAlternate(i))},_getDateDatepicker:function(t,e){var i=this._getInst(t);return 
i&&!i.inline&&this._setDateFromField(i,e),i?this._getDate(i):null},_doKeyDown:function(e){var i,s,n,o=t.datepicker._getInst(e.target),a=!0,r=o.dpDiv.is(".ui-datepicker-rtl");if(o._keyEvent=!0,t.datepicker._datepickerShowing)switch(e.keyCode){case 9:t.datepicker._hideDatepicker(),a=!1;break;case 13:return n=t("td."+t.datepicker._dayOverClass+":not(."+t.datepicker._currentClass+")",o.dpDiv),n[0]&&t.datepicker._selectDay(e.target,o.selectedMonth,o.selectedYear,n[0]),i=t.datepicker._get(o,"onSelect"),i?(s=t.datepicker._formatDate(o),i.apply(o.input?o.input[0]:null,[s,o])):t.datepicker._hideDatepicker(),!1;case 27:t.datepicker._hideDatepicker();break;case 33:t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 34:t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 35:(e.ctrlKey||e.metaKey)&&t.datepicker._clearDate(e.target),a=e.ctrlKey||e.metaKey;break;case 36:(e.ctrlKey||e.metaKey)&&t.datepicker._gotoToday(e.target),a=e.ctrlKey||e.metaKey;break;case 37:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?1:-1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?-t.datepicker._get(o,"stepBigMonths"):-t.datepicker._get(o,"stepMonths"),"M");break;case 38:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,-7,"D"),a=e.ctrlKey||e.metaKey;break;case 39:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,r?-1:1,"D"),a=e.ctrlKey||e.metaKey,e.originalEvent.altKey&&t.datepicker._adjustDate(e.target,e.ctrlKey?+t.datepicker._get(o,"stepBigMonths"):+t.datepicker._get(o,"stepMonths"),"M");break;case 40:(e.ctrlKey||e.metaKey)&&t.datepicker._adjustDate(e.target,7,"D"),a=e.ctrlKey||e.metaKey;break;default:a=!1}else 36===e.keyCode&&e.ctrlKey?t.datepicker._showDatepicker(this):a=!1;a&&(e.preventDefault(),e.stopPropagation())},_doKeyPress:function(e){var 
i,s,n=t.datepicker._getInst(e.target);return t.datepicker._get(n,"constrainInput")?(i=t.datepicker._possibleChars(t.datepicker._get(n,"dateFormat")),s=String.fromCharCode(null==e.charCode?e.keyCode:e.charCode),e.ctrlKey||e.metaKey||" ">s||!i||i.indexOf(s)>-1):void 0},_doKeyUp:function(e){var i,s=t.datepicker._getInst(e.target);if(s.input.val()!==s.lastVal)try{i=t.datepicker.parseDate(t.datepicker._get(s,"dateFormat"),s.input?s.input.val():null,t.datepicker._getFormatConfig(s)),i&&(t.datepicker._setDateFromField(s),t.datepicker._updateAlternate(s),t.datepicker._updateDatepicker(s))}catch(n){}return!0},_showDatepicker:function(e){if(e=e.target||e,"input"!==e.nodeName.toLowerCase()&&(e=t("input",e.parentNode)[0]),!t.datepicker._isDisabledDatepicker(e)&&t.datepicker._lastInput!==e){var s,n,o,r,h,l,c;s=t.datepicker._getInst(e),t.datepicker._curInst&&t.datepicker._curInst!==s&&(t.datepicker._curInst.dpDiv.stop(!0,!0),s&&t.datepicker._datepickerShowing&&t.datepicker._hideDatepicker(t.datepicker._curInst.input[0])),n=t.datepicker._get(s,"beforeShow"),o=n?n.apply(e,[e,s]):{},o!==!1&&(a(s.settings,o),s.lastVal=null,t.datepicker._lastInput=e,t.datepicker._setDateFromField(s),t.datepicker._inDialog&&(e.value=""),t.datepicker._pos||(t.datepicker._pos=t.datepicker._findPos(e),t.datepicker._pos[1]+=e.offsetHeight),r=!1,t(e).parents().each(function(){return 
r|="fixed"===t(this).css("position"),!r}),h={left:t.datepicker._pos[0],top:t.datepicker._pos[1]},t.datepicker._pos=null,s.dpDiv.empty(),s.dpDiv.css({position:"absolute",display:"block",top:"-1000px"}),t.datepicker._updateDatepicker(s),h=t.datepicker._checkOffset(s,h,r),s.dpDiv.css({position:t.datepicker._inDialog&&t.blockUI?"static":r?"fixed":"absolute",display:"none",left:h.left+"px",top:h.top+"px"}),s.inline||(l=t.datepicker._get(s,"showAnim"),c=t.datepicker._get(s,"duration"),s.dpDiv.css("z-index",i(t(e))+1),t.datepicker._datepickerShowing=!0,t.effects&&t.effects.effect[l]?s.dpDiv.show(l,t.datepicker._get(s,"showOptions"),c):s.dpDiv[l||"show"](l?c:null),t.datepicker._shouldFocusInput(s)&&s.input.trigger("focus"),t.datepicker._curInst=s)) -}},_updateDatepicker:function(e){this.maxRows=4,m=e,e.dpDiv.empty().append(this._generateHTML(e)),this._attachHandlers(e);var i,s=this._getNumberOfMonths(e),n=s[1],a=17,r=e.dpDiv.find("."+this._dayOverClass+" a");r.length>0&&o.apply(r.get(0)),e.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width(""),n>1&&e.dpDiv.addClass("ui-datepicker-multi-"+n).css("width",a*n+"em"),e.dpDiv[(1!==s[0]||1!==s[1]?"add":"remove")+"Class"]("ui-datepicker-multi"),e.dpDiv[(this._get(e,"isRTL")?"add":"remove")+"Class"]("ui-datepicker-rtl"),e===t.datepicker._curInst&&t.datepicker._datepickerShowing&&t.datepicker._shouldFocusInput(e)&&e.input.trigger("focus"),e.yearshtml&&(i=e.yearshtml,setTimeout(function(){i===e.yearshtml&&e.yearshtml&&e.dpDiv.find("select.ui-datepicker-year:first").replaceWith(e.yearshtml),i=e.yearshtml=null},0))},_shouldFocusInput:function(t){return t.input&&t.input.is(":visible")&&!t.input.is(":disabled")&&!t.input.is(":focus")},_checkOffset:function(e,i,s){var 
n=e.dpDiv.outerWidth(),o=e.dpDiv.outerHeight(),a=e.input?e.input.outerWidth():0,r=e.input?e.input.outerHeight():0,h=document.documentElement.clientWidth+(s?0:t(document).scrollLeft()),l=document.documentElement.clientHeight+(s?0:t(document).scrollTop());return i.left-=this._get(e,"isRTL")?n-a:0,i.left-=s&&i.left===e.input.offset().left?t(document).scrollLeft():0,i.top-=s&&i.top===e.input.offset().top+r?t(document).scrollTop():0,i.left-=Math.min(i.left,i.left+n>h&&h>n?Math.abs(i.left+n-h):0),i.top-=Math.min(i.top,i.top+o>l&&l>o?Math.abs(o+r):0),i},_findPos:function(e){for(var i,s=this._getInst(e),n=this._get(s,"isRTL");e&&("hidden"===e.type||1!==e.nodeType||t.expr.filters.hidden(e));)e=e[n?"previousSibling":"nextSibling"];return i=t(e).offset(),[i.left,i.top]},_hideDatepicker:function(e){var i,s,n,o,a=this._curInst;!a||e&&a!==t.data(e,"datepicker")||this._datepickerShowing&&(i=this._get(a,"showAnim"),s=this._get(a,"duration"),n=function(){t.datepicker._tidyDialog(a)},t.effects&&(t.effects.effect[i]||t.effects[i])?a.dpDiv.hide(i,t.datepicker._get(a,"showOptions"),s,n):a.dpDiv["slideDown"===i?"slideUp":"fadeIn"===i?"fadeOut":"hide"](i?s:null,n),i||n(),this._datepickerShowing=!1,o=this._get(a,"onClose"),o&&o.apply(a.input?a.input[0]:null,[a.input?a.input.val():"",a]),this._lastInput=null,this._inDialog&&(this._dialogInput.css({position:"absolute",left:"0",top:"-100px"}),t.blockUI&&(t.unblockUI(),t("body").append(this.dpDiv))),this._inDialog=!1)},_tidyDialog:function(t){t.dpDiv.removeClass(this._dialogClass).off(".ui-datepicker-calendar")},_checkExternalClick:function(e){if(t.datepicker._curInst){var 
i=t(e.target),s=t.datepicker._getInst(i[0]);(i[0].id!==t.datepicker._mainDivId&&0===i.parents("#"+t.datepicker._mainDivId).length&&!i.hasClass(t.datepicker.markerClassName)&&!i.closest("."+t.datepicker._triggerClass).length&&t.datepicker._datepickerShowing&&(!t.datepicker._inDialog||!t.blockUI)||i.hasClass(t.datepicker.markerClassName)&&t.datepicker._curInst!==s)&&t.datepicker._hideDatepicker()}},_adjustDate:function(e,i,s){var n=t(e),o=this._getInst(n[0]);this._isDisabledDatepicker(n[0])||(this._adjustInstDate(o,i+("M"===s?this._get(o,"showCurrentAtPos"):0),s),this._updateDatepicker(o))},_gotoToday:function(e){var i,s=t(e),n=this._getInst(s[0]);this._get(n,"gotoCurrent")&&n.currentDay?(n.selectedDay=n.currentDay,n.drawMonth=n.selectedMonth=n.currentMonth,n.drawYear=n.selectedYear=n.currentYear):(i=new Date,n.selectedDay=i.getDate(),n.drawMonth=n.selectedMonth=i.getMonth(),n.drawYear=n.selectedYear=i.getFullYear()),this._notifyChange(n),this._adjustDate(s)},_selectMonthYear:function(e,i,s){var n=t(e),o=this._getInst(n[0]);o["selected"+("M"===s?"Month":"Year")]=o["draw"+("M"===s?"Month":"Year")]=parseInt(i.options[i.selectedIndex].value,10),this._notifyChange(o),this._adjustDate(n)},_selectDay:function(e,i,s,n){var o,a=t(e);t(n).hasClass(this._unselectableClass)||this._isDisabledDatepicker(a[0])||(o=this._getInst(a[0]),o.selectedDay=o.currentDay=t("a",n).html(),o.selectedMonth=o.currentMonth=i,o.selectedYear=o.currentYear=s,this._selectDate(e,this._formatDate(o,o.currentDay,o.currentMonth,o.currentYear)))},_clearDate:function(e){var i=t(e);this._selectDate(i,"")},_selectDate:function(e,i){var s,n=t(e),o=this._getInst(n[0]);i=null!=i?i:this._formatDate(o),o.input&&o.input.val(i),this._updateAlternate(o),s=this._get(o,"onSelect"),s?s.apply(o.input?o.input[0]:null,[i,o]):o.input&&o.input.trigger("change"),o.inline?this._updateDatepicker(o):(this._hideDatepicker(),this._lastInput=o.input[0],"object"!=typeof 
o.input[0]&&o.input.trigger("focus"),this._lastInput=null)},_updateAlternate:function(e){var i,s,n,o=this._get(e,"altField");o&&(i=this._get(e,"altFormat")||this._get(e,"dateFormat"),s=this._getDate(e),n=this.formatDate(i,s,this._getFormatConfig(e)),t(o).val(n))},noWeekends:function(t){var e=t.getDay();return[e>0&&6>e,""]},iso8601Week:function(t){var e,i=new Date(t.getTime());return i.setDate(i.getDate()+4-(i.getDay()||7)),e=i.getTime(),i.setMonth(0),i.setDate(1),Math.floor(Math.round((e-i)/864e5)/7)+1},parseDate:function(e,i,s){if(null==e||null==i)throw"Invalid arguments";if(i="object"==typeof i?""+i:i+"",""===i)return null;var n,o,a,r,h=0,l=(s?s.shortYearCutoff:null)||this._defaults.shortYearCutoff,c="string"!=typeof l?l:(new Date).getFullYear()%100+parseInt(l,10),u=(s?s.dayNamesShort:null)||this._defaults.dayNamesShort,d=(s?s.dayNames:null)||this._defaults.dayNames,p=(s?s.monthNamesShort:null)||this._defaults.monthNamesShort,f=(s?s.monthNames:null)||this._defaults.monthNames,g=-1,m=-1,_=-1,v=-1,b=!1,y=function(t){var i=e.length>n+1&&e.charAt(n+1)===t;return i&&n++,i},w=function(t){var e=y(t),s="@"===t?14:"!"===t?20:"y"===t&&e?4:"o"===t?3:2,n="y"===t?s:1,o=RegExp("^\\d{"+n+","+s+"}"),a=i.substring(h).match(o);if(!a)throw"Missing number at position "+h;return h+=a[0].length,parseInt(a[0],10)},k=function(e,s,n){var o=-1,a=t.map(y(e)?n:s,function(t,e){return[[e,t]]}).sort(function(t,e){return-(t[1].length-e[1].length)});if(t.each(a,function(t,e){var s=e[1];return i.substr(h,s.length).toLowerCase()===s.toLowerCase()?(o=e[0],h+=s.length,!1):void 0}),-1!==o)return o+1;throw"Unknown name at position "+h},x=function(){if(i.charAt(h)!==e.charAt(n))throw"Unexpected literal at position "+h;h++};for(n=0;e.length>n;n++)if(b)"'"!==e.charAt(n)||y("'")?x():b=!1;else switch(e.charAt(n)){case"d":_=w("d");break;case"D":k("D",u,d);break;case"o":v=w("o");break;case"m":m=w("m");break;case"M":m=k("M",p,f);break;case"y":g=w("y");break;case"@":r=new 
Date(w("@")),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"!":r=new Date((w("!")-this._ticksTo1970)/1e4),g=r.getFullYear(),m=r.getMonth()+1,_=r.getDate();break;case"'":y("'")?x():b=!0;break;default:x()}if(i.length>h&&(a=i.substr(h),!/^\s+/.test(a)))throw"Extra/unparsed characters found in date: "+a;if(-1===g?g=(new Date).getFullYear():100>g&&(g+=(new Date).getFullYear()-(new Date).getFullYear()%100+(c>=g?0:-100)),v>-1)for(m=1,_=v;;){if(o=this._getDaysInMonth(g,m-1),o>=_)break;m++,_-=o}if(r=this._daylightSavingAdjust(new Date(g,m-1,_)),r.getFullYear()!==g||r.getMonth()+1!==m||r.getDate()!==_)throw"Invalid date";return r},ATOM:"yy-mm-dd",COOKIE:"D, dd M yy",ISO_8601:"yy-mm-dd",RFC_822:"D, d M y",RFC_850:"DD, dd-M-y",RFC_1036:"D, d M y",RFC_1123:"D, d M yy",RFC_2822:"D, d M yy",RSS:"D, d M y",TICKS:"!",TIMESTAMP:"@",W3C:"yy-mm-dd",_ticksTo1970:1e7*60*60*24*(718685+Math.floor(492.5)-Math.floor(19.7)+Math.floor(4.925)),formatDate:function(t,e,i){if(!e)return"";var s,n=(i?i.dayNamesShort:null)||this._defaults.dayNamesShort,o=(i?i.dayNames:null)||this._defaults.dayNames,a=(i?i.monthNamesShort:null)||this._defaults.monthNamesShort,r=(i?i.monthNames:null)||this._defaults.monthNames,h=function(e){var i=t.length>s+1&&t.charAt(s+1)===e;return i&&s++,i},l=function(t,e,i){var s=""+e;if(h(t))for(;i>s.length;)s="0"+s;return s},c=function(t,e,i,s){return h(t)?s[e]:i[e]},u="",d=!1;if(e)for(s=0;t.length>s;s++)if(d)"'"!==t.charAt(s)||h("'")?u+=t.charAt(s):d=!1;else switch(t.charAt(s)){case"d":u+=l("d",e.getDate(),2);break;case"D":u+=c("D",e.getDay(),n,o);break;case"o":u+=l("o",Math.round((new Date(e.getFullYear(),e.getMonth(),e.getDate()).getTime()-new 
Date(e.getFullYear(),0,0).getTime())/864e5),3);break;case"m":u+=l("m",e.getMonth()+1,2);break;case"M":u+=c("M",e.getMonth(),a,r);break;case"y":u+=h("y")?e.getFullYear():(10>e.getFullYear()%100?"0":"")+e.getFullYear()%100;break;case"@":u+=e.getTime();break;case"!":u+=1e4*e.getTime()+this._ticksTo1970;break;case"'":h("'")?u+="'":d=!0;break;default:u+=t.charAt(s)}return u},_possibleChars:function(t){var e,i="",s=!1,n=function(i){var s=t.length>e+1&&t.charAt(e+1)===i;return s&&e++,s};for(e=0;t.length>e;e++)if(s)"'"!==t.charAt(e)||n("'")?i+=t.charAt(e):s=!1;else switch(t.charAt(e)){case"d":case"m":case"y":case"@":i+="0123456789";break;case"D":case"M":return null;case"'":n("'")?i+="'":s=!0;break;default:i+=t.charAt(e)}return i},_get:function(t,e){return void 0!==t.settings[e]?t.settings[e]:this._defaults[e]},_setDateFromField:function(t,e){if(t.input.val()!==t.lastVal){var i=this._get(t,"dateFormat"),s=t.lastVal=t.input?t.input.val():null,n=this._getDefaultDate(t),o=n,a=this._getFormatConfig(t);try{o=this.parseDate(i,s,a)||n}catch(r){s=e?"":s}t.selectedDay=o.getDate(),t.drawMonth=t.selectedMonth=o.getMonth(),t.drawYear=t.selectedYear=o.getFullYear(),t.currentDay=s?o.getDate():0,t.currentMonth=s?o.getMonth():0,t.currentYear=s?o.getFullYear():0,this._adjustInstDate(t)}},_getDefaultDate:function(t){return this._restrictMinMax(t,this._determineDate(t,this._get(t,"defaultDate"),new Date))},_determineDate:function(e,i,s){var n=function(t){var e=new Date;return e.setDate(e.getDate()+t),e},o=function(i){try{return t.datepicker.parseDate(t.datepicker._get(e,"dateFormat"),i,t.datepicker._getFormatConfig(e))}catch(s){}for(var n=(i.toLowerCase().match(/^c/)?t.datepicker._getDate(e):null)||new 
Date,o=n.getFullYear(),a=n.getMonth(),r=n.getDate(),h=/([+\-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g,l=h.exec(i);l;){switch(l[2]||"d"){case"d":case"D":r+=parseInt(l[1],10);break;case"w":case"W":r+=7*parseInt(l[1],10);break;case"m":case"M":a+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a));break;case"y":case"Y":o+=parseInt(l[1],10),r=Math.min(r,t.datepicker._getDaysInMonth(o,a))}l=h.exec(i)}return new Date(o,a,r)},a=null==i||""===i?s:"string"==typeof i?o(i):"number"==typeof i?isNaN(i)?s:n(i):new Date(i.getTime());return a=a&&"Invalid Date"==""+a?s:a,a&&(a.setHours(0),a.setMinutes(0),a.setSeconds(0),a.setMilliseconds(0)),this._daylightSavingAdjust(a)},_daylightSavingAdjust:function(t){return t?(t.setHours(t.getHours()>12?t.getHours()+2:0),t):null},_setDate:function(t,e,i){var s=!e,n=t.selectedMonth,o=t.selectedYear,a=this._restrictMinMax(t,this._determineDate(t,e,new Date));t.selectedDay=t.currentDay=a.getDate(),t.drawMonth=t.selectedMonth=t.currentMonth=a.getMonth(),t.drawYear=t.selectedYear=t.currentYear=a.getFullYear(),n===t.selectedMonth&&o===t.selectedYear||i||this._notifyChange(t),this._adjustInstDate(t),t.input&&t.input.val(s?"":this._formatDate(t))},_getDate:function(t){var e=!t.currentYear||t.input&&""===t.input.val()?null:this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return e},_attachHandlers:function(e){var i=this._get(e,"stepMonths"),s="#"+e.id.replace(/\\\\/g,"\\");e.dpDiv.find("[data-handler]").map(function(){var e={prev:function(){t.datepicker._adjustDate(s,-i,"M")},next:function(){t.datepicker._adjustDate(s,+i,"M")},hide:function(){t.datepicker._hideDatepicker()},today:function(){t.datepicker._gotoToday(s)},selectDay:function(){return t.datepicker._selectDay(s,+this.getAttribute("data-month"),+this.getAttribute("data-year"),this),!1},selectMonth:function(){return t.datepicker._selectMonthYear(s,this,"M"),!1},selectYear:function(){return 
t.datepicker._selectMonthYear(s,this,"Y"),!1}};t(this).on(this.getAttribute("data-event"),e[this.getAttribute("data-handler")])})},_generateHTML:function(t){var e,i,s,n,o,a,r,h,l,c,u,d,p,f,g,m,_,v,b,y,w,k,x,C,D,I,T,P,M,S,H,z,O,A,N,W,E,F,L,R=new Date,B=this._daylightSavingAdjust(new Date(R.getFullYear(),R.getMonth(),R.getDate())),Y=this._get(t,"isRTL"),j=this._get(t,"showButtonPanel"),q=this._get(t,"hideIfNoPrevNext"),K=this._get(t,"navigationAsDateFormat"),U=this._getNumberOfMonths(t),V=this._get(t,"showCurrentAtPos"),$=this._get(t,"stepMonths"),X=1!==U[0]||1!==U[1],G=this._daylightSavingAdjust(t.currentDay?new Date(t.currentYear,t.currentMonth,t.currentDay):new Date(9999,9,9)),Q=this._getMinMaxDate(t,"min"),J=this._getMinMaxDate(t,"max"),Z=t.drawMonth-V,te=t.drawYear;if(0>Z&&(Z+=12,te--),J)for(e=this._daylightSavingAdjust(new Date(J.getFullYear(),J.getMonth()-U[0]*U[1]+1,J.getDate())),e=Q&&Q>e?Q:e;this._daylightSavingAdjust(new Date(te,Z,1))>e;)Z--,0>Z&&(Z=11,te--);for(t.drawMonth=Z,t.drawYear=te,i=this._get(t,"prevText"),i=K?this.formatDate(i,this._daylightSavingAdjust(new Date(te,Z-$,1)),this._getFormatConfig(t)):i,s=this._canAdjustMonth(t,-1,te,Z)?""+i+"":q?"":""+i+"",n=this._get(t,"nextText"),n=K?this.formatDate(n,this._daylightSavingAdjust(new Date(te,Z+$,1)),this._getFormatConfig(t)):n,o=this._canAdjustMonth(t,1,te,Z)?""+n+"":q?"":""+n+"",a=this._get(t,"currentText"),r=this._get(t,"gotoCurrent")&&t.currentDay?G:B,a=K?this.formatDate(a,r,this._getFormatConfig(t)):a,h=t.inline?"":"",l=j?"
    "+(Y?h:"")+(this._isInRange(t,r)?"":"")+(Y?"":h)+"
    ":"",c=parseInt(this._get(t,"firstDay"),10),c=isNaN(c)?0:c,u=this._get(t,"showWeek"),d=this._get(t,"dayNames"),p=this._get(t,"dayNamesMin"),f=this._get(t,"monthNames"),g=this._get(t,"monthNamesShort"),m=this._get(t,"beforeShowDay"),_=this._get(t,"showOtherMonths"),v=this._get(t,"selectOtherMonths"),b=this._getDefaultDate(t),y="",k=0;U[0]>k;k++){for(x="",this.maxRows=4,C=0;U[1]>C;C++){if(D=this._daylightSavingAdjust(new Date(te,Z,t.selectedDay)),I=" ui-corner-all",T="",X){if(T+="
    "}for(T+="
    "+(/all|left/.test(I)&&0===k?Y?o:s:"")+(/all|right/.test(I)&&0===k?Y?s:o:"")+this._generateMonthYearHeader(t,Z,te,Q,J,k>0||C>0,f,g)+"
    "+"",P=u?"":"",w=0;7>w;w++)M=(w+c)%7,P+="";for(T+=P+"",S=this._getDaysInMonth(te,Z),te===t.selectedYear&&Z===t.selectedMonth&&(t.selectedDay=Math.min(t.selectedDay,S)),H=(this._getFirstDayOfMonth(te,Z)-c+7)%7,z=Math.ceil((H+S)/7),O=X?this.maxRows>z?this.maxRows:z:z,this.maxRows=O,A=this._daylightSavingAdjust(new Date(te,Z,1-H)),N=0;O>N;N++){for(T+="",W=u?"":"",w=0;7>w;w++)E=m?m.apply(t.input?t.input[0]:null,[A]):[!0,""],F=A.getMonth()!==Z,L=F&&!v||!E[0]||Q&&Q>A||J&&A>J,W+="",A.setDate(A.getDate()+1),A=this._daylightSavingAdjust(A);T+=W+""}Z++,Z>11&&(Z=0,te++),T+="
    "+this._get(t,"weekHeader")+"=5?" class='ui-datepicker-week-end'":"")+">"+""+p[M]+"
    "+this._get(t,"calculateWeek")(A)+""+(F&&!_?" ":L?""+A.getDate()+"":""+A.getDate()+"")+"
    "+(X?"
    "+(U[0]>0&&C===U[1]-1?"
    ":""):""),x+=T}y+=x}return y+=l,t._keyEvent=!1,y},_generateMonthYearHeader:function(t,e,i,s,n,o,a,r){var h,l,c,u,d,p,f,g,m=this._get(t,"changeMonth"),_=this._get(t,"changeYear"),v=this._get(t,"showMonthAfterYear"),b="
    ",y="";if(o||!m)y+=""+a[e]+"";else{for(h=s&&s.getFullYear()===i,l=n&&n.getFullYear()===i,y+=""}if(v||(b+=y+(!o&&m&&_?"":" ")),!t.yearshtml)if(t.yearshtml="",o||!_)b+=""+i+"";else{for(u=this._get(t,"yearRange").split(":"),d=(new Date).getFullYear(),p=function(t){var e=t.match(/c[+\-].*/)?i+parseInt(t.substring(1),10):t.match(/[+\-].*/)?d+parseInt(t,10):parseInt(t,10);return isNaN(e)?d:e},f=p(u[0]),g=Math.max(f,p(u[1]||"")),f=s?Math.max(f,s.getFullYear()):f,g=n?Math.min(g,n.getFullYear()):g,t.yearshtml+="",b+=t.yearshtml,t.yearshtml=null}return b+=this._get(t,"yearSuffix"),v&&(b+=(!o&&m&&_?"":" ")+y),b+="
    "},_adjustInstDate:function(t,e,i){var s=t.selectedYear+("Y"===i?e:0),n=t.selectedMonth+("M"===i?e:0),o=Math.min(t.selectedDay,this._getDaysInMonth(s,n))+("D"===i?e:0),a=this._restrictMinMax(t,this._daylightSavingAdjust(new Date(s,n,o)));t.selectedDay=a.getDate(),t.drawMonth=t.selectedMonth=a.getMonth(),t.drawYear=t.selectedYear=a.getFullYear(),("M"===i||"Y"===i)&&this._notifyChange(t)},_restrictMinMax:function(t,e){var i=this._getMinMaxDate(t,"min"),s=this._getMinMaxDate(t,"max"),n=i&&i>e?i:e;return s&&n>s?s:n},_notifyChange:function(t){var e=this._get(t,"onChangeMonthYear");e&&e.apply(t.input?t.input[0]:null,[t.selectedYear,t.selectedMonth+1,t])},_getNumberOfMonths:function(t){var e=this._get(t,"numberOfMonths");return null==e?[1,1]:"number"==typeof e?[1,e]:e},_getMinMaxDate:function(t,e){return this._determineDate(t,this._get(t,e+"Date"),null)},_getDaysInMonth:function(t,e){return 32-this._daylightSavingAdjust(new Date(t,e,32)).getDate()},_getFirstDayOfMonth:function(t,e){return new Date(t,e,1).getDay()},_canAdjustMonth:function(t,e,i,s){var n=this._getNumberOfMonths(t),o=this._daylightSavingAdjust(new Date(i,s+(0>e?e:n[0]*n[1]),1));return 0>e&&o.setDate(this._getDaysInMonth(o.getFullYear(),o.getMonth())),this._isInRange(t,o)},_isInRange:function(t,e){var i,s,n=this._getMinMaxDate(t,"min"),o=this._getMinMaxDate(t,"max"),a=null,r=null,h=this._get(t,"yearRange");return h&&(i=h.split(":"),s=(new Date).getFullYear(),a=parseInt(i[0],10),r=parseInt(i[1],10),i[0].match(/[+\-].*/)&&(a+=s),i[1].match(/[+\-].*/)&&(r+=s)),(!n||e.getTime()>=n.getTime())&&(!o||e.getTime()<=o.getTime())&&(!a||e.getFullYear()>=a)&&(!r||r>=e.getFullYear())},_getFormatConfig:function(t){var e=this._get(t,"shortYearCutoff");return e="string"!=typeof e?e:(new 
Date).getFullYear()%100+parseInt(e,10),{shortYearCutoff:e,dayNamesShort:this._get(t,"dayNamesShort"),dayNames:this._get(t,"dayNames"),monthNamesShort:this._get(t,"monthNamesShort"),monthNames:this._get(t,"monthNames")}},_formatDate:function(t,e,i,s){e||(t.currentDay=t.selectedDay,t.currentMonth=t.selectedMonth,t.currentYear=t.selectedYear);var n=e?"object"==typeof e?e:this._daylightSavingAdjust(new Date(s,i,e)):this._daylightSavingAdjust(new Date(t.currentYear,t.currentMonth,t.currentDay));return this.formatDate(this._get(t,"dateFormat"),n,this._getFormatConfig(t))}}),t.fn.datepicker=function(e){if(!this.length)return this;t.datepicker.initialized||(t(document).on("mousedown",t.datepicker._checkExternalClick),t.datepicker.initialized=!0),0===t("#"+t.datepicker._mainDivId).length&&t("body").append(t.datepicker.dpDiv);var i=Array.prototype.slice.call(arguments,1);return"string"!=typeof e||"isDisabled"!==e&&"getDate"!==e&&"widget"!==e?"option"===e&&2===arguments.length&&"string"==typeof arguments[1]?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i)):this.each(function(){"string"==typeof e?t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this].concat(i)):t.datepicker._attachDatepicker(this,e)}):t.datepicker["_"+e+"Datepicker"].apply(t.datepicker,[this[0]].concat(i))},t.datepicker=new s,t.datepicker.initialized=!1,t.datepicker.uuid=(new Date).getTime(),t.datepicker.version="1.12.1",t.datepicker,t.ui.ie=!!/msie [\w.]+/.exec(navigator.userAgent.toLowerCase());var _=!1;t(document).on("mouseup",function(){_=!1}),t.widget("ui.mouse",{version:"1.12.1",options:{cancel:"input, textarea, button, select, option",distance:1,delay:0},_mouseInit:function(){var e=this;this.element.on("mousedown."+this.widgetName,function(t){return e._mouseDown(t)}).on("click."+this.widgetName,function(i){return!0===t.data(i.target,e.widgetName+".preventClickEvent")?(t.removeData(i.target,e.widgetName+".preventClickEvent"),i.stopImmediatePropagation(),!1):void 
0}),this.started=!1},_mouseDestroy:function(){this.element.off("."+this.widgetName),this._mouseMoveDelegate&&this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate)},_mouseDown:function(e){if(!_){this._mouseMoved=!1,this._mouseStarted&&this._mouseUp(e),this._mouseDownEvent=e;var i=this,s=1===e.which,n="string"==typeof this.options.cancel&&e.target.nodeName?t(e.target).closest(this.options.cancel).length:!1;return s&&!n&&this._mouseCapture(e)?(this.mouseDelayMet=!this.options.delay,this.mouseDelayMet||(this._mouseDelayTimer=setTimeout(function(){i.mouseDelayMet=!0},this.options.delay)),this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(e)!==!1,!this._mouseStarted)?(e.preventDefault(),!0):(!0===t.data(e.target,this.widgetName+".preventClickEvent")&&t.removeData(e.target,this.widgetName+".preventClickEvent"),this._mouseMoveDelegate=function(t){return i._mouseMove(t)},this._mouseUpDelegate=function(t){return i._mouseUp(t)},this.document.on("mousemove."+this.widgetName,this._mouseMoveDelegate).on("mouseup."+this.widgetName,this._mouseUpDelegate),e.preventDefault(),_=!0,!0)):!0}},_mouseMove:function(e){if(this._mouseMoved){if(t.ui.ie&&(!document.documentMode||9>document.documentMode)&&!e.button)return this._mouseUp(e);if(!e.which)if(e.originalEvent.altKey||e.originalEvent.ctrlKey||e.originalEvent.metaKey||e.originalEvent.shiftKey)this.ignoreMissingWhich=!0;else if(!this.ignoreMissingWhich)return 
this._mouseUp(e)}return(e.which||e.button)&&(this._mouseMoved=!0),this._mouseStarted?(this._mouseDrag(e),e.preventDefault()):(this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,e)!==!1,this._mouseStarted?this._mouseDrag(e):this._mouseUp(e)),!this._mouseStarted)},_mouseUp:function(e){this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,e.target===this._mouseDownEvent.target&&t.data(e.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(e)),this._mouseDelayTimer&&(clearTimeout(this._mouseDelayTimer),delete this._mouseDelayTimer),this.ignoreMissingWhich=!1,_=!1,e.preventDefault()},_mouseDistanceMet:function(t){return Math.max(Math.abs(this._mouseDownEvent.pageX-t.pageX),Math.abs(this._mouseDownEvent.pageY-t.pageY))>=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),t.ui.plugin={add:function(e,i,s){var n,o=t.ui[e].prototype;for(n in s)o.plugins[n]=o.plugins[n]||[],o.plugins[n].push([i,s[n]])},call:function(t,e,i,s){var 
n,o=t.plugins[e];if(o&&(s||t.element[0].parentNode&&11!==t.element[0].parentNode.nodeType))for(n=0;o.length>n;n++)t.options[o[n][0]]&&o[n][1].apply(t.element,i)}},t.ui.safeBlur=function(e){e&&"body"!==e.nodeName.toLowerCase()&&t(e).trigger("blur")},t.widget("ui.draggable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"drag",options:{addClasses:!0,appendTo:"parent",axis:!1,connectToSortable:!1,containment:!1,cursor:"auto",cursorAt:!1,grid:!1,handle:!1,helper:"original",iframeFix:!1,opacity:!1,refreshPositions:!1,revert:!1,revertDuration:500,scope:"default",scroll:!0,scrollSensitivity:20,scrollSpeed:20,snap:!1,snapMode:"both",snapTolerance:20,stack:!1,zIndex:!1,drag:null,start:null,stop:null},_create:function(){"original"===this.options.helper&&this._setPositionRelative(),this.options.addClasses&&this._addClass("ui-draggable"),this._setHandleClassName(),this._mouseInit()},_setOption:function(t,e){this._super(t,e),"handle"===t&&(this._removeHandleClassName(),this._setHandleClassName())},_destroy:function(){return(this.helper||this.element).is(".ui-draggable-dragging")?(this.destroyOnClear=!0,void 0):(this._removeHandleClassName(),this._mouseDestroy(),void 0)},_mouseCapture:function(e){var i=this.options;return this.helper||i.disabled||t(e.target).closest(".ui-resizable-handle").length>0?!1:(this.handle=this._getHandle(e),this.handle?(this._blurActiveElement(e),this._blockFrames(i.iframeFix===!0?"iframe":i.iframeFix),!0):!1)},_blockFrames:function(e){this.iframeBlocks=this.document.find(e).map(function(){var e=t(this);return t("
    ").css("position","absolute").appendTo(e.parent()).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_blurActiveElement:function(e){var i=t.ui.safeActiveElement(this.document[0]),s=t(e.target);s.closest(i).length||t.ui.safeBlur(i)},_mouseStart:function(e){var i=this.options;return this.helper=this._createHelper(e),this._addClass(this.helper,"ui-draggable-dragging"),this._cacheHelperProportions(),t.ui.ddmanager&&(t.ui.ddmanager.current=this),this._cacheMargins(),this.cssPosition=this.helper.css("position"),this.scrollParent=this.helper.scrollParent(!0),this.offsetParent=this.helper.offsetParent(),this.hasFixedAncestor=this.helper.parents().filter(function(){return"fixed"===t(this).css("position")}).length>0,this.positionAbs=this.element.offset(),this._refreshOffsets(e),this.originalPosition=this.position=this._generatePosition(e,!1),this.originalPageX=e.pageX,this.originalPageY=e.pageY,i.cursorAt&&this._adjustOffsetFromHelper(i.cursorAt),this._setContainment(),this._trigger("start",e)===!1?(this._clear(),!1):(this._cacheHelperProportions(),t.ui.ddmanager&&!i.dropBehaviour&&t.ui.ddmanager.prepareOffsets(this,e),this._mouseDrag(e,!0),t.ui.ddmanager&&t.ui.ddmanager.dragStart(this,e),!0)},_refreshOffsets:function(t){this.offset={top:this.positionAbs.top-this.margins.top,left:this.positionAbs.left-this.margins.left,scroll:!1,parent:this._getParentOffset(),relative:this._getRelativeOffset()},this.offset.click={left:t.pageX-this.offset.left,top:t.pageY-this.offset.top}},_mouseDrag:function(e,i){if(this.hasFixedAncestor&&(this.offset.parent=this._getParentOffset()),this.position=this._generatePosition(e,!0),this.positionAbs=this._convertPositionTo("absolute"),!i){var s=this._uiHash();if(this._trigger("drag",e,s)===!1)return this._mouseUp(new t.Event("mouseup",e)),!1;this.position=s.position}return 
this.helper[0].style.left=this.position.left+"px",this.helper[0].style.top=this.position.top+"px",t.ui.ddmanager&&t.ui.ddmanager.drag(this,e),!1},_mouseStop:function(e){var i=this,s=!1;return t.ui.ddmanager&&!this.options.dropBehaviour&&(s=t.ui.ddmanager.drop(this,e)),this.dropped&&(s=this.dropped,this.dropped=!1),"invalid"===this.options.revert&&!s||"valid"===this.options.revert&&s||this.options.revert===!0||t.isFunction(this.options.revert)&&this.options.revert.call(this.element,s)?t(this.helper).animate(this.originalPosition,parseInt(this.options.revertDuration,10),function(){i._trigger("stop",e)!==!1&&i._clear()}):this._trigger("stop",e)!==!1&&this._clear(),!1},_mouseUp:function(e){return this._unblockFrames(),t.ui.ddmanager&&t.ui.ddmanager.dragStop(this,e),this.handleElement.is(e.target)&&this.element.trigger("focus"),t.ui.mouse.prototype._mouseUp.call(this,e)},cancel:function(){return this.helper.is(".ui-draggable-dragging")?this._mouseUp(new t.Event("mouseup",{target:this.element[0]})):this._clear(),this},_getHandle:function(e){return this.options.handle?!!t(e.target).closest(this.element.find(this.options.handle)).length:!0},_setHandleClassName:function(){this.handleElement=this.options.handle?this.element.find(this.options.handle):this.element,this._addClass(this.handleElement,"ui-draggable-handle")},_removeHandleClassName:function(){this._removeClass(this.handleElement,"ui-draggable-handle")},_createHelper:function(e){var i=this.options,s=t.isFunction(i.helper),n=s?t(i.helper.apply(this.element[0],[e])):"clone"===i.helper?this.element.clone().removeAttr("id"):this.element;return 
n.parents("body").length||n.appendTo("parent"===i.appendTo?this.element[0].parentNode:i.appendTo),s&&n[0]===this.element[0]&&this._setPositionRelative(),n[0]===this.element[0]||/(fixed|absolute)/.test(n.css("position"))||n.css("position","absolute"),n},_setPositionRelative:function(){/^(?:r|a|f)/.test(this.element.css("position"))||(this.element[0].style.position="relative")},_adjustOffsetFromHelper:function(e){"string"==typeof e&&(e=e.split(" ")),t.isArray(e)&&(e={left:+e[0],top:+e[1]||0}),"left"in e&&(this.offset.click.left=e.left+this.margins.left),"right"in e&&(this.offset.click.left=this.helperProportions.width-e.right+this.margins.left),"top"in e&&(this.offset.click.top=e.top+this.margins.top),"bottom"in e&&(this.offset.click.top=this.helperProportions.height-e.bottom+this.margins.top)},_isRootNode:function(t){return/(html|body)/i.test(t.tagName)||t===this.document[0]},_getParentOffset:function(){var e=this.offsetParent.offset(),i=this.document[0];return"absolute"===this.cssPosition&&this.scrollParent[0]!==i&&t.contains(this.scrollParent[0],this.offsetParent[0])&&(e.left+=this.scrollParent.scrollLeft(),e.top+=this.scrollParent.scrollTop()),this._isRootNode(this.offsetParent[0])&&(e={top:0,left:0}),{top:e.top+(parseInt(this.offsetParent.css("borderTopWidth"),10)||0),left:e.left+(parseInt(this.offsetParent.css("borderLeftWidth"),10)||0)}},_getRelativeOffset:function(){if("relative"!==this.cssPosition)return{top:0,left:0};var t=this.element.position(),e=this._isRootNode(this.scrollParent[0]);return{top:t.top-(parseInt(this.helper.css("top"),10)||0)+(e?0:this.scrollParent.scrollTop()),left:t.left-(parseInt(this.helper.css("left"),10)||0)+(e?0:this.scrollParent.scrollLeft())} 
-},_cacheMargins:function(){this.margins={left:parseInt(this.element.css("marginLeft"),10)||0,top:parseInt(this.element.css("marginTop"),10)||0,right:parseInt(this.element.css("marginRight"),10)||0,bottom:parseInt(this.element.css("marginBottom"),10)||0}},_cacheHelperProportions:function(){this.helperProportions={width:this.helper.outerWidth(),height:this.helper.outerHeight()}},_setContainment:function(){var e,i,s,n=this.options,o=this.document[0];return this.relativeContainer=null,n.containment?"window"===n.containment?(this.containment=[t(window).scrollLeft()-this.offset.relative.left-this.offset.parent.left,t(window).scrollTop()-this.offset.relative.top-this.offset.parent.top,t(window).scrollLeft()+t(window).width()-this.helperProportions.width-this.margins.left,t(window).scrollTop()+(t(window).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):"document"===n.containment?(this.containment=[0,0,t(o).width()-this.helperProportions.width-this.margins.left,(t(o).height()||o.body.parentNode.scrollHeight)-this.helperProportions.height-this.margins.top],void 0):n.containment.constructor===Array?(this.containment=n.containment,void 0):("parent"===n.containment&&(n.containment=this.helper[0].parentNode),i=t(n.containment),s=i[0],s&&(e=/(scroll|auto)/.test(i.css("overflow")),this.containment=[(parseInt(i.css("borderLeftWidth"),10)||0)+(parseInt(i.css("paddingLeft"),10)||0),(parseInt(i.css("borderTopWidth"),10)||0)+(parseInt(i.css("paddingTop"),10)||0),(e?Math.max(s.scrollWidth,s.offsetWidth):s.offsetWidth)-(parseInt(i.css("borderRightWidth"),10)||0)-(parseInt(i.css("paddingRight"),10)||0)-this.helperProportions.width-this.margins.left-this.margins.right,(e?Math.max(s.scrollHeight,s.offsetHeight):s.offsetHeight)-(parseInt(i.css("borderBottomWidth"),10)||0)-(parseInt(i.css("paddingBottom"),10)||0)-this.helperProportions.height-this.margins.top-this.margins.bottom],this.relativeContainer=i),void 0):(this.containment=null,void 
0)},_convertPositionTo:function(t,e){e||(e=this.position);var i="absolute"===t?1:-1,s=this._isRootNode(this.scrollParent[0]);return{top:e.top+this.offset.relative.top*i+this.offset.parent.top*i-("fixed"===this.cssPosition?-this.offset.scroll.top:s?0:this.offset.scroll.top)*i,left:e.left+this.offset.relative.left*i+this.offset.parent.left*i-("fixed"===this.cssPosition?-this.offset.scroll.left:s?0:this.offset.scroll.left)*i}},_generatePosition:function(t,e){var i,s,n,o,a=this.options,r=this._isRootNode(this.scrollParent[0]),h=t.pageX,l=t.pageY;return r&&this.offset.scroll||(this.offset.scroll={top:this.scrollParent.scrollTop(),left:this.scrollParent.scrollLeft()}),e&&(this.containment&&(this.relativeContainer?(s=this.relativeContainer.offset(),i=[this.containment[0]+s.left,this.containment[1]+s.top,this.containment[2]+s.left,this.containment[3]+s.top]):i=this.containment,t.pageX-this.offset.click.lefti[2]&&(h=i[2]+this.offset.click.left),t.pageY-this.offset.click.top>i[3]&&(l=i[3]+this.offset.click.top)),a.grid&&(n=a.grid[1]?this.originalPageY+Math.round((l-this.originalPageY)/a.grid[1])*a.grid[1]:this.originalPageY,l=i?n-this.offset.click.top>=i[1]||n-this.offset.click.top>i[3]?n:n-this.offset.click.top>=i[1]?n-a.grid[1]:n+a.grid[1]:n,o=a.grid[0]?this.originalPageX+Math.round((h-this.originalPageX)/a.grid[0])*a.grid[0]:this.originalPageX,h=i?o-this.offset.click.left>=i[0]||o-this.offset.click.left>i[2]?o:o-this.offset.click.left>=i[0]?o-a.grid[0]:o+a.grid[0]:o),"y"===a.axis&&(h=this.originalPageX),"x"===a.axis&&(l=this.originalPageY)),{top:l-this.offset.click.top-this.offset.relative.top-this.offset.parent.top+("fixed"===this.cssPosition?-this.offset.scroll.top:r?0:this.offset.scroll.top),left:h-this.offset.click.left-this.offset.relative.left-this.offset.parent.left+("fixed"===this.cssPosition?-this.offset.scroll.left:r?0:this.offset.scroll.left)}},_clear:function(){this._removeClass(this.helper,"ui-draggable-dragging"),this.helper[0]===this.element[0]||this.cancelH
elperRemoval||this.helper.remove(),this.helper=null,this.cancelHelperRemoval=!1,this.destroyOnClear&&this.destroy()},_trigger:function(e,i,s){return s=s||this._uiHash(),t.ui.plugin.call(this,e,[i,s,this],!0),/^(drag|start|stop)/.test(e)&&(this.positionAbs=this._convertPositionTo("absolute"),s.offset=this.positionAbs),t.Widget.prototype._trigger.call(this,e,i,s)},plugins:{},_uiHash:function(){return{helper:this.helper,position:this.position,originalPosition:this.originalPosition,offset:this.positionAbs}}}),t.ui.plugin.add("draggable","connectToSortable",{start:function(e,i,s){var n=t.extend({},i,{item:s.element});s.sortables=[],t(s.options.connectToSortable).each(function(){var i=t(this).sortable("instance");i&&!i.options.disabled&&(s.sortables.push(i),i.refreshPositions(),i._trigger("activate",e,n))})},stop:function(e,i,s){var n=t.extend({},i,{item:s.element});s.cancelHelperRemoval=!1,t.each(s.sortables,function(){var t=this;t.isOver?(t.isOver=0,s.cancelHelperRemoval=!0,t.cancelHelperRemoval=!1,t._storedCSS={position:t.placeholder.css("position"),top:t.placeholder.css("top"),left:t.placeholder.css("left")},t._mouseStop(e),t.options.helper=t.options._helper):(t.cancelHelperRemoval=!0,t._trigger("deactivate",e,n))})},drag:function(e,i,s){t.each(s.sortables,function(){var n=!1,o=this;o.positionAbs=s.positionAbs,o.helperProportions=s.helperProportions,o.offset.click=s.offset.click,o._intersectsWith(o.containerCache)&&(n=!0,t.each(s.sortables,function(){return this.positionAbs=s.positionAbs,this.helperProportions=s.helperProportions,this.offset.click=s.offset.click,this!==o&&this._intersectsWith(this.containerCache)&&t.contains(o.element[0],this.element[0])&&(n=!1),n})),n?(o.isOver||(o.isOver=1,s._parent=i.helper.parent(),o.currentItem=i.helper.appendTo(o.element).data("ui-sortable-item",!0),o.options._helper=o.options.helper,o.options.helper=function(){return 
i.helper[0]},e.target=o.currentItem[0],o._mouseCapture(e,!0),o._mouseStart(e,!0,!0),o.offset.click.top=s.offset.click.top,o.offset.click.left=s.offset.click.left,o.offset.parent.left-=s.offset.parent.left-o.offset.parent.left,o.offset.parent.top-=s.offset.parent.top-o.offset.parent.top,s._trigger("toSortable",e),s.dropped=o.element,t.each(s.sortables,function(){this.refreshPositions()}),s.currentItem=s.element,o.fromOutside=s),o.currentItem&&(o._mouseDrag(e),i.position=o.position)):o.isOver&&(o.isOver=0,o.cancelHelperRemoval=!0,o.options._revert=o.options.revert,o.options.revert=!1,o._trigger("out",e,o._uiHash(o)),o._mouseStop(e,!0),o.options.revert=o.options._revert,o.options.helper=o.options._helper,o.placeholder&&o.placeholder.remove(),i.helper.appendTo(s._parent),s._refreshOffsets(e),i.position=s._generatePosition(e,!0),s._trigger("fromSortable",e),s.dropped=!1,t.each(s.sortables,function(){this.refreshPositions()}))})}}),t.ui.plugin.add("draggable","cursor",{start:function(e,i,s){var n=t("body"),o=s.options;n.css("cursor")&&(o._cursor=n.css("cursor")),n.css("cursor",o.cursor)},stop:function(e,i,s){var n=s.options;n._cursor&&t("body").css("cursor",n._cursor)}}),t.ui.plugin.add("draggable","opacity",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("opacity")&&(o._opacity=n.css("opacity")),n.css("opacity",o.opacity)},stop:function(e,i,s){var n=s.options;n._opacity&&t(i.helper).css("opacity",n._opacity)}}),t.ui.plugin.add("draggable","scroll",{start:function(t,e,i){i.scrollParentNotHidden||(i.scrollParentNotHidden=i.helper.scrollParent(!1)),i.scrollParentNotHidden[0]!==i.document[0]&&"HTML"!==i.scrollParentNotHidden[0].tagName&&(i.overflowOffset=i.scrollParentNotHidden.offset())},drag:function(e,i,s){var 
n=s.options,o=!1,a=s.scrollParentNotHidden[0],r=s.document[0];a!==r&&"HTML"!==a.tagName?(n.axis&&"x"===n.axis||(s.overflowOffset.top+a.offsetHeight-e.pageY=0;d--)h=s.snapElements[d].left-s.margins.left,l=h+s.snapElements[d].width,c=s.snapElements[d].top-s.margins.top,u=c+s.snapElements[d].height,h-g>_||m>l+g||c-g>b||v>u+g||!t.contains(s.snapElements[d].item.ownerDocument,s.snapElements[d].item)?(s.snapElements[d].snapping&&s.options.snap.release&&s.options.snap.release.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=!1):("inner"!==f.snapMode&&(n=g>=Math.abs(c-b),o=g>=Math.abs(u-v),a=g>=Math.abs(h-_),r=g>=Math.abs(l-m),n&&(i.position.top=s._convertPositionTo("relative",{top:c-s.helperProportions.height,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h-s.helperProportions.width}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l}).left)),p=n||o||a||r,"outer"!==f.snapMode&&(n=g>=Math.abs(c-v),o=g>=Math.abs(u-b),a=g>=Math.abs(h-m),r=g>=Math.abs(l-_),n&&(i.position.top=s._convertPositionTo("relative",{top:c,left:0}).top),o&&(i.position.top=s._convertPositionTo("relative",{top:u-s.helperProportions.height,left:0}).top),a&&(i.position.left=s._convertPositionTo("relative",{top:0,left:h}).left),r&&(i.position.left=s._convertPositionTo("relative",{top:0,left:l-s.helperProportions.width}).left)),!s.snapElements[d].snapping&&(n||o||a||r||p)&&s.options.snap.snap&&s.options.snap.snap.call(s.element,e,t.extend(s._uiHash(),{snapItem:s.snapElements[d].item})),s.snapElements[d].snapping=n||o||a||r||p)}}),t.ui.plugin.add("draggable","stack",{start:function(e,i,s){var 
n,o=s.options,a=t.makeArray(t(o.stack)).sort(function(e,i){return(parseInt(t(e).css("zIndex"),10)||0)-(parseInt(t(i).css("zIndex"),10)||0)});a.length&&(n=parseInt(t(a[0]).css("zIndex"),10)||0,t(a).each(function(e){t(this).css("zIndex",n+e)}),this.css("zIndex",n+a.length))}}),t.ui.plugin.add("draggable","zIndex",{start:function(e,i,s){var n=t(i.helper),o=s.options;n.css("zIndex")&&(o._zIndex=n.css("zIndex")),n.css("zIndex",o.zIndex)},stop:function(e,i,s){var n=s.options;n._zIndex&&t(i.helper).css("zIndex",n._zIndex)}}),t.ui.draggable,t.widget("ui.resizable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,classes:{"ui-resizable-se":"ui-icon ui-icon-gripsmall-diagonal-se"},containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:90,resize:null,start:null,stop:null},_num:function(t){return parseFloat(t)||0},_isNumber:function(t){return!isNaN(parseFloat(t))},_hasScroll:function(e,i){if("hidden"===t(e).css("overflow"))return!1;var s=i&&"left"===i?"scrollLeft":"scrollTop",n=!1;return e[s]>0?!0:(e[s]=1,n=e[s]>0,e[s]=0,n)},_create:function(){var e,i=this.options,s=this;this._addClass("ui-resizable"),t.extend(this,{_aspectRatio:!!i.aspectRatio,aspectRatio:i.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:i.helper||i.ghost||i.animate?i.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/^(canvas|textarea|input|select|button|img)$/i)&&(this.element.wrap(t("
    ").css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,e={marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom"),marginLeft:this.originalElement.css("marginLeft")},this.element.css(e),this.originalElement.css("margin",0),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css(e),this._proportionallyResize()),this._setupHandles(),i.autoHide&&t(this.element).on("mouseenter",function(){i.disabled||(s._removeClass("ui-resizable-autohide"),s._handles.show())}).on("mouseleave",function(){i.disabled||s.resizing||(s._addClass("ui-resizable-autohide"),s._handles.hide())}),this._mouseInit()},_destroy:function(){this._mouseDestroy();var e,i=function(e){t(e).removeData("resizable").removeData("ui-resizable").off(".resizable").find(".ui-resizable-handle").remove()};return this.elementIsWrapper&&(i(this.element),e=this.element,this.originalElement.css({position:e.css("position"),width:e.outerWidth(),height:e.outerHeight(),top:e.css("top"),left:e.css("left")}).insertAfter(e),e.remove()),this.originalElement.css("resize",this.originalResizeStyle),i(this.originalElement),this},_setOption:function(t,e){switch(this._super(t,e),t){case"handles":this._removeHandles(),this._setupHandles();break;default:}},_setupHandles:function(){var 
e,i,s,n,o,a=this.options,r=this;if(this.handles=a.handles||(t(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=t(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),s=this.handles.split(","),this.handles={},i=0;s.length>i;i++)e=t.trim(s[i]),n="ui-resizable-"+e,o=t("
    "),this._addClass(o,"ui-resizable-handle "+n),o.css({zIndex:a.zIndex}),this.handles[e]=".ui-resizable-"+e,this.element.append(o);this._renderAxis=function(e){var i,s,n,o;e=e||this.element;for(i in this.handles)this.handles[i].constructor===String?this.handles[i]=this.element.children(this.handles[i]).first().show():(this.handles[i].jquery||this.handles[i].nodeType)&&(this.handles[i]=t(this.handles[i]),this._on(this.handles[i],{mousedown:r._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(s=t(this.handles[i],this.element),o=/sw|ne|nw|se|n|s/.test(i)?s.outerHeight():s.outerWidth(),n=["padding",/ne|nw|n/.test(i)?"Top":/se|sw|s/.test(i)?"Bottom":/^e$/.test(i)?"Right":"Left"].join(""),e.css(n,o),this._proportionallyResize()),this._handles=this._handles.add(this.handles[i])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.on("mouseover",function(){r.resizing||(this.className&&(o=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),r.axis=o&&o[1]?o[1]:"se")}),a.autoHide&&(this._handles.hide(),this._addClass("ui-resizable-autohide"))},_removeHandles:function(){this._handles.remove()},_mouseCapture:function(e){var i,s,n=!1;for(i in this.handles)s=t(this.handles[i])[0],(s===e.target||t.contains(s,e.target))&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(e){var i,s,n,o=this.options,a=this.element;return 
this.resizing=!0,this._renderProxy(),i=this._num(this.helper.css("left")),s=this._num(this.helper.css("top")),o.containment&&(i+=t(o.containment).scrollLeft()||0,s+=t(o.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:i,top:s},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{width:a.width(),height:a.height()},this.originalSize=this._helper?{width:a.outerWidth(),height:a.outerHeight()}:{width:a.width(),height:a.height()},this.sizeDiff={width:a.outerWidth()-a.width(),height:a.outerHeight()-a.height()},this.originalPosition={left:i,top:s},this.originalMousePosition={left:e.pageX,top:e.pageY},this.aspectRatio="number"==typeof o.aspectRatio?o.aspectRatio:this.originalSize.width/this.originalSize.height||1,n=t(".ui-resizable-"+this.axis).css("cursor"),t("body").css("cursor","auto"===n?this.axis+"-resize":n),this._addClass("ui-resizable-resizing"),this._propagate("start",e),!0},_mouseDrag:function(e){var i,s,n=this.originalMousePosition,o=this.axis,a=e.pageX-n.left||0,r=e.pageY-n.top||0,h=this._change[o];return this._updatePrevProperties(),h?(i=h.apply(this,[e,a,r]),this._updateVirtualBoundaries(e.shiftKey),(this._aspectRatio||e.shiftKey)&&(i=this._updateRatio(i,e)),i=this._respectSize(i,e),this._updateCache(i),this._propagate("resize",e),s=this._applyChanges(),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),t.isEmptyObject(s)||(this._updatePrevProperties(),this._trigger("resize",e,this.ui()),this._applyChanges()),!1):!1},_mouseStop:function(e){this.resizing=!1;var i,s,n,o,a,r,h,l=this.options,c=this;return 
this._helper&&(i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),n=s&&this._hasScroll(i[0],"left")?0:c.sizeDiff.height,o=s?0:c.sizeDiff.width,a={width:c.helper.width()-o,height:c.helper.height()-n},r=parseFloat(c.element.css("left"))+(c.position.left-c.originalPosition.left)||null,h=parseFloat(c.element.css("top"))+(c.position.top-c.originalPosition.top)||null,l.animate||this.element.css(t.extend(a,{top:h,left:r})),c.helper.height(c.size.height),c.helper.width(c.size.width),this._helper&&!l.animate&&this._proportionallyResize()),t("body").css("cursor","auto"),this._removeClass("ui-resizable-resizing"),this._propagate("stop",e),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var t={};return this.position.top!==this.prevPosition.top&&(t.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(t.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(t.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(t.height=this.size.height+"px"),this.helper.css(t),t},_updateVirtualBoundaries:function(t){var 
e,i,s,n,o,a=this.options;o={minWidth:this._isNumber(a.minWidth)?a.minWidth:0,maxWidth:this._isNumber(a.maxWidth)?a.maxWidth:1/0,minHeight:this._isNumber(a.minHeight)?a.minHeight:0,maxHeight:this._isNumber(a.maxHeight)?a.maxHeight:1/0},(this._aspectRatio||t)&&(e=o.minHeight*this.aspectRatio,s=o.minWidth/this.aspectRatio,i=o.maxHeight*this.aspectRatio,n=o.maxWidth/this.aspectRatio,e>o.minWidth&&(o.minWidth=e),s>o.minHeight&&(o.minHeight=s),o.maxWidth>i&&(o.maxWidth=i),o.maxHeight>n&&(o.maxHeight=n)),this._vBoundaries=o},_updateCache:function(t){this.offset=this.helper.offset(),this._isNumber(t.left)&&(this.position.left=t.left),this._isNumber(t.top)&&(this.position.top=t.top),this._isNumber(t.height)&&(this.size.height=t.height),this._isNumber(t.width)&&(this.size.width=t.width)},_updateRatio:function(t){var e=this.position,i=this.size,s=this.axis;return this._isNumber(t.height)?t.width=t.height*this.aspectRatio:this._isNumber(t.width)&&(t.height=t.width/this.aspectRatio),"sw"===s&&(t.left=e.left+(i.width-t.width),t.top=null),"nw"===s&&(t.top=e.top+(i.height-t.height),t.left=e.left+(i.width-t.width)),t},_respectSize:function(t){var e=this._vBoundaries,i=this.axis,s=this._isNumber(t.width)&&e.maxWidth&&e.maxWidtht.width,a=this._isNumber(t.height)&&e.minHeight&&e.minHeight>t.height,r=this.originalPosition.left+this.originalSize.width,h=this.originalPosition.top+this.originalSize.height,l=/sw|nw|w/.test(i),c=/nw|ne|n/.test(i);return o&&(t.width=e.minWidth),a&&(t.height=e.minHeight),s&&(t.width=e.maxWidth),n&&(t.height=e.maxHeight),o&&l&&(t.left=r-e.minWidth),s&&l&&(t.left=r-e.maxWidth),a&&c&&(t.top=h-e.minHeight),n&&c&&(t.top=h-e.maxHeight),t.width||t.height||t.left||!t.top?t.width||t.height||t.top||!t.left||(t.left=null):t.top=null,t},_getPaddingPlusBorderDimensions:function(t){for(var 
e=0,i=[],s=[t.css("borderTopWidth"),t.css("borderRightWidth"),t.css("borderBottomWidth"),t.css("borderLeftWidth")],n=[t.css("paddingTop"),t.css("paddingRight"),t.css("paddingBottom"),t.css("paddingLeft")];4>e;e++)i[e]=parseFloat(s[e])||0,i[e]+=parseFloat(n[e])||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var t,e=0,i=this.helper||this.element;this._proportionallyResizeElements.length>e;e++)t=this._proportionallyResizeElements[e],this.outerDimensions||(this.outerDimensions=this._getPaddingPlusBorderDimensions(t)),t.css({height:i.height()-this.outerDimensions.height||0,width:i.width()-this.outerDimensions.width||0})},_renderProxy:function(){var e=this.element,i=this.options;this.elementOffset=e.offset(),this._helper?(this.helper=this.helper||t("
    "),this._addClass(this.helper,this._helper),this.helper.css({width:this.element.outerWidth(),height:this.element.outerHeight(),position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++i.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(t,e){return{width:this.originalSize.width+e}},w:function(t,e){var i=this.originalSize,s=this.originalPosition;return{left:s.left+e,width:i.width-e}},n:function(t,e,i){var s=this.originalSize,n=this.originalPosition;return{top:n.top+i,height:s.height-i}},s:function(t,e,i){return{height:this.originalSize.height+i}},se:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},sw:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[e,i,s]))},ne:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},nw:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[e,i,s]))}},_propagate:function(e,i){t.ui.plugin.call(this,e,[i,this.ui()]),"resize"!==e&&this._trigger(e,i,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),t.ui.plugin.add("resizable","animate",{stop:function(e){var 
i=t(this).resizable("instance"),s=i.options,n=i._proportionallyResizeElements,o=n.length&&/textarea/i.test(n[0].nodeName),a=o&&i._hasScroll(n[0],"left")?0:i.sizeDiff.height,r=o?0:i.sizeDiff.width,h={width:i.size.width-r,height:i.size.height-a},l=parseFloat(i.element.css("left"))+(i.position.left-i.originalPosition.left)||null,c=parseFloat(i.element.css("top"))+(i.position.top-i.originalPosition.top)||null;i.element.animate(t.extend(h,c&&l?{top:c,left:l}:{}),{duration:s.animateDuration,easing:s.animateEasing,step:function(){var s={width:parseFloat(i.element.css("width")),height:parseFloat(i.element.css("height")),top:parseFloat(i.element.css("top")),left:parseFloat(i.element.css("left"))};n&&n.length&&t(n[0]).css({width:s.width,height:s.height}),i._updateCache(s),i._propagate("resize",e)}})}}),t.ui.plugin.add("resizable","containment",{start:function(){var e,i,s,n,o,a,r,h=t(this).resizable("instance"),l=h.options,c=h.element,u=l.containment,d=u instanceof t?u.get(0):/parent/.test(u)?c.parent().get(0):u;d&&(h.containerElement=t(d),/document/.test(u)||u===document?(h.containerOffset={left:0,top:0},h.containerPosition={left:0,top:0},h.parentData={element:t(document),left:0,top:0,width:t(document).width(),height:t(document).height()||document.body.parentNode.scrollHeight}):(e=t(d),i=[],t(["Top","Right","Left","Bottom"]).each(function(t,s){i[t]=h._num(e.css("padding"+s))}),h.containerOffset=e.offset(),h.containerPosition=e.position(),h.containerSize={height:e.innerHeight()-i[3],width:e.innerWidth()-i[1]},s=h.containerOffset,n=h.containerSize.height,o=h.containerSize.width,a=h._hasScroll(d,"left")?d.scrollWidth:o,r=h._hasScroll(d)?d.scrollHeight:n,h.parentData={element:d,left:s.left,top:s.top,width:a,height:r}))},resize:function(e){var 
i,s,n,o,a=t(this).resizable("instance"),r=a.options,h=a.containerOffset,l=a.position,c=a._aspectRatio||e.shiftKey,u={top:0,left:0},d=a.containerElement,p=!0;d[0]!==document&&/static/.test(d.css("position"))&&(u=h),l.left<(a._helper?h.left:0)&&(a.size.width=a.size.width+(a._helper?a.position.left-h.left:a.position.left-u.left),c&&(a.size.height=a.size.width/a.aspectRatio,p=!1),a.position.left=r.helper?h.left:0),l.top<(a._helper?h.top:0)&&(a.size.height=a.size.height+(a._helper?a.position.top-h.top:a.position.top),c&&(a.size.width=a.size.height*a.aspectRatio,p=!1),a.position.top=a._helper?h.top:0),n=a.containerElement.get(0)===a.element.parent().get(0),o=/relative|absolute/.test(a.containerElement.css("position")),n&&o?(a.offset.left=a.parentData.left+a.position.left,a.offset.top=a.parentData.top+a.position.top):(a.offset.left=a.element.offset().left,a.offset.top=a.element.offset().top),i=Math.abs(a.sizeDiff.width+(a._helper?a.offset.left-u.left:a.offset.left-h.left)),s=Math.abs(a.sizeDiff.height+(a._helper?a.offset.top-u.top:a.offset.top-h.top)),i+a.size.width>=a.parentData.width&&(a.size.width=a.parentData.width-i,c&&(a.size.height=a.size.width/a.aspectRatio,p=!1)),s+a.size.height>=a.parentData.height&&(a.size.height=a.parentData.height-s,c&&(a.size.width=a.size.height*a.aspectRatio,p=!1)),p||(a.position.left=a.prevPosition.left,a.position.top=a.prevPosition.top,a.size.width=a.prevSize.width,a.size.height=a.prevSize.height)},stop:function(){var e=t(this).resizable("instance"),i=e.options,s=e.containerOffset,n=e.containerPosition,o=e.containerElement,a=t(e.helper),r=a.offset(),h=a.outerWidth()-e.sizeDiff.width,l=a.outerHeight()-e.sizeDiff.height;e._helper&&!i.animate&&/relative/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l}),e._helper&&!i.animate&&/static/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l})}}),t.ui.plugin.add("resizable","alsoResize",{start:function(){var 
e=t(this).resizable("instance"),i=e.options;t(i.alsoResize).each(function(){var e=t(this);e.data("ui-resizable-alsoresize",{width:parseFloat(e.width()),height:parseFloat(e.height()),left:parseFloat(e.css("left")),top:parseFloat(e.css("top"))})})},resize:function(e,i){var s=t(this).resizable("instance"),n=s.options,o=s.originalSize,a=s.originalPosition,r={height:s.size.height-o.height||0,width:s.size.width-o.width||0,top:s.position.top-a.top||0,left:s.position.left-a.left||0};t(n.alsoResize).each(function(){var e=t(this),s=t(this).data("ui-resizable-alsoresize"),n={},o=e.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];t.each(o,function(t,e){var i=(s[e]||0)+(r[e]||0);i&&i>=0&&(n[e]=i||null)}),e.css(n)})},stop:function(){t(this).removeData("ui-resizable-alsoresize")}}),t.ui.plugin.add("resizable","ghost",{start:function(){var e=t(this).resizable("instance"),i=e.size;e.ghost=e.originalElement.clone(),e.ghost.css({opacity:.25,display:"block",position:"relative",height:i.height,width:i.width,margin:0,left:0,top:0}),e._addClass(e.ghost,"ui-resizable-ghost"),t.uiBackCompat!==!1&&"string"==typeof e.options.ghost&&e.ghost.addClass(this.options.ghost),e.ghost.appendTo(e.helper)},resize:function(){var e=t(this).resizable("instance");e.ghost&&e.ghost.css({position:"relative",height:e.size.height,width:e.size.width})},stop:function(){var e=t(this).resizable("instance");e.ghost&&e.helper&&e.helper.get(0).removeChild(e.ghost.get(0))}}),t.ui.plugin.add("resizable","grid",{resize:function(){var e,i=t(this).resizable("instance"),s=i.options,n=i.size,o=i.originalSize,a=i.originalPosition,r=i.axis,h="number"==typeof 
s.grid?[s.grid,s.grid]:s.grid,l=h[0]||1,c=h[1]||1,u=Math.round((n.width-o.width)/l)*l,d=Math.round((n.height-o.height)/c)*c,p=o.width+u,f=o.height+d,g=s.maxWidth&&p>s.maxWidth,m=s.maxHeight&&f>s.maxHeight,_=s.minWidth&&s.minWidth>p,v=s.minHeight&&s.minHeight>f;s.grid=h,_&&(p+=l),v&&(f+=c),g&&(p-=l),m&&(f-=c),/^(se|s|e)$/.test(r)?(i.size.width=p,i.size.height=f):/^(ne)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.top=a.top-d):/^(sw)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.left=a.left-u):((0>=f-c||0>=p-l)&&(e=i._getPaddingPlusBorderDimensions(this)),f-c>0?(i.size.height=f,i.position.top=a.top-d):(f=c-e.height,i.size.height=f,i.position.top=a.top+o.height-f),p-l>0?(i.size.width=p,i.position.left=a.left-u):(p=l-e.width,i.size.width=p,i.position.left=a.left+o.width-p))}}),t.ui.resizable,t.widget("ui.dialog",{version:"1.12.1",options:{appendTo:"body",autoOpen:!0,buttons:[],classes:{"ui-dialog":"ui-corner-all","ui-dialog-titlebar":"ui-corner-all"},closeOnEscape:!0,closeText:"Close",draggable:!0,hide:null,height:"auto",maxHeight:null,maxWidth:null,minHeight:150,minWidth:150,modal:!1,position:{my:"center",at:"center",of:window,collision:"fit",using:function(e){var 
i=t(this).css(e).offset().top;0>i&&t(this).css("top",e.top-i)}},resizable:!0,show:null,title:null,width:300,beforeClose:null,close:null,drag:null,dragStart:null,dragStop:null,focus:null,open:null,resize:null,resizeStart:null,resizeStop:null},sizeRelatedOptions:{buttons:!0,height:!0,maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0,width:!0},resizableRelatedOptions:{maxHeight:!0,maxWidth:!0,minHeight:!0,minWidth:!0},_create:function(){this.originalCss={display:this.element[0].style.display,width:this.element[0].style.width,minHeight:this.element[0].style.minHeight,maxHeight:this.element[0].style.maxHeight,height:this.element[0].style.height},this.originalPosition={parent:this.element.parent(),index:this.element.parent().children().index(this.element)},this.originalTitle=this.element.attr("title"),null==this.options.title&&null!=this.originalTitle&&(this.options.title=this.originalTitle),this.options.disabled&&(this.options.disabled=!1),this._createWrapper(),this.element.show().removeAttr("title").appendTo(this.uiDialog),this._addClass("ui-dialog-content","ui-widget-content"),this._createTitlebar(),this._createButtonPane(),this.options.draggable&&t.fn.draggable&&this._makeDraggable(),this.options.resizable&&t.fn.resizable&&this._makeResizable(),this._isOpen=!1,this._trackFocus()},_init:function(){this.options.autoOpen&&this.open()},_appendTo:function(){var e=this.options.appendTo;return e&&(e.jquery||e.nodeType)?t(e):this.document.find(e||"body").eq(0)},_destroy:function(){var t,e=this.originalPosition;this._untrackInstance(),this._destroyOverlay(),this.element.removeUniqueId().css(this.originalCss).detach(),this.uiDialog.remove(),this.originalTitle&&this.element.attr("title",this.originalTitle),t=e.parent.children().eq(e.index),t.length&&t[0]!==this.element[0]?t.before(this.element):e.parent.append(this.element)},widget:function(){return this.uiDialog -},disable:t.noop,enable:t.noop,close:function(e){var 
i=this;this._isOpen&&this._trigger("beforeClose",e)!==!1&&(this._isOpen=!1,this._focusedElement=null,this._destroyOverlay(),this._untrackInstance(),this.opener.filter(":focusable").trigger("focus").length||t.ui.safeBlur(t.ui.safeActiveElement(this.document[0])),this._hide(this.uiDialog,this.options.hide,function(){i._trigger("close",e)}))},isOpen:function(){return this._isOpen},moveToTop:function(){this._moveToTop()},_moveToTop:function(e,i){var s=!1,n=this.uiDialog.siblings(".ui-front:visible").map(function(){return+t(this).css("z-index")}).get(),o=Math.max.apply(null,n);return o>=+this.uiDialog.css("z-index")&&(this.uiDialog.css("z-index",o+1),s=!0),s&&!i&&this._trigger("focus",e),s},open:function(){var e=this;return this._isOpen?(this._moveToTop()&&this._focusTabbable(),void 0):(this._isOpen=!0,this.opener=t(t.ui.safeActiveElement(this.document[0])),this._size(),this._position(),this._createOverlay(),this._moveToTop(null,!0),this.overlay&&this.overlay.css("z-index",this.uiDialog.css("z-index")-1),this._show(this.uiDialog,this.options.show,function(){e._focusTabbable(),e._trigger("focus")}),this._makeFocusTarget(),this._trigger("open"),void 0)},_focusTabbable:function(){var t=this._focusedElement;t||(t=this.element.find("[autofocus]")),t.length||(t=this.element.find(":tabbable")),t.length||(t=this.uiDialogButtonPane.find(":tabbable")),t.length||(t=this.uiDialogTitlebarClose.filter(":tabbable")),t.length||(t=this.uiDialog),t.eq(0).trigger("focus")},_keepFocus:function(e){function i(){var e=t.ui.safeActiveElement(this.document[0]),i=this.uiDialog[0]===e||t.contains(this.uiDialog[0],e);i||this._focusTabbable()}e.preventDefault(),i.call(this),this._delay(i)},_createWrapper:function(){this.uiDialog=t("
    ").hide().attr({tabIndex:-1,role:"dialog"}).appendTo(this._appendTo()),this._addClass(this.uiDialog,"ui-dialog","ui-widget ui-widget-content ui-front"),this._on(this.uiDialog,{keydown:function(e){if(this.options.closeOnEscape&&!e.isDefaultPrevented()&&e.keyCode&&e.keyCode===t.ui.keyCode.ESCAPE)return e.preventDefault(),this.close(e),void 0;if(e.keyCode===t.ui.keyCode.TAB&&!e.isDefaultPrevented()){var i=this.uiDialog.find(":tabbable"),s=i.filter(":first"),n=i.filter(":last");e.target!==n[0]&&e.target!==this.uiDialog[0]||e.shiftKey?e.target!==s[0]&&e.target!==this.uiDialog[0]||!e.shiftKey||(this._delay(function(){n.trigger("focus")}),e.preventDefault()):(this._delay(function(){s.trigger("focus")}),e.preventDefault())}},mousedown:function(t){this._moveToTop(t)&&this._focusTabbable()}}),this.element.find("[aria-describedby]").length||this.uiDialog.attr({"aria-describedby":this.element.uniqueId().attr("id")})},_createTitlebar:function(){var e;this.uiDialogTitlebar=t("
    "),this._addClass(this.uiDialogTitlebar,"ui-dialog-titlebar","ui-widget-header ui-helper-clearfix"),this._on(this.uiDialogTitlebar,{mousedown:function(e){t(e.target).closest(".ui-dialog-titlebar-close")||this.uiDialog.trigger("focus")}}),this.uiDialogTitlebarClose=t("").button({label:t("").text(this.options.closeText).html(),icon:"ui-icon-closethick",showLabel:!1}).appendTo(this.uiDialogTitlebar),this._addClass(this.uiDialogTitlebarClose,"ui-dialog-titlebar-close"),this._on(this.uiDialogTitlebarClose,{click:function(t){t.preventDefault(),this.close(t)}}),e=t("").uniqueId().prependTo(this.uiDialogTitlebar),this._addClass(e,"ui-dialog-title"),this._title(e),this.uiDialogTitlebar.prependTo(this.uiDialog),this.uiDialog.attr({"aria-labelledby":e.attr("id")})},_title:function(t){this.options.title?t.text(this.options.title):t.html(" ")},_createButtonPane:function(){this.uiDialogButtonPane=t("
    "),this._addClass(this.uiDialogButtonPane,"ui-dialog-buttonpane","ui-widget-content ui-helper-clearfix"),this.uiButtonSet=t("
    ").appendTo(this.uiDialogButtonPane),this._addClass(this.uiButtonSet,"ui-dialog-buttonset"),this._createButtons()},_createButtons:function(){var e=this,i=this.options.buttons;return this.uiDialogButtonPane.remove(),this.uiButtonSet.empty(),t.isEmptyObject(i)||t.isArray(i)&&!i.length?(this._removeClass(this.uiDialog,"ui-dialog-buttons"),void 0):(t.each(i,function(i,s){var n,o;s=t.isFunction(s)?{click:s,text:i}:s,s=t.extend({type:"button"},s),n=s.click,o={icon:s.icon,iconPosition:s.iconPosition,showLabel:s.showLabel,icons:s.icons,text:s.text},delete s.click,delete s.icon,delete s.iconPosition,delete s.showLabel,delete s.icons,"boolean"==typeof s.text&&delete s.text,t("",s).button(o).appendTo(e.uiButtonSet).on("click",function(){n.apply(e.element[0],arguments)})}),this._addClass(this.uiDialog,"ui-dialog-buttons"),this.uiDialogButtonPane.appendTo(this.uiDialog),void 0)},_makeDraggable:function(){function e(t){return{position:t.position,offset:t.offset}}var i=this,s=this.options;this.uiDialog.draggable({cancel:".ui-dialog-content, .ui-dialog-titlebar-close",handle:".ui-dialog-titlebar",containment:"document",start:function(s,n){i._addClass(t(this),"ui-dialog-dragging"),i._blockFrames(),i._trigger("dragStart",s,e(n))},drag:function(t,s){i._trigger("drag",t,e(s))},stop:function(n,o){var a=o.offset.left-i.document.scrollLeft(),r=o.offset.top-i.document.scrollTop();s.position={my:"left top",at:"left"+(a>=0?"+":"")+a+" "+"top"+(r>=0?"+":"")+r,of:i.window},i._removeClass(t(this),"ui-dialog-dragging"),i._unblockFrames(),i._trigger("dragStop",n,e(o))}})},_makeResizable:function(){function e(t){return{originalPosition:t.originalPosition,originalSize:t.originalSize,position:t.position,size:t.size}}var i=this,s=this.options,n=s.resizable,o=this.uiDialog.css("position"),a="string"==typeof 
n?n:"n,e,s,w,se,sw,ne,nw";this.uiDialog.resizable({cancel:".ui-dialog-content",containment:"document",alsoResize:this.element,maxWidth:s.maxWidth,maxHeight:s.maxHeight,minWidth:s.minWidth,minHeight:this._minHeight(),handles:a,start:function(s,n){i._addClass(t(this),"ui-dialog-resizing"),i._blockFrames(),i._trigger("resizeStart",s,e(n))},resize:function(t,s){i._trigger("resize",t,e(s))},stop:function(n,o){var a=i.uiDialog.offset(),r=a.left-i.document.scrollLeft(),h=a.top-i.document.scrollTop();s.height=i.uiDialog.height(),s.width=i.uiDialog.width(),s.position={my:"left top",at:"left"+(r>=0?"+":"")+r+" "+"top"+(h>=0?"+":"")+h,of:i.window},i._removeClass(t(this),"ui-dialog-resizing"),i._unblockFrames(),i._trigger("resizeStop",n,e(o))}}).css("position",o)},_trackFocus:function(){this._on(this.widget(),{focusin:function(e){this._makeFocusTarget(),this._focusedElement=t(e.target)}})},_makeFocusTarget:function(){this._untrackInstance(),this._trackingInstances().unshift(this)},_untrackInstance:function(){var e=this._trackingInstances(),i=t.inArray(this,e);-1!==i&&e.splice(i,1)},_trackingInstances:function(){var t=this.document.data("ui-dialog-instances");return t||(t=[],this.document.data("ui-dialog-instances",t)),t},_minHeight:function(){var t=this.options;return"auto"===t.height?t.minHeight:Math.min(t.minHeight,t.height)},_position:function(){var t=this.uiDialog.is(":visible");t||this.uiDialog.show(),this.uiDialog.position(this.options.position),t||this.uiDialog.hide()},_setOptions:function(e){var i=this,s=!1,n={};t.each(e,function(t,e){i._setOption(t,e),t in i.sizeRelatedOptions&&(s=!0),t in i.resizableRelatedOptions&&(n[t]=e)}),s&&(this._size(),this._position()),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option",n)},_setOption:function(e,i){var 
s,n,o=this.uiDialog;"disabled"!==e&&(this._super(e,i),"appendTo"===e&&this.uiDialog.appendTo(this._appendTo()),"buttons"===e&&this._createButtons(),"closeText"===e&&this.uiDialogTitlebarClose.button({label:t("").text(""+this.options.closeText).html()}),"draggable"===e&&(s=o.is(":data(ui-draggable)"),s&&!i&&o.draggable("destroy"),!s&&i&&this._makeDraggable()),"position"===e&&this._position(),"resizable"===e&&(n=o.is(":data(ui-resizable)"),n&&!i&&o.resizable("destroy"),n&&"string"==typeof i&&o.resizable("option","handles",i),n||i===!1||this._makeResizable()),"title"===e&&this._title(this.uiDialogTitlebar.find(".ui-dialog-title")))},_size:function(){var t,e,i,s=this.options;this.element.show().css({width:"auto",minHeight:0,maxHeight:"none",height:0}),s.minWidth>s.width&&(s.width=s.minWidth),t=this.uiDialog.css({height:"auto",width:s.width}).outerHeight(),e=Math.max(0,s.minHeight-t),i="number"==typeof s.maxHeight?Math.max(0,s.maxHeight-t):"none","auto"===s.height?this.element.css({minHeight:e,maxHeight:i,height:"auto"}):this.element.height(Math.max(0,s.height-t)),this.uiDialog.is(":data(ui-resizable)")&&this.uiDialog.resizable("option","minHeight",this._minHeight())},_blockFrames:function(){this.iframeBlocks=this.document.find("iframe").map(function(){var e=t(this);return t("
    ").css({position:"absolute",width:e.outerWidth(),height:e.outerHeight()}).appendTo(e.parent()).offset(e.offset())[0]})},_unblockFrames:function(){this.iframeBlocks&&(this.iframeBlocks.remove(),delete this.iframeBlocks)},_allowInteraction:function(e){return t(e.target).closest(".ui-dialog").length?!0:!!t(e.target).closest(".ui-datepicker").length},_createOverlay:function(){if(this.options.modal){var e=!0;this._delay(function(){e=!1}),this.document.data("ui-dialog-overlays")||this._on(this.document,{focusin:function(t){e||this._allowInteraction(t)||(t.preventDefault(),this._trackingInstances()[0]._focusTabbable())}}),this.overlay=t("
    ").appendTo(this._appendTo()),this._addClass(this.overlay,null,"ui-widget-overlay ui-front"),this._on(this.overlay,{mousedown:"_keepFocus"}),this.document.data("ui-dialog-overlays",(this.document.data("ui-dialog-overlays")||0)+1)}},_destroyOverlay:function(){if(this.options.modal&&this.overlay){var t=this.document.data("ui-dialog-overlays")-1;t?this.document.data("ui-dialog-overlays",t):(this._off(this.document,"focusin"),this.document.removeData("ui-dialog-overlays")),this.overlay.remove(),this.overlay=null}}}),t.uiBackCompat!==!1&&t.widget("ui.dialog",t.ui.dialog,{options:{dialogClass:""},_createWrapper:function(){this._super(),this.uiDialog.addClass(this.options.dialogClass)},_setOption:function(t,e){"dialogClass"===t&&this.uiDialog.removeClass(this.options.dialogClass).addClass(e),this._superApply(arguments)}}),t.ui.dialog,t.widget("ui.droppable",{version:"1.12.1",widgetEventPrefix:"drop",options:{accept:"*",addClasses:!0,greedy:!1,scope:"default",tolerance:"intersect",activate:null,deactivate:null,drop:null,out:null,over:null},_create:function(){var e,i=this.options,s=i.accept;this.isover=!1,this.isout=!0,this.accept=t.isFunction(s)?s:function(t){return t.is(s)},this.proportions=function(){return arguments.length?(e=arguments[0],void 0):e?e:e={width:this.element[0].offsetWidth,height:this.element[0].offsetHeight}},this._addToManager(i.scope),i.addClasses&&this._addClass("ui-droppable")},_addToManager:function(e){t.ui.ddmanager.droppables[e]=t.ui.ddmanager.droppables[e]||[],t.ui.ddmanager.droppables[e].push(this)},_splice:function(t){for(var e=0;t.length>e;e++)t[e]===this&&t.splice(e,1)},_destroy:function(){var e=t.ui.ddmanager.droppables[this.options.scope];this._splice(e)},_setOption:function(e,i){if("accept"===e)this.accept=t.isFunction(i)?i:function(t){return t.is(i)};else if("scope"===e){var s=t.ui.ddmanager.droppables[this.options.scope];this._splice(s),this._addToManager(i)}this._super(e,i)},_activate:function(e){var 
i=t.ui.ddmanager.current;this._addActiveClass(),i&&this._trigger("activate",e,this.ui(i))},_deactivate:function(e){var i=t.ui.ddmanager.current;this._removeActiveClass(),i&&this._trigger("deactivate",e,this.ui(i))},_over:function(e){var i=t.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this._addHoverClass(),this._trigger("over",e,this.ui(i)))},_out:function(e){var i=t.ui.ddmanager.current;i&&(i.currentItem||i.element)[0]!==this.element[0]&&this.accept.call(this.element[0],i.currentItem||i.element)&&(this._removeHoverClass(),this._trigger("out",e,this.ui(i)))},_drop:function(e,i){var s=i||t.ui.ddmanager.current,n=!1;return s&&(s.currentItem||s.element)[0]!==this.element[0]?(this.element.find(":data(ui-droppable)").not(".ui-draggable-dragging").each(function(){var i=t(this).droppable("instance");return i.options.greedy&&!i.options.disabled&&i.options.scope===s.options.scope&&i.accept.call(i.element[0],s.currentItem||s.element)&&v(s,t.extend(i,{offset:i.element.offset()}),i.options.tolerance,e)?(n=!0,!1):void 0}),n?!1:this.accept.call(this.element[0],s.currentItem||s.element)?(this._removeActiveClass(),this._removeHoverClass(),this._trigger("drop",e,this.ui(s)),this.element):!1):!1},ui:function(t){return{draggable:t.currentItem||t.element,helper:t.helper,position:t.position,offset:t.positionAbs}},_addHoverClass:function(){this._addClass("ui-droppable-hover")},_removeHoverClass:function(){this._removeClass("ui-droppable-hover")},_addActiveClass:function(){this._addClass("ui-droppable-active")},_removeActiveClass:function(){this._removeClass("ui-droppable-active")}});var v=t.ui.intersect=function(){function t(t,e,i){return t>=e&&e+i>t}return function(e,i,s,n){if(!i.offset)return!1;var 
o=(e.positionAbs||e.position.absolute).left+e.margins.left,a=(e.positionAbs||e.position.absolute).top+e.margins.top,r=o+e.helperProportions.width,h=a+e.helperProportions.height,l=i.offset.left,c=i.offset.top,u=l+i.proportions().width,d=c+i.proportions().height;switch(s){case"fit":return o>=l&&u>=r&&a>=c&&d>=h;case"intersect":return o+e.helperProportions.width/2>l&&u>r-e.helperProportions.width/2&&a+e.helperProportions.height/2>c&&d>h-e.helperProportions.height/2;case"pointer":return t(n.pageY,c,i.proportions().height)&&t(n.pageX,l,i.proportions().width);case"touch":return(a>=c&&d>=a||h>=c&&d>=h||c>a&&h>d)&&(o>=l&&u>=o||r>=l&&u>=r||l>o&&r>u);default:return!1}}}();t.ui.ddmanager={current:null,droppables:{"default":[]},prepareOffsets:function(e,i){var s,n,o=t.ui.ddmanager.droppables[e.options.scope]||[],a=i?i.type:null,r=(e.currentItem||e.element).find(":data(ui-droppable)").addBack();t:for(s=0;o.length>s;s++)if(!(o[s].options.disabled||e&&!o[s].accept.call(o[s].element[0],e.currentItem||e.element))){for(n=0;r.length>n;n++)if(r[n]===o[s].element[0]){o[s].proportions().height=0;continue t}o[s].visible="none"!==o[s].element.css("display"),o[s].visible&&("mousedown"===a&&o[s]._activate.call(o[s],i),o[s].offset=o[s].element.offset(),o[s].proportions({width:o[s].element[0].offsetWidth,height:o[s].element[0].offsetHeight}))}},drop:function(e,i){var s=!1;return 
t.each((t.ui.ddmanager.droppables[e.options.scope]||[]).slice(),function(){this.options&&(!this.options.disabled&&this.visible&&v(e,this,this.options.tolerance,i)&&(s=this._drop.call(this,i)||s),!this.options.disabled&&this.visible&&this.accept.call(this.element[0],e.currentItem||e.element)&&(this.isout=!0,this.isover=!1,this._deactivate.call(this,i)))}),s},dragStart:function(e,i){e.element.parentsUntil("body").on("scroll.droppable",function(){e.options.refreshPositions||t.ui.ddmanager.prepareOffsets(e,i)})},drag:function(e,i){e.options.refreshPositions&&t.ui.ddmanager.prepareOffsets(e,i),t.each(t.ui.ddmanager.droppables[e.options.scope]||[],function(){if(!this.options.disabled&&!this.greedyChild&&this.visible){var s,n,o,a=v(e,this,this.options.tolerance,i),r=!a&&this.isover?"isout":a&&!this.isover?"isover":null;r&&(this.options.greedy&&(n=this.options.scope,o=this.element.parents(":data(ui-droppable)").filter(function(){return t(this).droppable("instance").options.scope===n}),o.length&&(s=t(o[0]).droppable("instance"),s.greedyChild="isover"===r)),s&&"isover"===r&&(s.isover=!1,s.isout=!0,s._out.call(s,i)),this[r]=!0,this["isout"===r?"isover":"isout"]=!1,this["isover"===r?"_over":"_out"].call(this,i),s&&"isout"===r&&(s.isout=!1,s.isover=!0,s._over.call(s,i)))}})},dragStop:function(e,i){e.element.parentsUntil("body").off("scroll.droppable"),e.options.refreshPositions||t.ui.ddmanager.prepareOffsets(e,i)}},t.uiBackCompat!==!1&&t.widget("ui.droppable",t.ui.droppable,{options:{hoverClass:!1,activeClass:!1},_addActiveClass:function(){this._super(),this.options.activeClass&&this.element.addClass(this.options.activeClass)},_removeActiveClass:function(){this._super(),this.options.activeClass&&this.element.removeClass(this.options.activeClass)},_addHoverClass:function(){this._super(),this.options.hoverClass&&this.element.addClass(this.options.hoverClass)},_removeHoverClass:function(){this._super(),this.options.hoverClass&&this.element.removeClass(this.options.hoverClass)}}),t.
ui.droppable,t.widget("ui.progressbar",{version:"1.12.1",options:{classes:{"ui-progressbar":"ui-corner-all","ui-progressbar-value":"ui-corner-left","ui-progressbar-complete":"ui-corner-right"},max:100,value:0,change:null,complete:null},min:0,_create:function(){this.oldValue=this.options.value=this._constrainedValue(),this.element.attr({role:"progressbar","aria-valuemin":this.min}),this._addClass("ui-progressbar","ui-widget ui-widget-content"),this.valueDiv=t("
    ").appendTo(this.element),this._addClass(this.valueDiv,"ui-progressbar-value","ui-widget-header"),this._refreshValue()},_destroy:function(){this.element.removeAttr("role aria-valuemin aria-valuemax aria-valuenow"),this.valueDiv.remove()},value:function(t){return void 0===t?this.options.value:(this.options.value=this._constrainedValue(t),this._refreshValue(),void 0)},_constrainedValue:function(t){return void 0===t&&(t=this.options.value),this.indeterminate=t===!1,"number"!=typeof t&&(t=0),this.indeterminate?!1:Math.min(this.options.max,Math.max(this.min,t))},_setOptions:function(t){var e=t.value;delete t.value,this._super(t),this.options.value=this._constrainedValue(e),this._refreshValue()},_setOption:function(t,e){"max"===t&&(e=Math.max(this.min,e)),this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t)},_percentage:function(){return this.indeterminate?100:100*(this.options.value-this.min)/(this.options.max-this.min)},_refreshValue:function(){var e=this.options.value,i=this._percentage();this.valueDiv.toggle(this.indeterminate||e>this.min).width(i.toFixed(0)+"%"),this._toggleClass(this.valueDiv,"ui-progressbar-complete",null,e===this.options.max)._toggleClass("ui-progressbar-indeterminate",null,this.indeterminate),this.indeterminate?(this.element.removeAttr("aria-valuenow"),this.overlayDiv||(this.overlayDiv=t("
    ").appendTo(this.valueDiv),this._addClass(this.overlayDiv,"ui-progressbar-overlay"))):(this.element.attr({"aria-valuemax":this.options.max,"aria-valuenow":e}),this.overlayDiv&&(this.overlayDiv.remove(),this.overlayDiv=null)),this.oldValue!==e&&(this.oldValue=e,this._trigger("change")),e===this.options.max&&this._trigger("complete")}}),t.widget("ui.selectable",t.ui.mouse,{version:"1.12.1",options:{appendTo:"body",autoRefresh:!0,distance:0,filter:"*",tolerance:"touch",selected:null,selecting:null,start:null,stop:null,unselected:null,unselecting:null},_create:function(){var e=this;this._addClass("ui-selectable"),this.dragged=!1,this.refresh=function(){e.elementPos=t(e.element[0]).offset(),e.selectees=t(e.options.filter,e.element[0]),e._addClass(e.selectees,"ui-selectee"),e.selectees.each(function(){var i=t(this),s=i.offset(),n={left:s.left-e.elementPos.left,top:s.top-e.elementPos.top};t.data(this,"selectable-item",{element:this,$element:i,left:n.left,top:n.top,right:n.left+i.outerWidth(),bottom:n.top+i.outerHeight(),startselected:!1,selected:i.hasClass("ui-selected"),selecting:i.hasClass("ui-selecting"),unselecting:i.hasClass("ui-unselecting")})})},this.refresh(),this._mouseInit(),this.helper=t("
    "),this._addClass(this.helper,"ui-selectable-helper")},_destroy:function(){this.selectees.removeData("selectable-item"),this._mouseDestroy()},_mouseStart:function(e){var i=this,s=this.options;this.opos=[e.pageX,e.pageY],this.elementPos=t(this.element[0]).offset(),this.options.disabled||(this.selectees=t(s.filter,this.element[0]),this._trigger("start",e),t(s.appendTo).append(this.helper),this.helper.css({left:e.pageX,top:e.pageY,width:0,height:0}),s.autoRefresh&&this.refresh(),this.selectees.filter(".ui-selected").each(function(){var s=t.data(this,"selectable-item");s.startselected=!0,e.metaKey||e.ctrlKey||(i._removeClass(s.$element,"ui-selected"),s.selected=!1,i._addClass(s.$element,"ui-unselecting"),s.unselecting=!0,i._trigger("unselecting",e,{unselecting:s.element}))}),t(e.target).parents().addBack().each(function(){var s,n=t.data(this,"selectable-item");return n?(s=!e.metaKey&&!e.ctrlKey||!n.$element.hasClass("ui-selected"),i._removeClass(n.$element,s?"ui-unselecting":"ui-selected")._addClass(n.$element,s?"ui-selecting":"ui-unselecting"),n.unselecting=!s,n.selecting=s,n.selected=s,s?i._trigger("selecting",e,{selecting:n.element}):i._trigger("unselecting",e,{unselecting:n.element}),!1):void 0}))},_mouseDrag:function(e){if(this.dragged=!0,!this.options.disabled){var i,s=this,n=this.options,o=this.opos[0],a=this.opos[1],r=e.pageX,h=e.pageY;return o>r&&(i=r,r=o,o=i),a>h&&(i=h,h=a,a=i),this.helper.css({left:o,top:a,width:r-o,height:h-a}),this.selectees.each(function(){var 
i=t.data(this,"selectable-item"),l=!1,c={};i&&i.element!==s.element[0]&&(c.left=i.left+s.elementPos.left,c.right=i.right+s.elementPos.left,c.top=i.top+s.elementPos.top,c.bottom=i.bottom+s.elementPos.top,"touch"===n.tolerance?l=!(c.left>r||o>c.right||c.top>h||a>c.bottom):"fit"===n.tolerance&&(l=c.left>o&&r>c.right&&c.top>a&&h>c.bottom),l?(i.selected&&(s._removeClass(i.$element,"ui-selected"),i.selected=!1),i.unselecting&&(s._removeClass(i.$element,"ui-unselecting"),i.unselecting=!1),i.selecting||(s._addClass(i.$element,"ui-selecting"),i.selecting=!0,s._trigger("selecting",e,{selecting:i.element}))):(i.selecting&&((e.metaKey||e.ctrlKey)&&i.startselected?(s._removeClass(i.$element,"ui-selecting"),i.selecting=!1,s._addClass(i.$element,"ui-selected"),i.selected=!0):(s._removeClass(i.$element,"ui-selecting"),i.selecting=!1,i.startselected&&(s._addClass(i.$element,"ui-unselecting"),i.unselecting=!0),s._trigger("unselecting",e,{unselecting:i.element}))),i.selected&&(e.metaKey||e.ctrlKey||i.startselected||(s._removeClass(i.$element,"ui-selected"),i.selected=!1,s._addClass(i.$element,"ui-unselecting"),i.unselecting=!0,s._trigger("unselecting",e,{unselecting:i.element})))))}),!1}},_mouseStop:function(e){var i=this;return this.dragged=!1,t(".ui-unselecting",this.element[0]).each(function(){var s=t.data(this,"selectable-item");i._removeClass(s.$element,"ui-unselecting"),s.unselecting=!1,s.startselected=!1,i._trigger("unselected",e,{unselected:s.element})}),t(".ui-selecting",this.element[0]).each(function(){var 
s=t.data(this,"selectable-item");i._removeClass(s.$element,"ui-selecting")._addClass(s.$element,"ui-selected"),s.selecting=!1,s.selected=!0,s.startselected=!0,i._trigger("selected",e,{selected:s.element})}),this._trigger("stop",e),this.helper.remove(),!1}}),t.widget("ui.selectmenu",[t.ui.formResetMixin,{version:"1.12.1",defaultElement:"",widgetEventPrefix:"spin",options:{classes:{"ui-spinner":"ui-corner-all","ui-spinner-down":"ui-corner-br","ui-spinner-up":"ui-corner-tr"},culture:null,icons:{down:"ui-icon-triangle-1-s",up:"ui-icon-triangle-1-n"},incremental:!0,max:null,min:null,numberFormat:null,page:10,step:1,change:null,spin:null,start:null,stop:null},_create:function(){this._setOption("max",this.options.max),this._setOption("min",this.options.min),this._setOption("step",this.options.step),""!==this.value()&&this._value(this.element.val(),!0),this._draw(),this._on(this._events),this._refresh(),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_getCreateOptions:function(){var e=this._super(),i=this.element;return t.each(["min","max","step"],function(t,s){var n=i.attr(s);null!=n&&n.length&&(e[s]=n)}),e},_events:{keydown:function(t){this._start(t)&&this._keydown(t)&&t.preventDefault()},keyup:"_stop",focus:function(){this.previous=this.element.val()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(this._stop(),this._refresh(),this.previous!==this.element.val()&&this._trigger("change",t),void 0)},mousewheel:function(t,e){if(e){if(!this.spinning&&!this._start(t))return!1;this._spin((e>0?1:-1)*this.options.step,t),clearTimeout(this.mousewheelTimer),this.mousewheelTimer=this._delay(function(){this.spinning&&this._stop(t)},100),t.preventDefault()}},"mousedown .ui-spinner-button":function(e){function i(){var e=this.element[0]===t.ui.safeActiveElement(this.document[0]);e||(this.element.trigger("focus"),this.previous=s,this._delay(function(){this.previous=s}))}var 
s;s=this.element[0]===t.ui.safeActiveElement(this.document[0])?this.previous:this.element.val(),e.preventDefault(),i.call(this),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,i.call(this)}),this._start(e)!==!1&&this._repeat(null,t(e.currentTarget).hasClass("ui-spinner-up")?1:-1,e)},"mouseup .ui-spinner-button":"_stop","mouseenter .ui-spinner-button":function(e){return t(e.currentTarget).hasClass("ui-state-active")?this._start(e)===!1?!1:(this._repeat(null,t(e.currentTarget).hasClass("ui-spinner-up")?1:-1,e),void 0):void 0},"mouseleave .ui-spinner-button":"_stop"},_enhance:function(){this.uiSpinner=this.element.attr("autocomplete","off").wrap("").parent().append("")},_draw:function(){this._enhance(),this._addClass(this.uiSpinner,"ui-spinner","ui-widget ui-widget-content"),this._addClass("ui-spinner-input"),this.element.attr("role","spinbutton"),this.buttons=this.uiSpinner.children("a").attr("tabIndex",-1).attr("aria-hidden",!0).button({classes:{"ui-button":""}}),this._removeClass(this.buttons,"ui-corner-all"),this._addClass(this.buttons.first(),"ui-spinner-button ui-spinner-up"),this._addClass(this.buttons.last(),"ui-spinner-button ui-spinner-down"),this.buttons.first().button({icon:this.options.icons.up,showLabel:!1}),this.buttons.last().button({icon:this.options.icons.down,showLabel:!1}),this.buttons.height()>Math.ceil(.5*this.uiSpinner.height())&&this.uiSpinner.height()>0&&this.uiSpinner.height(this.uiSpinner.height())},_keydown:function(e){var i=this.options,s=t.ui.keyCode;switch(e.keyCode){case s.UP:return this._repeat(null,1,e),!0;case s.DOWN:return this._repeat(null,-1,e),!0;case s.PAGE_UP:return this._repeat(null,i.page,e),!0;case s.PAGE_DOWN:return this._repeat(null,-i.page,e),!0}return!1},_start:function(t){return 
this.spinning||this._trigger("start",t)!==!1?(this.counter||(this.counter=1),this.spinning=!0,!0):!1},_repeat:function(t,e,i){t=t||500,clearTimeout(this.timer),this.timer=this._delay(function(){this._repeat(40,e,i)},t),this._spin(e*this.options.step,i)},_spin:function(t,e){var i=this.value()||0;this.counter||(this.counter=1),i=this._adjustValue(i+t*this._increment(this.counter)),this.spinning&&this._trigger("spin",e,{value:i})===!1||(this._value(i),this.counter++)},_increment:function(e){var i=this.options.incremental;return i?t.isFunction(i)?i(e):Math.floor(e*e*e/5e4-e*e/500+17*e/200+1):1},_precision:function(){var t=this._precisionOf(this.options.step);return null!==this.options.min&&(t=Math.max(t,this._precisionOf(this.options.min))),t},_precisionOf:function(t){var e=""+t,i=e.indexOf(".");return-1===i?0:e.length-i-1},_adjustValue:function(t){var e,i,s=this.options;return e=null!==s.min?s.min:0,i=t-e,i=Math.round(i/s.step)*s.step,t=e+i,t=parseFloat(t.toFixed(this._precision())),null!==s.max&&t>s.max?s.max:null!==s.min&&s.min>t?s.min:t},_stop:function(t){this.spinning&&(clearTimeout(this.timer),clearTimeout(this.mousewheelTimer),this.counter=0,this.spinning=!1,this._trigger("stop",t))},_setOption:function(t,e){var i,s,n;return"culture"===t||"numberFormat"===t?(i=this._parse(this.element.val()),this.options[t]=e,this.element.val(this._format(i)),void 0):(("max"===t||"min"===t||"step"===t)&&"string"==typeof e&&(e=this._parse(e)),"icons"===t&&(s=this.buttons.first().find(".ui-icon"),this._removeClass(s,null,this.options.icons.up),this._addClass(s,null,e.up),n=this.buttons.last().find(".ui-icon"),this._removeClass(n,null,this.options.icons.down),this._addClass(n,null,e.down)),this._super(t,e),void 0)},_setOptionDisabled:function(t){this._super(t),this._toggleClass(this.uiSpinner,null,"ui-state-disabled",!!t),this.element.prop("disabled",!!t),this.buttons.button(t?"disable":"enable")},_setOptions:r(function(t){this._super(t)}),_parse:function(t){return"string"==typeof 
t&&""!==t&&(t=window.Globalize&&this.options.numberFormat?Globalize.parseFloat(t,10,this.options.culture):+t),""===t||isNaN(t)?null:t},_format:function(t){return""===t?"":window.Globalize&&this.options.numberFormat?Globalize.format(t,this.options.numberFormat,this.options.culture):t},_refresh:function(){this.element.attr({"aria-valuemin":this.options.min,"aria-valuemax":this.options.max,"aria-valuenow":this._parse(this.element.val())})},isValid:function(){var t=this.value();return null===t?!1:t===this._adjustValue(t)},_value:function(t,e){var i;""!==t&&(i=this._parse(t),null!==i&&(e||(i=this._adjustValue(i)),t=this._format(i))),this.element.val(t),this._refresh()},_destroy:function(){this.element.prop("disabled",!1).removeAttr("autocomplete role aria-valuemin aria-valuemax aria-valuenow"),this.uiSpinner.replaceWith(this.element)},stepUp:r(function(t){this._stepUp(t)}),_stepUp:function(t){this._start()&&(this._spin((t||1)*this.options.step),this._stop())},stepDown:r(function(t){this._stepDown(t)}),_stepDown:function(t){this._start()&&(this._spin((t||1)*-this.options.step),this._stop())},pageUp:r(function(t){this._stepUp((t||1)*this.options.page)}),pageDown:r(function(t){this._stepDown((t||1)*this.options.page)}),value:function(t){return arguments.length?(r(this._value).call(this,t),void 0):this._parse(this.element.val())},widget:function(){return 
this.uiSpinner}}),t.uiBackCompat!==!1&&t.widget("ui.spinner",t.ui.spinner,{_enhance:function(){this.uiSpinner=this.element.attr("autocomplete","off").wrap(this._uiSpinnerHtml()).parent().append(this._buttonHtml())},_uiSpinnerHtml:function(){return""},_buttonHtml:function(){return""}}),t.ui.spinner,t.widget("ui.tabs",{version:"1.12.1",delay:300,options:{active:null,classes:{"ui-tabs":"ui-corner-all","ui-tabs-nav":"ui-corner-all","ui-tabs-panel":"ui-corner-bottom","ui-tabs-tab":"ui-corner-top"},collapsible:!1,event:"click",heightStyle:"content",hide:null,show:null,activate:null,beforeActivate:null,beforeLoad:null,load:null},_isLocal:function(){var t=/#.*$/;return function(e){var i,s;i=e.href.replace(t,""),s=location.href.replace(t,"");try{i=decodeURIComponent(i)}catch(n){}try{s=decodeURIComponent(s)}catch(n){}return e.hash.length>1&&i===s}}(),_create:function(){var e=this,i=this.options;this.running=!1,this._addClass("ui-tabs","ui-widget ui-widget-content"),this._toggleClass("ui-tabs-collapsible",null,i.collapsible),this._processTabs(),i.active=this._initialActive(),t.isArray(i.disabled)&&(i.disabled=t.unique(i.disabled.concat(t.map(this.tabs.filter(".ui-state-disabled"),function(t){return e.tabs.index(t)}))).sort()),this.active=this.options.active!==!1&&this.anchors.length?this._findActive(i.active):t(),this._refresh(),this.active.length&&this.load(i.active)},_initialActive:function(){var e=this.options.active,i=this.options.collapsible,s=location.hash.substring(1);return null===e&&(s&&this.tabs.each(function(i,n){return t(n).attr("aria-controls")===s?(e=i,!1):void 0}),null===e&&(e=this.tabs.index(this.tabs.filter(".ui-tabs-active"))),(null===e||-1===e)&&(e=this.tabs.length?0:!1)),e!==!1&&(e=this.tabs.index(this.tabs.eq(e)),-1===e&&(e=i?!1:0)),!i&&e===!1&&this.anchors.length&&(e=0),e},_getCreateEventData:function(){return{tab:this.active,panel:this.active.length?this._getPanelForTab(this.active):t()}},_tabKeydown:function(e){var 
i=t(t.ui.safeActiveElement(this.document[0])).closest("li"),s=this.tabs.index(i),n=!0;if(!this._handlePageNav(e)){switch(e.keyCode){case t.ui.keyCode.RIGHT:case t.ui.keyCode.DOWN:s++;break;case t.ui.keyCode.UP:case t.ui.keyCode.LEFT:n=!1,s--;break;case t.ui.keyCode.END:s=this.anchors.length-1;break;case t.ui.keyCode.HOME:s=0;break;case t.ui.keyCode.SPACE:return e.preventDefault(),clearTimeout(this.activating),this._activate(s),void 0;case t.ui.keyCode.ENTER:return e.preventDefault(),clearTimeout(this.activating),this._activate(s===this.options.active?!1:s),void 0;default:return}e.preventDefault(),clearTimeout(this.activating),s=this._focusNextTab(s,n),e.ctrlKey||e.metaKey||(i.attr("aria-selected","false"),this.tabs.eq(s).attr("aria-selected","true"),this.activating=this._delay(function(){this.option("active",s)},this.delay))}},_panelKeydown:function(e){this._handlePageNav(e)||e.ctrlKey&&e.keyCode===t.ui.keyCode.UP&&(e.preventDefault(),this.active.trigger("focus"))},_handlePageNav:function(e){return e.altKey&&e.keyCode===t.ui.keyCode.PAGE_UP?(this._activate(this._focusNextTab(this.options.active-1,!1)),!0):e.altKey&&e.keyCode===t.ui.keyCode.PAGE_DOWN?(this._activate(this._focusNextTab(this.options.active+1,!0)),!0):void 0},_findNextTab:function(e,i){function s(){return e>n&&(e=0),0>e&&(e=n),e}for(var n=this.tabs.length-1;-1!==t.inArray(s(),this.options.disabled);)e=i?e+1:e-1;return e},_focusNextTab:function(t,e){return t=this._findNextTab(t,e),this.tabs.eq(t).trigger("focus"),t},_setOption:function(t,e){return"active"===t?(this._activate(e),void 0):(this._super(t,e),"collapsible"===t&&(this._toggleClass("ui-tabs-collapsible",null,e),e||this.options.active!==!1||this._activate(0)),"event"===t&&this._setupEvents(e),"heightStyle"===t&&this._setupHeightStyle(e),void 0)},_sanitizeSelector:function(t){return t?t.replace(/[!"$%&'()*+,.\/:;<=>?@\[\]\^`{|}~]/g,"\\$&"):""},refresh:function(){var 
e=this.options,i=this.tablist.children(":has(a[href])");e.disabled=t.map(i.filter(".ui-state-disabled"),function(t){return i.index(t)}),this._processTabs(),e.active!==!1&&this.anchors.length?this.active.length&&!t.contains(this.tablist[0],this.active[0])?this.tabs.length===e.disabled.length?(e.active=!1,this.active=t()):this._activate(this._findNextTab(Math.max(0,e.active-1),!1)):e.active=this.tabs.index(this.active):(e.active=!1,this.active=t()),this._refresh()},_refresh:function(){this._setOptionDisabled(this.options.disabled),this._setupEvents(this.options.event),this._setupHeightStyle(this.options.heightStyle),this.tabs.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}),this.panels.not(this._getPanelForTab(this.active)).hide().attr({"aria-hidden":"true"}),this.active.length?(this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}),this._addClass(this.active,"ui-tabs-active","ui-state-active"),this._getPanelForTab(this.active).show().attr({"aria-hidden":"false"})):this.tabs.eq(0).attr("tabIndex",0)},_processTabs:function(){var e=this,i=this.tabs,s=this.anchors,n=this.panels;this.tablist=this._getList().attr("role","tablist"),this._addClass(this.tablist,"ui-tabs-nav","ui-helper-reset ui-helper-clearfix ui-widget-header"),this.tablist.on("mousedown"+this.eventNamespace,"> li",function(e){t(this).is(".ui-state-disabled")&&e.preventDefault()}).on("focus"+this.eventNamespace,".ui-tabs-anchor",function(){t(this).closest("li").is(".ui-state-disabled")&&this.blur()}),this.tabs=this.tablist.find("> li:has(a[href])").attr({role:"tab",tabIndex:-1}),this._addClass(this.tabs,"ui-tabs-tab","ui-state-default"),this.anchors=this.tabs.map(function(){return t("a",this)[0]}).attr({role:"presentation",tabIndex:-1}),this._addClass(this.anchors,"ui-tabs-anchor"),this.panels=t(),this.anchors.each(function(i,s){var 
n,o,a,r=t(s).uniqueId().attr("id"),h=t(s).closest("li"),l=h.attr("aria-controls");e._isLocal(s)?(n=s.hash,a=n.substring(1),o=e.element.find(e._sanitizeSelector(n))):(a=h.attr("aria-controls")||t({}).uniqueId()[0].id,n="#"+a,o=e.element.find(n),o.length||(o=e._createPanel(a),o.insertAfter(e.panels[i-1]||e.tablist)),o.attr("aria-live","polite")),o.length&&(e.panels=e.panels.add(o)),l&&h.data("ui-tabs-aria-controls",l),h.attr({"aria-controls":a,"aria-labelledby":r}),o.attr("aria-labelledby",r)}),this.panels.attr("role","tabpanel"),this._addClass(this.panels,"ui-tabs-panel","ui-widget-content"),i&&(this._off(i.not(this.tabs)),this._off(s.not(this.anchors)),this._off(n.not(this.panels)))},_getList:function(){return this.tablist||this.element.find("ol, ul").eq(0)},_createPanel:function(e){return t("
    ").attr("id",e).data("ui-tabs-destroy",!0)},_setOptionDisabled:function(e){var i,s,n;for(t.isArray(e)&&(e.length?e.length===this.anchors.length&&(e=!0):e=!1),n=0;s=this.tabs[n];n++)i=t(s),e===!0||-1!==t.inArray(n,e)?(i.attr("aria-disabled","true"),this._addClass(i,null,"ui-state-disabled")):(i.removeAttr("aria-disabled"),this._removeClass(i,null,"ui-state-disabled"));this.options.disabled=e,this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,e===!0)},_setupEvents:function(e){var i={};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.anchors.add(this.tabs).add(this.panels)),this._on(!0,this.anchors,{click:function(t){t.preventDefault()}}),this._on(this.anchors,i),this._on(this.tabs,{keydown:"_tabKeydown"}),this._on(this.panels,{keydown:"_panelKeydown"}),this._focusable(this.tabs),this._hoverable(this.tabs)},_setupHeightStyle:function(e){var i,s=this.element.parent();"fill"===e?(i=s.height(),i-=this.element.outerHeight()-this.element.height(),this.element.siblings(":visible").each(function(){var e=t(this),s=e.css("position");"absolute"!==s&&"fixed"!==s&&(i-=e.outerHeight(!0))}),this.element.children().not(this.panels).each(function(){i-=t(this).outerHeight(!0)}),this.panels.each(function(){t(this).height(Math.max(0,i-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===e&&(i=0,this.panels.each(function(){i=Math.max(i,t(this).height("").height())}).height(i))},_eventHandler:function(e){var i=this.options,s=this.active,n=t(e.currentTarget),o=n.closest("li"),a=o[0]===s[0],r=a&&i.collapsible,h=r?t():this._getPanelForTab(o),l=s.length?this._getPanelForTab(s):t(),c={oldTab:s,oldPanel:l,newTab:r?t():o,newPanel:h};e.preventDefault(),o.hasClass("ui-state-disabled")||o.hasClass("ui-tabs-loading")||this.running||a&&!i.collapsible||this._trigger("beforeActivate",e,c)===!1||(i.active=r?!1:this.tabs.index(o),this.active=a?t():o,this.xhr&&this.xhr.abort(),l.length||h.length||t.error("jQuery UI Tabs: Mismatching 
fragment identifier."),h.length&&this.load(this.tabs.index(o),e),this._toggle(e,c))},_toggle:function(e,i){function s(){o.running=!1,o._trigger("activate",e,i)}function n(){o._addClass(i.newTab.closest("li"),"ui-tabs-active","ui-state-active"),a.length&&o.options.show?o._show(a,o.options.show,s):(a.show(),s())}var o=this,a=i.newPanel,r=i.oldPanel;this.running=!0,r.length&&this.options.hide?this._hide(r,this.options.hide,function(){o._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),n()}):(this._removeClass(i.oldTab.closest("li"),"ui-tabs-active","ui-state-active"),r.hide(),n()),r.attr("aria-hidden","true"),i.oldTab.attr({"aria-selected":"false","aria-expanded":"false"}),a.length&&r.length?i.oldTab.attr("tabIndex",-1):a.length&&this.tabs.filter(function(){return 0===t(this).attr("tabIndex")}).attr("tabIndex",-1),a.attr("aria-hidden","false"),i.newTab.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_activate:function(e){var i,s=this._findActive(e);s[0]!==this.active[0]&&(s.length||(s=this.active),i=s.find(".ui-tabs-anchor")[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return e===!1?t():this.tabs.eq(e)},_getIndex:function(e){return"string"==typeof e&&(e=this.anchors.index(this.anchors.filter("[href$='"+t.ui.escapeSelector(e)+"']"))),e},_destroy:function(){this.xhr&&this.xhr.abort(),this.tablist.removeAttr("role").off(this.eventNamespace),this.anchors.removeAttr("role tabIndex").removeUniqueId(),this.tabs.add(this.panels).each(function(){t.data(this,"ui-tabs-destroy")?t(this).remove():t(this).removeAttr("role tabIndex aria-live aria-busy aria-selected aria-labelledby aria-hidden aria-expanded")}),this.tabs.each(function(){var e=t(this),i=e.data("ui-tabs-aria-controls");i?e.attr("aria-controls",i).removeData("ui-tabs-aria-controls"):e.removeAttr("aria-controls")}),this.panels.show(),"content"!==this.options.heightStyle&&this.panels.css("height","")},enable:function(e){var 
i=this.options.disabled;i!==!1&&(void 0===e?i=!1:(e=this._getIndex(e),i=t.isArray(i)?t.map(i,function(t){return t!==e?t:null}):t.map(this.tabs,function(t,i){return i!==e?i:null})),this._setOptionDisabled(i))},disable:function(e){var i=this.options.disabled;if(i!==!0){if(void 0===e)i=!0;else{if(e=this._getIndex(e),-1!==t.inArray(e,i))return;i=t.isArray(i)?t.merge([e],i).sort():[e]}this._setOptionDisabled(i)}},load:function(e,i){e=this._getIndex(e);var s=this,n=this.tabs.eq(e),o=n.find(".ui-tabs-anchor"),a=this._getPanelForTab(n),r={tab:n,panel:a},h=function(t,e){"abort"===e&&s.panels.stop(!1,!0),s._removeClass(n,"ui-tabs-loading"),a.removeAttr("aria-busy"),t===s.xhr&&delete s.xhr};this._isLocal(o[0])||(this.xhr=t.ajax(this._ajaxSettings(o,i,r)),this.xhr&&"canceled"!==this.xhr.statusText&&(this._addClass(n,"ui-tabs-loading"),a.attr("aria-busy","true"),this.xhr.done(function(t,e,n){setTimeout(function(){a.html(t),s._trigger("load",i,r),h(n,e)},1)}).fail(function(t,e){setTimeout(function(){h(t,e)},1)})))},_ajaxSettings:function(e,i,s){var n=this;return{url:e.attr("href").replace(/#.*$/,""),beforeSend:function(e,o){return n._trigger("beforeLoad",i,t.extend({jqXHR:e,ajaxSettings:o},s))}}},_getPanelForTab:function(e){var i=t(e).attr("aria-controls");return this.element.find(this._sanitizeSelector("#"+i))}}),t.uiBackCompat!==!1&&t.widget("ui.tabs",t.ui.tabs,{_processTabs:function(){this._superApply(arguments),this._addClass(this.tabs,"ui-tab")}}),t.ui.tabs,t.widget("ui.tooltip",{version:"1.12.1",options:{classes:{"ui-tooltip":"ui-corner-all ui-widget-shadow"},content:function(){var e=t(this).attr("title")||"";return t("").text(e).html()},hide:!0,items:"[title]:not([disabled])",position:{my:"left top+15",at:"left bottom",collision:"flipfit flip"},show:!0,track:!1,close:null,open:null},_addDescribedBy:function(e,i){var s=(e.attr("aria-describedby")||"").split(/\s+/);s.push(i),e.data("ui-tooltip-id",i).attr("aria-describedby",t.trim(s.join(" 
")))},_removeDescribedBy:function(e){var i=e.data("ui-tooltip-id"),s=(e.attr("aria-describedby")||"").split(/\s+/),n=t.inArray(i,s);-1!==n&&s.splice(n,1),e.removeData("ui-tooltip-id"),s=t.trim(s.join(" ")),s?e.attr("aria-describedby",s):e.removeAttr("aria-describedby")},_create:function(){this._on({mouseover:"open",focusin:"open"}),this.tooltips={},this.parents={},this.liveRegion=t("
    ").attr({role:"log","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this.disabledTitles=t([])},_setOption:function(e,i){var s=this;this._super(e,i),"content"===e&&t.each(this.tooltips,function(t,e){s._updateContent(e.element)})},_setOptionDisabled:function(t){this[t?"_disable":"_enable"]()},_disable:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur");n.target=n.currentTarget=s.element[0],e.close(n,!0)}),this.disabledTitles=this.disabledTitles.add(this.element.find(this.options.items).addBack().filter(function(){var e=t(this);return e.is("[title]")?e.data("ui-tooltip-title",e.attr("title")).removeAttr("title"):void 0}))},_enable:function(){this.disabledTitles.each(function(){var e=t(this);e.data("ui-tooltip-title")&&e.attr("title",e.data("ui-tooltip-title"))}),this.disabledTitles=t([])},open:function(e){var i=this,s=t(e?e.target:this.element).closest(this.options.items);s.length&&!s.data("ui-tooltip-id")&&(s.attr("title")&&s.data("ui-tooltip-title",s.attr("title")),s.data("ui-tooltip-open",!0),e&&"mouseover"===e.type&&s.parents().each(function(){var e,s=t(this);s.data("ui-tooltip-open")&&(e=t.Event("blur"),e.target=e.currentTarget=this,i.close(e,!0)),s.attr("title")&&(s.uniqueId(),i.parents[this.id]={element:this,title:s.attr("title")},s.attr("title",""))}),this._registerCloseHandlers(e,s),this._updateContent(s,e))},_updateContent:function(t,e){var i,s=this.options.content,n=this,o=e?e.type:null;return"string"==typeof s||s.nodeType||s.jquery?this._open(e,t,s):(i=s.call(t[0],function(i){n._delay(function(){t.data("ui-tooltip-open")&&(e&&(e.type=o),this._open(e,t,i))})}),i&&this._open(e,t,i),void 0)},_open:function(e,i,s){function n(t){l.of=t,a.is(":hidden")||a.position(l)}var o,a,r,h,l=t.extend({},this.options.position);if(s){if(o=this._find(i))return o.tooltip.find(".ui-tooltip-content").html(s),void 
0;i.is("[title]")&&(e&&"mouseover"===e.type?i.attr("title",""):i.removeAttr("title")),o=this._tooltip(i),a=o.tooltip,this._addDescribedBy(i,a.attr("id")),a.find(".ui-tooltip-content").html(s),this.liveRegion.children().hide(),h=t("
    ").html(a.find(".ui-tooltip-content").html()),h.removeAttr("name").find("[name]").removeAttr("name"),h.removeAttr("id").find("[id]").removeAttr("id"),h.appendTo(this.liveRegion),this.options.track&&e&&/^mouse/.test(e.type)?(this._on(this.document,{mousemove:n}),n(e)):a.position(t.extend({of:i},this.options.position)),a.hide(),this._show(a,this.options.show),this.options.track&&this.options.show&&this.options.show.delay&&(r=this.delayedShow=setInterval(function(){a.is(":visible")&&(n(l.of),clearInterval(r))},t.fx.interval)),this._trigger("open",e,{tooltip:a})}},_registerCloseHandlers:function(e,i){var s={keyup:function(e){if(e.keyCode===t.ui.keyCode.ESCAPE){var s=t.Event(e);s.currentTarget=i[0],this.close(s,!0)}}};i[0]!==this.element[0]&&(s.remove=function(){this._removeTooltip(this._find(i).tooltip)}),e&&"mouseover"!==e.type||(s.mouseleave="close"),e&&"focusin"!==e.type||(s.focusout="close"),this._on(!0,i,s)},close:function(e){var i,s=this,n=t(e?e.currentTarget:this.element),o=this._find(n);return o?(i=o.tooltip,o.closing||(clearInterval(this.delayedShow),n.data("ui-tooltip-title")&&!n.attr("title")&&n.attr("title",n.data("ui-tooltip-title")),this._removeDescribedBy(n),o.hiding=!0,i.stop(!0),this._hide(i,this.options.hide,function(){s._removeTooltip(t(this))}),n.removeData("ui-tooltip-open"),this._off(n,"mouseleave focusout keyup"),n[0]!==this.element[0]&&this._off(n,"remove"),this._off(this.document,"mousemove"),e&&"mouseleave"===e.type&&t.each(this.parents,function(e,i){t(i.element).attr("title",i.title),delete s.parents[e]}),o.closing=!0,this._trigger("close",e,{tooltip:i}),o.hiding||(o.closing=!1)),void 0):(n.removeData("ui-tooltip-open"),void 0)},_tooltip:function(e){var i=t("
    ").attr("role","tooltip"),s=t("
    ").appendTo(i),n=i.uniqueId().attr("id");return this._addClass(s,"ui-tooltip-content"),this._addClass(i,"ui-tooltip","ui-widget ui-widget-content"),i.appendTo(this._appendTo(e)),this.tooltips[n]={element:e,tooltip:i}},_find:function(t){var e=t.data("ui-tooltip-id");return e?this.tooltips[e]:null},_removeTooltip:function(t){t.remove(),delete this.tooltips[t.attr("id")]},_appendTo:function(t){var e=t.closest(".ui-front, dialog");return e.length||(e=this.document[0].body),e},_destroy:function(){var e=this;t.each(this.tooltips,function(i,s){var n=t.Event("blur"),o=s.element;n.target=n.currentTarget=o[0],e.close(n,!0),t("#"+i).remove(),o.data("ui-tooltip-title")&&(o.attr("title")||o.attr("title",o.data("ui-tooltip-title")),o.removeData("ui-tooltip-title"))}),this.liveRegion.remove()}}),t.uiBackCompat!==!1&&t.widget("ui.tooltip",t.ui.tooltip,{options:{tooltipClass:null},_tooltip:function(){var t=this._superApply(arguments);return this.options.tooltipClass&&t.tooltip.addClass(this.options.tooltipClass),t}}),t.ui.tooltip}); \ No newline at end of file diff --git a/spaces/qisan/Depressed_sentimental_analysis/model.py b/spaces/qisan/Depressed_sentimental_analysis/model.py deleted file mode 100644 index 3e352ed807c2591b351a19842ca4eb67475bd758..0000000000000000000000000000000000000000 --- a/spaces/qisan/Depressed_sentimental_analysis/model.py +++ /dev/null @@ -1,32 +0,0 @@ -import transformers -from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup -import torch -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -from matplotlib import rc -from sklearn.model_selection import train_test_split -from sklearn.metrics import confusion_matrix, classification_report -from collections import defaultdict -from textwrap import wrap - -from torch import nn, optim -from torch.utils.data import Dataset, DataLoader -import torch.nn.functional as F - -class DepressionClassifier(nn.Module): - - def __init__(self, n_classes, 
pre_trained_model_name): - super(DepressionClassifier, self).__init__() - self.bert = BertModel.from_pretrained(pre_trained_model_name) - self.drop = nn.Dropout(p=0.3) - self.out = nn.Linear(self.bert.config.hidden_size, n_classes) - - def forward(self, input_ids, attention_mask): - _, pooled_output = self.bert( - input_ids=input_ids, - attention_mask=attention_mask, - return_dict = False #here - ) - output = self.drop(pooled_output) - return self.out(output) \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Ali213cracktalesofzestiriarelease __FULL__.md b/spaces/quidiaMuxgu/Expedit-SAM/Ali213cracktalesofzestiriarelease __FULL__.md deleted file mode 100644 index 9e925ff0e7ae324668977d93c6ec8952d99a90c1..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Ali213cracktalesofzestiriarelease __FULL__.md +++ /dev/null @@ -1,6 +0,0 @@ -

    ali213cracktalesofzestiriarelease


    DOWNLOADhttps://geags.com/2uCshJ



    - - 899543212b
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/F.A.L.T.U Movie Free Download BEST In Hindi 720p Download BEST.md b/spaces/quidiaMuxgu/Expedit-SAM/F.A.L.T.U Movie Free Download BEST In Hindi 720p Download BEST.md deleted file mode 100644 index 82131fd7cd161f8a0843d689bd1f2b0210a795dc..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/F.A.L.T.U Movie Free Download BEST In Hindi 720p Download BEST.md +++ /dev/null @@ -1,116 +0,0 @@ - -

    F.A.L.T.U Movie Free Download in Hindi 720p: A Comedy Drama Film You Don't Want to Miss

    - -

    Are you looking for a comedy drama film that will make you laugh and entertain you? If yes, then you should watch F.A.L.T.U movie. This movie is about four friends who open their own fake college after failing to get admission in any real one. The movie has a talented cast and crew who have made this movie enjoyable and hilarious. The movie was released in 2011 and was directed by Remo D'Souza.

    - -

    In this article, we will tell you how to download F.A.L.T.U movie free in Hindi 720p and watch it online. We will also give you more information about the movie and its plot, cast and crew.

    -

    F.A.L.T.U movie free download in hindi 720p download


    Download File »»» https://geags.com/2uCs73



    - -

    What is the Plot of F.A.L.T.U Movie?

    - -

    F.A.L.T.U movie is about four friends who are unable to get admission in any college due to their poor marks. They are Ritesh Virani (Jackky Bhagnani), Pooja Nigam (Puja Gupta), Nanj Nair (Angad Bedi) and Vishnu Vardhan (Chandan Roy Sanyal). They try to get admission through the father of their scholarly friend, Google Chand (Arshad Warsi), but fail. Ritesh's father, who is a scrap dealer, decides to fire his hired help and make his son work for him. Pooja's father decides to get her married to a rich man. Nanj's father wants him to join the army. Vishnu's father wants him to become a doctor.

    - -

    The desperate friends come up with a plan to open their own fake college called Fakirchand And Lakirchand Trust University (F.A.L.T.U). They hire a school teacher, Baaji Rao (Riteish Deshmukh), as the principal of their college. They also rent a derelict mansion in Panchgani and renovate it as their campus. They invite their parents and show them that they have finally got admission in a college.

    - -

    However, things get out of hand when hundreds of other students with poor marks show up at their college, insisting on being admitted. The friends try to manage the situation by creating fake courses, faculty and exams. They also face trouble from the police and Mr. Vardhan (Akbar Khan), the father of Google Chand, who is a powerful politician. The friends have to find a way to save their college and their future.

    - -

    How to Download F.A.L.T.U Movie Free in Hindi 720p?

    - -

    If you want to download F.A.L.T.U movie free in Hindi 720p, you can find it on various websites that offer free movie downloads. However, you should be careful when downloading from unknown sources, as they might contain viruses or malware that can harm your device. One of the websites that you can trust is AskMeMetallurgy , where you can find a link to download F.A.L.T.U movie free in Hindi 720p . You can also find other movies and web series on this website.

    - -

    How to Watch F.A.L.T.U Movie Online?

    - -

    If you want to watch F.A.L.T.U movie online, you can find it on various platforms that offer online streaming services. However, you might need to pay a subscription fee or rent the movie to watch it online. One of the platforms that you can use is ZEE5 , where you can watch F.A.L.T.U movie online in HD quality . You can also find other movies and shows on ZEE5.

    - -

    Who are the Cast and Crew of F.A.L.T.U Movie?

    - -

    F.A.L.T.U movie has a talented cast and crew who have worked hard to make this movie a success. Here are some of them:

    -

    - -
      -
    • Arshad Warsi as Google Chand: He is a money-lender and the father of Vishnu Vardhan. He helps his son and his friends to open their fake college.
    • -
    • Riteish Deshmukh as Baaji Rao: He is a school teacher who becomes the principal of F.A.L.T.U college. He is known for making lewd references to popular songs.
    • -
    • Jackky Bhagnani as Ritesh Virani: He is one of the four friends who open F.A.L.T.U college. He is the son of a scrap dealer who wants him to work for him.
    • -
    • Puja Gupta as Pooja Nigam: She is one of the four friends who open F.A.L.T.U college. She is the daughter of a rich man who wants her to get married.
    • -
    • Chandan Roy Sanyal as Vishnu Vardhan: He is one of the four friends who open F.A.L.T.U college. He is the son of Google Chand and a brilliant student.
    • -
    • Angad Bedi as Nanj Nair: He is one of the four friends who open F.A.L.T.U college. He is the son of an army officer who wants him to join the army.
    • -
    • Boman Irani as Principal Sharma: He is the principal of a real college where Google Chand tries to get admission for his son and his friends.
    • -
    • Darshan Jariwala as Jeevanlal Virani: He is the father of Ritesh Virani and a scrap dealer.
    • -
    • Akbar Khan as Mr. Vardhan: He is the father of Google Chand and a powerful politician who opposes F.A.L.T.U college.
    • -
    • Mithun Chakraborty as Minister: He is a minister who supports F.A.L.T.U college.
    • -
    - -

    The director of F.A.L.T.U movie is Remo D'Souza , who is also known for directing movies like ABCD , ABCD 2 , Race 3 , Street Dancer 3D , etc. The writers of F.A.L.T.U movie are Vipul Binjola , Tushar Hiranandani , and Aakash Kaushik . The music director of F.A.L.T.U movie is Sachin-Jigar , who have composed songs like "Char Baj Gaye" , "Le Ja Tu Mujhe" , "Fully Faltu" , etc.

    - -

    Conclusion

    - -

    F.A.L.T.U movie is a comedy drama film that you can watch with your friends or family for some fun and laughter. It tells the story of four friends who open their own fake college after failing to get admission in any real one. The movie has a talented cast and crew who have made this movie enjoyable

    -

    What are the Reviews and Ratings of F.A.L.T.U Movie?

    - -

    F.A.L.T.U movie has received mixed reviews and ratings from critics and audiences. The movie has a rating of 4.7 out of 10 on IMDb , based on 3,144 user ratings. The movie has a rating of 2.5 out of 5 on Times of India , based on 8 critic reviews. The movie has a rating of 3 out of 5 on Bollywood Hungama , based on 6 critic reviews.

    - -

    Some of the positive reviews of F.A.L.T.U movie are:

    - -
      -
    • "F.A.L.T.U is a fun film that works purely on the strength of its performances and gags." - Taran Adarsh, Bollywood Hungama
    • -
    • "F.A.L.T.U is a light-hearted entertainer that will appeal to the youth and the young at heart." - Nikhat Kazmi, Times of India
    • -
    • "F.A.L.T.U is a breezy entertainer that has its heart in the right place." - Komal Nahta, Koimoi
    • -
    - -

    Some of the negative reviews of F.A.L.T.U movie are:

    - -
      -
    • "F.A.L.T.U is a dull and boring film that fails to impress on any level." - Rajeev Masand, CNN-IBN
    • -
    • "F.A.L.T.U is a wasted opportunity that could have been a smart satire on the education system." - Anupama Chopra, NDTV
    • -
    • "F.A.L.T.U is a lame and uninspired film that tries too hard to be funny and cool." - Raja Sen, Rediff
    • -
    - -

    Overall, F.A.L.T.U movie is a comedy drama film that has some moments of humor and entertainment, but also suffers from a weak script and direction. The movie might appeal to some viewers who are looking for a light-hearted film, but might disappoint others who are expecting more from it.

    -

    What are the Songs and Soundtrack of F.A.L.T.U Movie?

    - -

    F.A.L.T.U movie has a catchy and upbeat soundtrack that adds to the fun and energy of the film. The music director of F.A.L.T.U movie is Sachin-Jigar, who have composed some popular songs for the film. The lyrics of the songs are written by Sameer and Amitabh Bhattacharya. The singers of the songs are Mika Singh, Hard Kaur, Jigar Saraiya, Neeraj Shridhar, Apeksha Dandekar, Vishal Dadlani, Priya Panchal, Sachin Sanghvi and Atif Aslam.

    - -

    The songs of F.A.L.T.U movie are:

    - -
      -
    • "Char Baj Gaye": This is a party song that features the four friends having fun at their fake college. The song is sung by Hard Kaur and Sachin-Jigar.
    • -
    • "Le Ja Tu Mujhe": This is a romantic song that features Ritesh and Pooja expressing their love for each other. The song is sung by Atif Aslam.
    • -
    • "Fully Faltu": This is a title song that features the four friends celebrating their fake college and their freedom. The song is sung by Mika Singh and Hard Kaur.
    • -
    • "Awaaz Do": This is a motivational song that features the four friends inspiring other students to join their fake college and pursue their dreams. The song is sung by Jigar Saraiya and Sachin Sanghvi.
    • -
    • "Gale Laga Le": This is a friendship song that features the four friends bonding with each other and supporting each other. The song is sung by Neeraj Shridhar and Apeksha Dandekar.
    • -
    • "Bhoot Aaya": This is a comedy song that features the four friends trying to scare away Mr. Vardhan and his goons from their fake college. The song is sung by Vishal Dadlani and Priya Panchal.
    • -
    - -

    The songs of F.A.L.T.U movie are available on various platforms, such as YouTube , Gaana , Spotify , etc. You can listen to them and enjoy them.

    -

    What are the Trivia and Facts of F.A.L.T.U Movie?

    - -

    F.A.L.T.U movie has some interesting trivia and facts that you might not know. Here are some of them:

    - -
      -
    • F.A.L.T.U movie is inspired by the Hollywood film Accepted (2006), which also tells the story of a group of students who create their own fake college.
    • -
    • F.A.L.T.U movie is the directorial debut of Remo D'Souza, who is also a famous choreographer and judge of dance reality shows.
    • -
    • F.A.L.T.U movie is the first film produced by Puja Entertainment, which is owned by Jackky Bhagnani's father, Vashu Bhagnani.
    • -
    • F.A.L.T.U movie was shot in various locations, such as Mumbai, Panchgani, Goa and Bangkok.
    • -
    • F.A.L.T.U movie features some special appearances by celebrities, such as Mithun Chakraborty, Himani Shivpuri and Remo D'Souza as contest judges, Akbar Khan as Mr. Vardhan's brother, Ramesh Taurani as a music company owner and Amitabh Bachchan as himself.
    • -
    - -

    What are the Awards and Nominations of F.A.L.T.U Movie?

    - -

    F.A.L.T.U movie has received some awards and nominations for its music and performances. Here are some of them:

    - -
      -
    • F.A.L.T.U movie won the Mirchi Music Award for Best Album of the Year in 2012.
    • -
    • F.A.L.T.U movie won the Stardust Award for Best Music Director for Sachin-Jigar in 2012.
    • -
    • F.A.L.T.U movie was nominated for the Filmfare Award for Best Music Director for Sachin-Jigar in 2012.
    • -
    • F.A.L.T.U movie was nominated for the IIFA Award for Best Music Director for Sachin-Jigar in 2012.
    • -
    • F.A.L.T.U movie was nominated for the Zee Cine Award for Best Music Director for Sachin-Jigar in 2012.
    • -
    - -

    Conclusion

    - -

    F.A.L.T.U movie is a comedy drama film that you can watch with your friends or family for some fun and laughter. It tells the story of four friends who open their own fake college after failing to get admission in any real one. The movie has a talented cast and crew who have made this movie enjoyable and hilarious. The movie also has a catchy and upbeat soundtrack that adds to the fun and energy of the film. You can download F.A.L.T.U movie free in Hindi 720p or watch it online on various platforms. You can also learn more about the movie and its plot, cast and crew, reviews and ratings, songs and soundtrack, trivia and facts, awards and nominations, etc. F.A.L.T.U movie is a comedy drama film that you don't want to miss.

    -

    Conclusion

    - -

    F.A.L.T.U movie is a comedy drama film that you can watch with your friends or family for some fun and laughter. It tells the story of four friends who open their own fake college after failing to get admission in any real one. The movie has a talented cast and crew who have made this movie enjoyable and hilarious. The movie also has a catchy and upbeat soundtrack that adds to the fun and energy of the film. You can download F.A.L.T.U movie free in Hindi 720p or watch it online on various platforms. You can also learn more about the movie and its plot, cast and crew, reviews and ratings, songs and soundtrack, trivia and facts, awards and nominations, etc. F.A.L.T.U movie is a comedy drama film that you don't want to miss.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537238KB.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537238KB.py deleted file mode 100644 index 9bb1df1ee93d3af49725f60ac0b6052e057c6872..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/lib_v5/nets_537238KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - 
self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/eval_spaces.py b/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/eval_spaces.py deleted file mode 100644 index b0cf689d24f70d95aa0d491fd04987296802e492..0000000000000000000000000000000000000000 --- a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/eval_spaces.py +++ /dev/null @@ 
-1,138 +0,0 @@ -import sys -import os - -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) -ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - -import time -import json -import numpy as np -import torch -from torch.utils.data import DataLoader - -from lib.options import BaseOptions -from lib.mesh_util import * -from lib.sample_util import * -from lib.train_util import * -from lib.model import * - -from PIL import Image -import torchvision.transforms as transforms - -import trimesh -from datetime import datetime - -# get options -opt = BaseOptions().parse() - -class Evaluator: - def __init__(self, opt, projection_mode='orthogonal'): - self.opt = opt - self.load_size = self.opt.loadSize - self.to_tensor = transforms.Compose([ - transforms.Resize(self.load_size), - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - ]) - # set cuda - cuda = torch.device('cuda:%d' % opt.gpu_id) if torch.cuda.is_available() else torch.device('cpu') - print("CUDDAAAAA ???", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "NO ONLY CPU") - - # create net - netG = HGPIFuNet(opt, projection_mode).to(device=cuda) - print('Using Network: ', netG.name) - - if opt.load_netG_checkpoint_path: - netG.load_state_dict(torch.load(opt.load_netG_checkpoint_path, map_location=cuda)) - - if opt.load_netC_checkpoint_path is not None: - print('loading for net C ...', opt.load_netC_checkpoint_path) - netC = ResBlkPIFuNet(opt).to(device=cuda) - netC.load_state_dict(torch.load(opt.load_netC_checkpoint_path, map_location=cuda)) - else: - netC = None - - os.makedirs(opt.results_path, exist_ok=True) - os.makedirs('%s/%s' % (opt.results_path, opt.name), exist_ok=True) - - opt_log = os.path.join(opt.results_path, opt.name, 'opt.txt') - with open(opt_log, 'w') as outfile: - outfile.write(json.dumps(vars(opt), indent=2)) - - self.cuda = cuda - self.netG = netG - self.netC = netC - - def load_image(self, image_path, 
mask_path): - # Name - img_name = os.path.splitext(os.path.basename(image_path))[0] - # Calib - B_MIN = np.array([-1, -1, -1]) - B_MAX = np.array([1, 1, 1]) - projection_matrix = np.identity(4) - projection_matrix[1, 1] = -1 - calib = torch.Tensor(projection_matrix).float() - # Mask - mask = Image.open(mask_path).convert('L') - mask = transforms.Resize(self.load_size)(mask) - mask = transforms.ToTensor()(mask).float() - # image - image = Image.open(image_path).convert('RGB') - image = self.to_tensor(image) - image = mask.expand_as(image) * image - return { - 'name': img_name, - 'img': image.unsqueeze(0), - 'calib': calib.unsqueeze(0), - 'mask': mask.unsqueeze(0), - 'b_min': B_MIN, - 'b_max': B_MAX, - } - - def eval(self, data, use_octree=False): - ''' - Evaluate a data point - :param data: a dict containing at least ['name'], ['image'], ['calib'], ['b_min'] and ['b_max'] tensors. - :return: - ''' - opt = self.opt - with torch.no_grad(): - self.netG.eval() - if self.netC: - self.netC.eval() - save_path = '%s/%s/result_%s.obj' % (opt.results_path, opt.name, data['name']) - if self.netC: - gen_mesh_color(opt, self.netG, self.netC, self.cuda, data, save_path, use_octree=use_octree) - else: - gen_mesh(opt, self.netG, self.cuda, data, save_path, use_octree=use_octree) - - -if __name__ == '__main__': - evaluator = Evaluator(opt) - - results_path = opt.results_path - name = opt.name - test_image_path = opt.img_path - test_mask_path = test_image_path[:-4] +'_mask.png' - test_img_name = os.path.splitext(os.path.basename(test_image_path))[0] - print("test_image: ", test_image_path) - print("test_mask: ", test_mask_path) - - try: - time = datetime.now() - print("evaluating" , time) - data = evaluator.load_image(test_image_path, test_mask_path) - evaluator.eval(data, False) - print("done evaluating" , datetime.now() - time) - except Exception as e: - print("error:", e.args) - - try: - mesh = trimesh.load(f'{results_path}/{name}/result_{test_img_name}.obj') - 
mesh.apply_transform([[1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, -1, 0], - [0, 0, 0, 1]]) - mesh.export(file_obj=f'{results_path}/{name}/result_{test_img_name}.glb') - except Exception as e: - print("error generating MESH", e) diff --git a/spaces/radames/transformers-js-svelte-example-app/assets/index-2c765c37.js b/spaces/radames/transformers-js-svelte-example-app/assets/index-2c765c37.js deleted file mode 100644 index 252937d044ce96176325d270b533c9c98d645fc3..0000000000000000000000000000000000000000 --- a/spaces/radames/transformers-js-svelte-example-app/assets/index-2c765c37.js +++ /dev/null @@ -1 +0,0 @@ -var q=Object.defineProperty;var F=(e,t,n)=>t in e?q(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var E=(e,t,n)=>(F(e,typeof t!="symbol"?t+"":t,n),n);(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const r of document.querySelectorAll('link[rel="modulepreload"]'))i(r);new MutationObserver(r=>{for(const o of r)if(o.type==="childList")for(const l of o.addedNodes)l.tagName==="LINK"&&l.rel==="modulepreload"&&i(l)}).observe(document,{childList:!0,subtree:!0});function n(r){const o={};return r.integrity&&(o.integrity=r.integrity),r.referrerPolicy&&(o.referrerPolicy=r.referrerPolicy),r.crossOrigin==="use-credentials"?o.credentials="include":r.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function i(r){if(r.ep)return;r.ep=!0;const o=n(r);fetch(r.href,o)}})();const J="modulepreload",V=function(e){return"/"+e},C={},z=function(t,n,i){if(!n||n.length===0)return t();const r=document.getElementsByTagName("link");return Promise.all(n.map(o=>{if(o=V(o),o in C)return;C[o]=!0;const l=o.endsWith(".css"),f=l?'[rel="stylesheet"]':"";if(!!i)for(let c=r.length-1;c>=0;c--){const u=r[c];if(u.href===o&&(!l||u.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${o}"]${f}`))return;const 
s=document.createElement("link");if(s.rel=l?"stylesheet":J,l||(s.as="script",s.crossOrigin=""),s.href=o,document.head.appendChild(s),l)return new Promise((c,u)=>{s.addEventListener("load",c),s.addEventListener("error",()=>u(new Error(`Unable to preload CSS for ${o}`)))})})).then(()=>t()).catch(o=>{const l=new Event("vite:preloadError",{cancelable:!0});if(l.payload=o,window.dispatchEvent(l),!l.defaultPrevented)throw o})};function b(){}function I(e){return e()}function A(){return Object.create(null)}function v(e){e.forEach(I)}function M(e){return typeof e=="function"}function D(e,t){return e!=e?t==t:e!==t||e&&typeof e=="object"||typeof e=="function"}function K(e){return Object.keys(e).length===0}function d(e,t){e.appendChild(t)}function T(e,t,n){e.insertBefore(t,n||null)}function S(e){e.parentNode&&e.parentNode.removeChild(e)}function y(e){return document.createElement(e)}function U(e){return document.createTextNode(e)}function k(){return U(" ")}function G(e,t,n,i){return e.addEventListener(t,n,i),()=>e.removeEventListener(t,n,i)}function h(e,t,n){n==null?e.removeAttribute(t):e.getAttribute(t)!==n&&e.setAttribute(t,n)}function H(e){return Array.from(e.childNodes)}function Q(e,t){t=""+t,e.data!==t&&(e.data=t)}let w;function $(e){w=e}function X(){if(!w)throw new Error("Function called outside component initialization");return w}function Y(e){X().$$.on_mount.push(e)}const _=[],R=[];let g=[];const j=[],Z=Promise.resolve();let L=!1;function ee(){L||(L=!0,Z.then(W))}function O(e){g.push(e)}const x=new Set;let m=0;function W(){if(m!==0)return;const e=w;do{try{for(;m<_.length;){const t=_[m];m++,$(t),te(t.$$)}}catch(t){throw _.length=0,m=0,t}for($(null),_.length=0,m=0;R.length;)R.pop()();for(let t=0;te.indexOf(i)===-1?t.push(i):n.push(i)),n.forEach(i=>i()),g=t}const re=new Set;function oe(e,t){e&&e.i&&(re.delete(e),e.i(t))}function ie(e,t,n){const{fragment:i,after_update:r}=e.$$;i&&i.m(t,n),O(()=>{const 
o=e.$$.on_mount.map(I).filter(M);e.$$.on_destroy?e.$$.on_destroy.push(...o):v(o),e.$$.on_mount=[]}),r.forEach(O)}function se(e,t){const n=e.$$;n.fragment!==null&&(ne(n.after_update),v(n.on_destroy),n.fragment&&n.fragment.d(t),n.on_destroy=n.fragment=null,n.ctx=[])}function le(e,t){e.$$.dirty[0]===-1&&(_.push(e),ee(),e.$$.dirty.fill(0)),e.$$.dirty[t/31|0]|=1<{const P=N.length?N[0]:p;return s.ctx&&r(s.ctx[u],s.ctx[u]=P)&&(!s.skip_bound&&s.bound[u]&&s.bound[u](P),c&&le(e,u)),p}):[],s.update(),c=!0,v(s.before_update),s.fragment=i?i(s.ctx):!1,t.target){if(t.hydrate){const u=H(t.target);s.fragment&&s.fragment.l(u),u.forEach(S)}else s.fragment&&s.fragment.c();t.intro&&oe(e.$$.fragment),ie(e,t.target,t.anchor),W()}$(a)}class ue{constructor(){E(this,"$$");E(this,"$$set")}$destroy(){se(this,1),this.$destroy=b}$on(t,n){if(!M(n))return b;const i=this.$$.callbacks[t]||(this.$$.callbacks[t]=[]);return i.push(n),()=>{const r=i.indexOf(n);r!==-1&&i.splice(r,1)}}$set(t){this.$$set&&!K(t)&&(this.$$.skip_bound=!0,this.$$set(t),this.$$.skip_bound=!1)}}const ae="4";typeof window<"u"&&(window.__svelte||(window.__svelte={v:new Set})).v.add(ae);function B(e){let t,n=(!e[1]||!e[0]?"Loading...":JSON.stringify(e[0],null,2))+"",i;return{c(){t=y("pre"),i=U(n),h(t,"class","bg-gray-100 dark:bg-gray-800 p-2 rounded")},m(r,o){T(r,t,o),d(t,i)},p(r,o){o&3&&n!==(n=(!r[1]||!r[0]?"Loading...":JSON.stringify(r[0],null,2))+"")&&Q(i,n)},d(r){r&&S(t)}}}function fe(e){let t,n,i,r,o,l,f,a,s,c=e[1]!==null&&B(e);return{c(){t=y("main"),n=y("h1"),n.textContent="Transformers.js",i=k(),r=y("h2"),r.textContent="Svelte (client-side)",o=k(),l=y("input"),f=k(),c&&c.c(),h(n,"class","text-5xl font-bold mb-2 text-center"),h(r,"class","text-2xl mb-4 text-center"),h(l,"type","text"),h(l,"class","w-full max-w-xs p-2 border border-gray-300 rounded mb-4 dark:text-black"),h(l,"placeholder","Enter text here"),h(t,"class","flex min-h-screen flex-col items-center justify-center 
p-12")},m(u,p){T(u,t,p),d(t,n),d(t,i),d(t,r),d(t,o),d(t,l),d(t,f),c&&c.m(t,null),a||(s=G(l,"input",e[3]),a=!0)},p(u,[p]){u[1]!==null?c?c.p(u,p):(c=B(u),c.c(),c.m(t,null)):c&&(c.d(1),c=null)},i:b,o:b,d(u){u&&S(t),c&&c.d(),a=!1,s()}}}function de(e,t,n){let i,r=null,o=null;Y(async()=>{if(!i){const a=await z(()=>import("./worker-4f278980.js"),[]);i=new a.default;const s=c=>{switch(c.data.status){case"initiate":n(1,o=!1);break;case"ready":n(1,o=!0);break;case"complete":n(0,r=c.data.output[0]);break}};i.addEventListener("message",s)}});function l(a){i&&i.postMessage({text:a})}return[r,o,l,a=>{l(a.target.value)}]}class he extends ue{constructor(t){super(),ce(this,t,de,fe,D,{})}}new he({target:document.getElementById("app")}); diff --git a/spaces/radames/whisper.cpp-wasm/README.md b/spaces/radames/whisper.cpp-wasm/README.md deleted file mode 100644 index 0872399ecdf28129135060d7eb81d683f723c5c5..0000000000000000000000000000000000000000 --- a/spaces/radames/whisper.cpp-wasm/README.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Whisper.cpp WASM -emoji: 📉 -colorFrom: red -colorTo: green -sdk: static -pinned: false -# app_port: 8080 -custom_headers: - cross-origin-embedder-policy: require-corp - cross-origin-opener-policy: same-origin - cross-origin-resource-policy: cross-origin ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -Models on https://huggingface.co/datasets/ggerganov/whisper.cpp \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Adobe Dimension CC 2020 v3.1.1.1223 (x64) with Crack Pros and Cons of the Software.md b/spaces/raedeXanto/academic-chatgpt-beta/Adobe Dimension CC 2020 v3.1.1.1223 (x64) with Crack Pros and Cons of the Software.md deleted file mode 100644 index 2184f18ab1c0f3d46f1dac36aa7f2b25bfb45ff4..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Adobe Dimension CC 2020 v3.1.1.1223 (x64) with Crack Pros and Cons of the 
Software.md +++ /dev/null @@ -1,235 +0,0 @@ -
    -

    Adobe Dimension CC 2020 v3.1.1.1223 (x64) with Crack: A Comprehensive Review

    -

    If you are a designer who wants to create stunning 3D images with realistic effects, you might be interested in Adobe Dimension CC 2020, a powerful and advanced application that allows you to transform and design your images into 3D and 2D objects or scenes. In this article, we will review Adobe Dimension CC 2020 v3.1.1.1223 (x64) with crack, a pre-activated version of the software that you can download and install for free. We will cover the following topics:

    -

    Adobe Dimension CC 2020 v3.1.1.1223 (x64) with Crack


    Download »»» https://tinourl.com/2uL1Kk



    -
      -
    • What is Adobe Dimension CC 2020?
    • -
    • How to download and install Adobe Dimension CC 2020 with crack?
    • -
    • How to use Adobe Dimension CC 2020 to create photorealistic 3D images?
    • -
    • What are the pros and cons of Adobe Dimension CC 2020 with crack?
    • -
    -

    By the end of this article, you will have a clear idea of what Adobe Dimension CC 2020 can do for you, how to get it, and how to use it effectively. Let's get started!

    -

    What is Adobe Dimension CC 2020?

    -

    Adobe Dimension CC 2020 is a set of 2D and 3D design tools that lets you quickly create high-quality photorealistic 3D images and composite 2D and 3D assets in a realistic environment. It is designed for graphic designers who want to enhance their skills and workflow without learning complex and specialized applications.

    -

    A brief introduction to the software

    -

    Adobe Dimension CC 2020 was formerly known as Project Felix, a project that started in 2016 as a way to simplify the process of creating 3D images for designers. It was officially launched as Adobe Dimension CC in October 2017, and since then it has been updated regularly with new features and improvements.

    -

    Adobe Dimension CC 2020 is part of the Adobe Creative Cloud suite, which means that you can access it with your subscription or as a standalone product. It also means that you can integrate it with other Adobe products, such as Photoshop, Illustrator, After Effects, and more.

    -

    The main features and benefits of Adobe Dimension CC 2020

    -

    Adobe Dimension CC 2020 has many features and benefits that make it a great choice for creating photorealistic 3D images. Here are some of them:

    -

    Adobe Dimension 2020 v3.4.1 (x64) Pre-Cracked
    -Adobe Dimension CC 2019 v2.3.1.1060 (x64) Multilingual
    -Adobe Dimension 2020 v3.4.8 Pre-Activated Free Download
    -Adobe Dimension CC 2020 v3.0.0.1082 (x64) Pre-Activated [FileCR]
    -Adobe Dimension 2020 v3.4.9 Video Demo Free Download
    -Adobe Dimension CC Pre-Activated offline installer for Windows PC
    -Adobe Dimension 2020 v3.4.6.4044 (x64) Multilingual
    -Adobe Dimension CC 2020 v3.1.1.1223 (x64) with Crack ~UPD~
    -Adobe Dimension 2020 v3.4.5.4032 (x64) Multilingual
    -Adobe Dimension CC 2020 v3.2.1 (x64) Multilingual RePack
    -Adobe Dimension CC 2020 v3.4.4.4028 (x64) Multilingual
    -Adobe Dimension CC 2020 v3.1.1 (x64) Multilingual Portable
    -Adobe Dimension CC 2020 v3.4.7 (x64) Multilingual
    -Adobe Dimension CC 2020 v3.1 (x64) Multilingual Pre-Cracked
    -Adobe Dimension CC 2020 v3.4.3.4022 (x64) Multilingual
    -Adobe Dimension CC 2019 v2.2 (x64) Multilingual Portable
    -Adobe Dimension CC 2019 v2.1 (x64) Multilingual Pre-Cracked
    -Adobe Dimension CC 2019 v2.2 (x64) Multilingual RePack
    -Adobe Dimension CC 2019 v2.1 (x64) Multilingual Portable [FileCR]
    -Adobe Dimension CC 2019 v2.2 (x64) Multilingual [FileCR]
    -Adobe Dimension CC 2019 v2.1 (x64) Multilingual [FileCR]
    -Adobe Dimension CC 2019 v2.0 (x64) + Crack [CracksNow]
    -Adobe Dimension CC 2018 v1.1 (x64) + Crack [CracksNow]
    -Adobe Dimension CC 2018 v1.0 (x64) + Crack [CracksNow]
    -Adobe Dimension CC 2018 v1.1 (x64) + Crack [CracksMind]
    -How to create product mockups with Adobe Dimension CC
    -How to composite 2D and 3D assets with Adobe Dimension CC
    -How to render photorealistic images with Adobe Dimension CC
    -How to use material editing controls in Adobe Dimension CC
    -How to add shadows and reflections in Adobe Dimension CC
    -How to use V-Ray photorealistic rendering in Adobe Dimension CC
    -How to use material capture in Adobe Dimension CC
    -How to use camera view bookmarks in Adobe Dimension CC
    -How to use camera depth of field controls in Adobe Dimension CC
    -How to place graphics on 3D surfaces with Adobe Dimension CC
    -How to use match image automation in Adobe Dimension CC
    -How to export scenes as layers with Adobe Dimension CC
    -How to publish high-impact marketing materials with Adobe Dimension CC
    -How to create product shots and composite 2D and 3D assets with Adobe Dimension CC
    -How to craft photorealistic scenes with Adobe Dimension CC
    -How to pick your material, texture, and backdrop with Adobe Dimension CC
    -How to use familiar tools and shortcuts in Adobe Dimension CC
    -How to use multi-layered PSD renders in Adobe Dimension CC
    -How to use real-time render preview in Adobe Dimension CC
    -How to use in-app guidance in Adobe Dimension CC

    -
      -
    • You can place graphics on any surface of your 3D objects or scenes, such as logos, labels, stickers, etc.
    • -
    • You can match the lighting, perspective, and shadows of your images automatically with the image-based lighting feature.
    • -
    • You can use familiar tools and shortcuts from Photoshop and Illustrator, such as layers, masks, selection tools, etc.
    • -
    • You can export your scenes as multi-layered PSD files for further editing in Photoshop.
    • -
    • You can edit the materials and textures of your objects or scenes with various controls and presets.
    • -
    • You can add shadows and reflections to your objects or scenes with realistic effects.
    • -
    • You can access thousands of high-quality assets from Adobe Stock, such as models, materials, lights, etc.
    • -
    • You can preview your images in real-time with the V-Ray rendering engine.
    • -
    • You can get in-app guidance and tutorials to learn how to use the software effectively.
    • -
    • You can use the camera view bookmarks feature to save different views of your scenes.
    • -
    • You can use the camera depth of field feature to create focus effects on your images.
    • -
    • You can use the material capture feature to create custom materials from photos.
    • -
    -

    The system requirements and technical details of Adobe Dimension CC 2020

    -

    Before you download and install Adobe Dimension CC 2020 with crack, you need to make sure that your PC meets the minimum system requirements for running the software smoothly. Here are the system requirements and technical details of Adobe Dimension CC 2020:

    - - - - - - - - - - - - - - -
    Supported OSWindows 10 Anniversary Update (64-bit) - Version 1607 (build 10.0.14393) or later
    ProcessorIntel Core i5 (2011 or newer) or Intel Xeon (2011 or newer)
    RAM8 GB or more
    Graphics cardGeforce GTX 770, Intel Iris Pro Graphics 580 or 6200, or equivalent
    Video memory512 MB of dedicated VRAM
    OpenGLOpenGL 3.2 capable system
    Display1024 x 640
    Free hard disk space2.5 GB or more
    Software full nameAdobe Dimension CC 2020 v3.1.1.1223 (x64)
    Setup file nameAdobe_Dimension_v3.1.1.1223.rar
    Full setup size1.5 GB
    Setup typeOffline installer / Full standalone setup
    Compatibility architecture64 bit (x64)
    Password

    "}

    "}

    "}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}"}}
    -

    How to download and install Adobe Dimension CC 2020 with crack?

    -

    If you want to download and install Adobe Dimension CC

    2020 with crack, you need to follow these steps:

    -

    The steps to download the software from a reliable source

    -

    There are many websites that claim to offer Adobe Dimension CC 2020 with crack for free, but not all of them are trustworthy and safe. Some of them may contain viruses, malware, or spyware that can harm your PC or steal your personal information. Therefore, you need to be careful and choose a reliable source to download the software from.

    -

    One of the reliable sources that we recommend is FileCR.com, a website that provides various software and applications for Windows and Mac with pre-activated versions. You can download Adobe Dimension CC 2020 with crack from this website without any hassle or risk.

    -

    To download the software from FileCR.com, you need to do the following:

    -
      -
    1. Go to the website and search for Adobe Dimension CC 2020 in the search box.
    2. -
    3. Click on the link that says "Adobe Dimension 3.4.8 Pre-Activated Free Download - FileCR".
    4. -
    5. Scroll down to the bottom of the page and click on the button that says "Download Now".
    6. -
    7. Wait for a few seconds until a new page opens with a countdown timer.
    8. -
    9. After the countdown is over, click on the button that says "Create Download Link".
    10. -
    11. Wait for another few seconds until a new page opens with a download link.
    12. -
    13. Click on the link that says "Click here to download" and save the file to your PC.
    14. -
    -

    The steps to install the software and apply the crack

    -

    After you have downloaded the software from FileCR.com, you need to install it and apply the crack to activate it. To do that, you need to follow these steps:

    -
      -
    1. Extract the file that you have downloaded using WinRAR or any other extraction tool.
    2. -
    3. Open the extracted folder and double-click on the file that says "Setup.exe".
    4. -
    5. Follow the instructions on the screen to install the software.
    6. -
    7. After the installation is complete, do not run the software yet.
    8. -
    9. Open the folder that says "Crack" and copy all the files inside it.
    10. -
    11. Paste the files into the installation directory of Adobe Dimension CC 2020. The default location is C:\Program Files\Adobe\Adobe Dimension CC 2020.
    12. -
    13. Replace the original files if prompted.
    14. -
    15. Run the software as an administrator.
    16. -
    -

    Congratulations! You have successfully installed and activated Adobe Dimension CC 2020 with crack. You can now enjoy using it for creating photorealistic 3D images.

    -

    The precautions and tips to avoid any errors or issues

    -

    Although Adobe Dimension CC 2020 with crack is a pre-activated version of the software, it may still encounter some errors or issues due to various reasons. To avoid or fix them, you need to take some precautions and follow some tips. Here are some of them:

    -
      -
    • Make sure your PC meets the minimum system requirements for running Adobe Dimension CC 2020 smoothly.
    • -
    • Make sure you have a stable internet connection for downloading and installing the software.
    • -
    • Make sure you disable your antivirus or firewall before downloading and installing the software, as they may interfere with the process or delete some files.
    • -
    • Make sure you download the software from a reliable source, such as FileCR.com, and not from any suspicious or unknown websites.
    • -
    • Make sure you extract the file that you have downloaded using WinRAR or any other extraction tool, and not using Windows Explorer or any other program.
    • -
    • Make sure you copy and paste the files from the Crack folder into the installation directory of Adobe Dimension CC 2020 correctly, and replace the original files if prompted.
    • -
    • Make sure you run the software as an administrator after applying the crack.
    • -
    • If you encounter any errors or issues while using Adobe Dimension CC 2020 with crack, such as crashes, freezes, glitches, etc., try to update your graphics card driver, reinstall Visual C++ Redistributable Packages, or contact FileCR.com for support.
    • -
    -

    How to use Adobe Dimension CC 2020 to create photorealistic 3D images?

    -

    Now that you have downloaded and installed Adobe Dimension CC 2020 with crack, you might be wondering how to use it to create photorealistic 3D images. In this section, we will give you a brief overview of how to use Adobe Dimension CC 2020 to create photorealistic 3D images. We will cover

    The basic workflow and interface of Adobe Dimension CC 2020 are simple and intuitive. You can create photorealistic 3D images in four easy steps:

    -
      -
    1. Import your 3D models and 2D graphics into the canvas. You can use the assets from Adobe Stock, your own files, or any other sources.
    2. -
    3. Compose your scene by arranging your models and graphics on the canvas. You can use the tools and options on the toolbar and the panels to move, rotate, scale, align, and duplicate your objects.
    4. -
    5. Adjust your scene by applying materials, lights, backgrounds, and effects to your objects. You can use the tools and options on the toolbar and the panels to edit the properties and appearance of your objects.
    6. -
    7. Render your scene by exporting it as a high-quality image or a multi-layered PSD file. You can use the tools and options on the toolbar and the panels to set the resolution, format, quality, and location of your output.
    8. -
    -

    The interface of Adobe Dimension CC 2020 consists of four main parts:

    -
      -
    • The canvas: The main area where you can see and edit your scene in 3D.
    • -
    • The toolbar: The horizontal bar at the top that contains various tools and options for manipulating your scene.
    • -
    • The panels: The vertical bars on the right that contain various panels for accessing and modifying your assets, properties, actions, and settings.
    • -
    • The menu bar: The horizontal bar at the very top that contains various menus for accessing more tools and options.
    • -
    -

    The tools and options to compose, adjust, and render 3D images

    -

    Adobe Dimension CC 2020 offers a variety of tools and options to help you compose, adjust, and render 3D images with ease and efficiency. Here are some of them:

    -
      -
    • The selection tool: The default tool that allows you to select one or more objects on the canvas by clicking or dragging.
    • -
    • The move tool: The tool that allows you to move one or more objects on the canvas by dragging or using the arrows.
    • -
    • The rotate tool: The tool that allows you to rotate one or more objects on the canvas by dragging or using the circles.
    • -
    • The scale tool: The tool that allows you to scale one or more objects on the canvas by dragging or using the squares.
    • -
    • The align tool: The tool that allows you to align one or more objects on the canvas by using the icons.
    • -
    • The distribute tool: The tool that allows you to distribute one or more objects on the canvas by using the icons.
    • -
    • The group tool: The tool that allows you to group one or more objects on the canvas by clicking on the icon.
    • -
    • The ungroup tool: The tool that allows you to ungroup one or more objects on the canvas by clicking on the icon.
    • -
    • The duplicate tool: The tool that allows you to duplicate one or more objects on the canvas by clicking on the icon.
    • -
    • The delete tool: The tool that allows you to delete one or more objects on the canvas by clicking on the icon.
    • -
    • The undo tool: The tool that allows you to undo your last action on the canvas by clicking on the icon.
    • -
    • The redo tool: The tool that allows you to redo your last action on the canvas by clicking on the icon.
    • -
    • The zoom tool: The tool that allows you to zoom in or out of your scene on the canvas by scrolling or using

      the icons.

    • -
    • The pan tool: The tool that allows you to pan your scene on the canvas by dragging or using the icons.
    • -
    • The orbit tool: The tool that allows you to orbit your scene on the canvas by dragging or using the icons.
    • -
    • The dolly tool: The tool that allows you to dolly your scene on the canvas by dragging or using the icons.
    • -
    • The horizon tool: The tool that allows you to adjust the horizon of your scene on the canvas by dragging or using the icons.
    • -
    • The camera tool: The tool that allows you to switch between different camera views of your scene on the canvas by clicking on the icons.
    • -
    • The bookmark tool: The tool that allows you to save and access different camera views of your scene on the canvas by clicking on the icons.
    • -
    • The render tool: The tool that allows you to export your scene as a high-quality image or a multi-layered PSD file by clicking on the icon.
    • -
    • The assets panel: The panel that allows you to access and import various assets for your scene, such as models, materials, lights, images, etc.
    • -
    • The properties panel: The panel that allows you to edit and modify the properties and appearance of your objects, such as position, rotation, scale, material, light, background, effect, etc.
    • -
    • The actions panel: The panel that allows you to access and apply various actions for your scene, such as match image, magic layout, render cloud, etc.
    • -
    • The settings panel: The panel that allows you to access and change various settings for your scene, such as canvas size, grid, snapping, rendering quality, etc.
    • -
    -

    The examples and tutorials to learn from and get inspired by

    -

    If you want to learn more about how to use Adobe Dimension CC 2020 to create photorealistic 3D images, you can check out some of the examples and tutorials that are available online. Here are some of them:

    -
      -
    • The official Adobe Dimension website: The website that provides various information and resources for Adobe Dimension CC 2020, such as features, updates, tutorials, tips, inspiration, etc.
    • -
    • The official Adobe Dimension YouTube channel: The channel that provides various videos and playlists for Adobe Dimension CC 2020, such as tutorials, tips, tricks, showcases, etc.
    • -
    • The official Adobe Dimension help center: The help center that provides various articles and guides for Adobe Dimension CC 2020, such as getting started, user guide, troubleshooting, etc.
    • -
    • The official Adobe Dimension community forum: The forum that provides various discussions and questions for Adobe Dimension CC 2020, such as feedback, suggestions, issues, etc.
    • -
    • The official Adobe Dimension blog: The blog that provides various stories and insights for Adobe Dimension CC 2020

      , such as news, updates, features, tips, etc.

    • -
    • The official Adobe Dimension Behance page: The page that provides various projects and portfolios for Adobe Dimension CC 2020, such as showcases, examples, inspiration, etc.
    • -
    • The official Adobe Dimension Instagram page: The page that provides various images and videos for Adobe Dimension CC 2020, such as showcases, examples, inspiration, etc.
    • -
    • The official Adobe Dimension Twitter page: The page that provides various tweets and retweets for Adobe Dimension CC 2020, such as news, updates, features, tips, etc.
    • -
    • The official Adobe Dimension Facebook page: The page that provides various posts and comments for Adobe Dimension CC 2020, such as news, updates, features, tips, etc.
    • -
    • The unofficial Adobe Dimension Reddit community: The community that provides various posts and comments for Adobe Dimension CC 2020, such as questions, answers, feedback, suggestions, issues, etc.
    • -
    -

    What are the pros and cons of Adobe Dimension CC 2020 with crack?

    -

    Adobe Dimension CC 2020 with crack is a pre-activated version of the software that you can download and install for free. However, it also has some pros and cons that you need to consider before using it. Here are some of them:

    -

    The advantages of using Adobe Dimension CC 2020 with crack

    -

    Some of the advantages of using Adobe Dimension CC 2020 with crack are:

    -
      -
    • You can save money by not paying for the subscription or the standalone product.
    • -
    • You can access all the features and updates of the software without any limitations or restrictions.
    • -
    • You can use the software offline without any internet connection or verification.
    • -
    • You can use the software on any PC without any activation or registration.
    • -
    -

    The disadvantages and risks of using Adobe Dimension CC 2020 with crack

    -

    Some of the disadvantages and risks of using Adobe Dimension CC 2020 with crack are:

    -
      -
    • You may violate the terms and conditions of Adobe and face legal consequences or penalties.
    • -
    • You may not receive any support or assistance from Adobe or FileCR.com if you encounter any errors or issues while using the software.
    • -
    • You may expose your PC to viruses, malware, or spyware that may harm your system or steal your personal information.
    • -
    • You may compromise the quality and performance of the software by using a cracked version that may contain bugs or errors.
    • -
    -

    Conclusion

    -

    In conclusion, Adobe Dimension CC 2020 is a powerful and advanced application that allows you to create stunning 3D images with realistic effects. It is designed for graphic designers who want to enhance their skills and workflow without learning complex and specialized applications. It has many features and benefits that make it a great choice for creating photorealistic 3D images. However, it also has some drawbacks and risks that you need to consider before using it.

    -

    If you want to download and install Adobe Dimension CC 2020 with crack for free

    , you can follow the steps that we have provided in this article. You can also check out some of the examples and tutorials that we have listed in this article to learn more about how to use the software effectively. However, you also need to be aware of the pros and cons of using Adobe Dimension CC 2020 with crack and take some precautions and tips to avoid or fix any errors or issues that may arise.

    -

    We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    -

    FAQs

    -

    Here are some of the frequently asked questions and answers about Adobe Dimension CC 2020 with crack:

    -
      -
    1. What is the difference between Adobe Dimension CC 2020 and Adobe Photoshop?
    2. -
    3. Adobe Dimension CC 2020 and Adobe Photoshop are both Adobe products that can be used for creating and editing images. However, Adobe Dimension CC 2020 is more focused on creating photorealistic 3D images and composite 2D and 3D assets in a realistic environment, while Adobe Photoshop is more focused on creating and editing 2D images and graphics with various tools and effects.
    4. -
    5. Can I use Adobe Dimension CC 2020 with crack on Mac?
    6. -
    7. No, Adobe Dimension CC 2020 with crack is only available for Windows PC. If you want to use Adobe Dimension CC 2020 on Mac, you need to purchase the subscription or the standalone product from the official Adobe website.
    8. -
    9. How can I update Adobe Dimension CC 2020 with crack?
    10. -
    11. You cannot update Adobe Dimension CC 2020 with crack, as it is a pre-activated version of the software that does not require any updates or verification. If you want to update Adobe Dimension CC 2020, you need to purchase the subscription or the standalone product from the official Adobe website.
    12. -
    13. Is Adobe Dimension CC 2020 with crack safe to use?
    14. -
    15. Adobe Dimension CC 2020 with crack is not safe to use, as it may violate the terms and conditions of Adobe and face legal consequences or penalties. It may also expose your PC to viruses, malware, or spyware that may harm your system or steal your personal information. It may also compromise the quality and performance of the software by using a cracked version that may contain bugs or errors.
    16. -
    17. What are some alternatives to Adobe Dimension CC 2020 with crack?
    18. -
    19. Some of the alternatives to Adobe Dimension CC 2020 with crack are:
    20. -
        -
      • Blender: A free and open-source 3D creation suite that can be used for modeling, sculpting, animating, rendering, compositing, and more.
      • -
      • Cinema 4D: A professional 3D modeling, animation, and rendering software that can be used for creating photorealistic 3D images and scenes.
      • -
      • SketchUp: A simple and easy-to-use 3D modeling and design software that can be used for creating architectural, interior, landscape, and other types of 3D projects.
      • -
      -
    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Diablo 2 Cd Key Generator Crack Tips and Tricks for Successful Installation.md b/spaces/raedeXanto/academic-chatgpt-beta/Diablo 2 Cd Key Generator Crack Tips and Tricks for Successful Installation.md deleted file mode 100644 index d6b17a452794277de50d1273fb66b38a60c26ff2..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Diablo 2 Cd Key Generator Crack Tips and Tricks for Successful Installation.md +++ /dev/null @@ -1,144 +0,0 @@ - -

    Diablo 2 CD Key Generator Crack: How to Get a Free CD Key for Diablo 2 and Diablo 2 Lord of Destruction

    -

    Diablo 2 is one of the most popular and influential action role-playing games of all time. Released in 2000 by Blizzard Entertainment, it has captivated millions of players with its dark fantasy setting, addictive gameplay, and endless replayability. Diablo 2 also has an expansion pack called Diablo 2 Lord of Destruction, which adds more content, features, and challenges to the game.

    -

    Diablo 2 Cd Key Generator Crack


    DOWNLOAD »»» https://tinourl.com/2uL5iN



    -

    However, if you want to play Diablo 2 or Diablo 2 Lord of Destruction on your computer, you need a valid CD key. A CD key is a unique code that is required to install and run the game. Without a CD key, you cannot play the game online or offline.

    -

    But what if you don't have a CD key or you lost it? What if you want to play on multiple computers or accounts? What if you want to save some money and get the game for free? This is where a CD key generator crack comes in handy.

    -

    A CD key generator crack is a software tool that can create new CD keys for Diablo 2 and Diablo 2 Lord of Destruction. By using a CD key generator crack, you can bypass the need for an original CD key and play the game without any restrictions.

    -

    In this article, we will show you how to download Diablo 2 and Diablo 2 Lord of Destruction from Blizzard, how to use a CD key generator crack to get a free CD key for both games, and what are the risks and benefits of using such a tool. Let's get started!

    -

    How to Download Diablo 2 and Diablo 2 Lord of Destruction from Blizzard

    -

    If you already have the physical copies of Diablo 2 and Diablo 2 Lord of Destruction, you can skip this step. However, if you don't have them or you want to download the latest versions of the games, you can do so from Blizzard's official website.

    -

    Here are the steps you need to follow:

    -

    Diablo 2 Resurrected Cd Key Generator Free Download
    -How to Crack Diablo 2 Cd Key with Generator Online
    -Diablo 2 Lord of Destruction Cd Key Generator No Survey
    -Diablo 2 Classic Cd Key Generator and Crack for Mac
    -Diablo 2 Expansion Cd Key Generator and Crack for Windows
    -Diablo 2 Cd Key Generator Reddit Working 2023
    -Diablo 2 Cd Key Generator and Crack Torrent Download
    -Diablo 2 Cd Key Generator and Crack Serial Number
    -Diablo 2 Cd Key Generator and Crack Activation Code
    -Diablo 2 Cd Key Generator and Crack License Key
    -Diablo 2 Cd Key Generator and Crack Product Key
    -Diablo 2 Cd Key Generator and Crack Registration Code
    -Diablo 2 Cd Key Generator and Crack Patch
    -Diablo 2 Cd Key Generator and Crack Full Version
    -Diablo 2 Cd Key Generator and Crack Latest Update
    -Diablo 2 Cd Key Generator and Crack No Virus
    -Diablo 2 Cd Key Generator and Crack Safe and Secure
    -Diablo 2 Cd Key Generator and Crack Easy to Use
    -Diablo 2 Cd Key Generator and Crack Fast and Reliable
    -Diablo 2 Cd Key Generator and Crack Tested and Verified
    -Diablo 2 Cd Key Generator and Crack Free Trial
    -Diablo 2 Cd Key Generator and Crack Lifetime Access
    -Diablo 2 Cd Key Generator and Crack Unlimited Use
    -Diablo 2 Cd Key Generator and Crack Support All Platforms
    -Diablo 2 Cd Key Generator and Crack Compatible with All Versions
    -Diablo 2 Cd Key Generator and Crack No Installation Required
    -Diablo 2 Cd Key Generator and Crack No Password Needed
    -Diablo 2 Cd Key Generator and Crack No Human Verification
    -Diablo 2 Cd Key Generator and Crack No Captcha Required
    -Diablo 2 Cd Key Generator and Crack No Ads or Popups
    -Diablo 2 Cd Key Generator and Crack No Hidden Fees or Charges
    -Diablo 2 Cd Key Generator and Crack No Spam or Malware
    -Diablo 2 Cd Key Generator and Crack No Fake or Scam
    -Diablo 2 Cd Key Generator and Crack No Expired or Invalid Keys
    -Diablo 2 Cd Key Generator and Crack No Banned or Blocked Keys
    -Diablo 2 Cd Key Generator and Crack Guaranteed Working Keys
    -Diablo 2 Cd Key Generator and Crack High Quality Keys
    -Diablo 2 Cd Key Generator and Crack Unique and Random Keys
    -Diablo 2 Cd Key Generator and Crack Legit and Legal Keys
    -Diablo 2 Cd Key Generator and Crack Original and Genuine Keys
    -How to Get Free Diablo 2 Resurrected Beta Access with CD KEY GENERATOR CRACK
    -How to Play Online Multiplayer with DIABLO II CD KEY GENERATOR CRACK
    -How to Fix DIABLO II CD KEY GENERATOR CRACK Errors or Issues
    -How to Update DIABLO II CD KEY GENERATOR CRACK to Latest Version
    -How to Backup or Restore DIABLO II CD KEY GENERATOR CRACK Data
    -How to Uninstall or Remove DIABLO II CD KEY GENERATOR CRACK Completely
    -How to Customize or Modify DIABLO II CD KEY GENERATOR CRACK Settings
    -How to Enhance or Improve DIABLO II CD KEY GENERATOR CRACK Performance
    -How to Troubleshoot or Solve DIABLO II CD KEY GENERATOR CRACK Problems
    -How to Contact or Support DIABLO II CD KEY GENERATOR CRACK Developers

    -
      -
    1. Go to https://www.blizzard.com/en-us/download/ and create an account or log in with your existing one.
    2. -
    3. Go to "Account settings", in the "Redeem a code" box enter your old 16 digit cd-keys (one for D2 and one for LoD). You can find them on the back of your game manuals or cases. If you don't have them, you can use one of the free cd-keys listed below.
    4. -
    5. Then under "Games and subscriptions" you'll have Diablo 2 and LoD and be able to download the installer and patch files.
    6. -
    7. Run the installer files and follow the instructions on screen. You will need about 4 GB of free disk space for both games.
    8. -
    9. Once installed, run the patch files to update your games to the latest version (1.14d).
    10. -
    11. You're done! You can now launch your games from your desktop or start menu shortcuts.
    12. -
    -

    Here are some free cd-keys that you can use if you don't have your own:

    - - - - - - - - - -
    D2:PRTYN4NRBBC924N4EJ7W4VHJZH
    D2 LoD:DCT72T9NRTNZCP8D8KW48DBBNV
    D2:6J872T6DGNBJTZ2Z
    D2 LoD:DVFG9JK6JX7RNC94
    D2:FBT66798DMHNE8HD
    D2 LoD:GBNCPE8TBPE6RERE
    D2:PNFN22JVBNRBKV4J
    D2 LoD:YYDG2KVRNNGHNMKYFP2V4VFCWX
    -

    Note: These cd-keys are not guaranteed to work on Battle.net or be unbanned. Use them at your own risk.

    -

    How to Use a CD Key Generator Crack to Get a Free CD Key for Diablo 2 and Diablo 2 Lord of Destruction

    -

    Now that you have downloaded and installed your games, you might want to use a cd key generator crack to get a new cd key for them. This way, you can play online without any problems or create multiple accounts for different characters or realms.

    -

    A cd key generator crack is a software tool that can create new cd keys for both games based on an algorithm. You can use these cd keys instead of your original ones or change them whenever you want.

    -

    Here are the steps you need to follow:

    -
      -
    1. Download a reliable cd key generator crack from https://github.com/blizzhackers/diablo2-tools-utilities. This is a collection of tools and utilities for Diablo 2 that includes a cd key viewer, a defaultkey changer, a loaderkey creator, and other useful features.
    2. -
    3. Extract the zip file into your game folder (usually C:\Program Files (x86)\Diablo II).
    4. -
    5. Run the cdkey viewer.exe file. This will show you your current cd keys for both games.
    6. -
    7. Run the cdkey generator.exe file. This will generate new cd keys for both games based on an algorithm. You can copy them or save them as text files.
    8. -
    9. Run the loaderkey creator.exe file. This will create new .mpq files that contain your new cd keys. You can rename them as DIIKEY.mpq (for DII) or LODKEY.mpq (for LoD).
    10. -
    11. Copy these .mpq files into your game folder (usually C:\Program Files (x86)\Diablo II).
    12. -
    13. You're done! You can now launch your games with your new cd keys by using dloader.exe instead of game.exe or diablo ii.exe.
    14. -
    -

    Note: You can change your cd keys anytime by repeating these steps with different generated cd keys.

    -

    How to Connect to Battle.net and Play Online

    -

    If you want to play online with other players on Battle.net, you need to make sure that your new cd keys are valid and unbanned. Otherwise, you might get an error message or be kicked out of the server.

    -

    To check if your new cd keys are valid and unbanned, you can use this website: https://darcvigilante.site/. This is an online tool that can verify your cd keys for both games on all realms ( Continuing the article: US East, US West, Europe, and Asia). Just enter your cd keys and click on "Check". If your cd keys are valid and unbanned, you will see a green message saying "OK". If not, you will see a red message saying "Banned" or "Invalid". If your cd keys are banned, you can try to use another cd key generator crack to generate new ones. However, be aware that Blizzard may ban your cd keys at any time for violating their terms of use. You can also buy new cd keys from Blizzard's official website or other online retailers. If your cd keys are valid and unbanned, you can connect to Battle.net and play online with other players. Here are the steps you need to follow:

      -
    1. Launch your game with dloader.exe instead of game.exe or diablo ii.exe.
    2. -
    3. Click on "Battle.net" on the main menu.
    4. -
    5. Select the realm you want to play on (US East, US West, Europe, or Asia).
    6. -
    7. Enter your account name and password or create a new account if you don't have one.
    8. -
    9. You're done! You can now join or create games, chat with other players, trade items, etc.
    10. -
    -

    Note: You can create up to 18 characters per account and up to 8 accounts per cd key. You can also switch realms by changing your gateway with the gateway editor tool.

    -

    Risks and Benefits of Using a CD Key Generator Crack for Diablo 2 and Diablo 2 Lord of Destruction

    -

    Using a cd key generator crack for Diablo 2 and Diablo 2 Lord of Destruction may seem like a good idea to get free access to the game and play online without any limitations. However, there are also some risks and drawbacks that you should be aware of before using such a tool.

    -

    Here are some of the risks and benefits of using a cd key generator crack for Diablo 2 and Diablo 2 Lord of Destruction:

    -

    Risks

    -
      -
    • Legal issues: Using a cd key generator crack is illegal and violates Blizzard's terms of use. You may face legal consequences if you are caught using such a tool.
    • -
    • Malware: Some cd key generator cracks may contain viruses, trojans, spyware, or other malicious software that can harm your computer or steal your personal information. You should always scan any downloaded files with an antivirus program before running them.
    • -
    • Bans: Blizzard may ban your cd keys at any time for using a cd key generator crack. This means you will lose access to your account and characters and you will not be able to play online anymore. Bans are permanent and irreversible.
    • -
    • Errors: Some cd key generator cracks may not work properly or cause errors in your game. You may experience crashes, glitches, bugs, or other problems that can affect your gameplay.
    • -
    -

    Benefits

    -
      -
    • Free access: Using a cd key generator crack allows you to get free access to Diablo 2 and Diablo 2 Lord of Destruction without paying anything. You can save money and enjoy the game without any restrictions.
    • -
    • Multiple accounts: Using a cd key generator crack allows you to create multiple accounts for different characters or realms. You can have more variety and flexibility in your gameplay.
    • -
    • Backup keys: Using a cd key generator crack allows you to have backup keys in case you lose or forget your original ones. You can always generate new ones and continue playing.
    • -
    -

    Conclusion

    -

    In this article, we have shown you how to download Diablo 2 and Diablo 2 Lord of Destruction from Blizzard, how to use a cd key generator crack to get a free cd key for both games, and what are the risks and benefits of using such a tool.

    -

    We hope you have found this article helpful and informative. However, we do not encourage or endorse the use of a cd key generator crack for Diablo 2 and Diablo 2 Lord of Destruction. We respect Blizzard's rights and policies and we advise you to do the same.

    -

    If you want to play Diablo 2 and Diablo 2 Lord of Destruction legally and safely, we recommend you to buy the games from Blizzard's official website or other online retailers. This way, you will support the developers and enjoy the game without any problems or worries.

    -

    If you want to learn more about Diablo 2 and Diablo 2 Lord of Destruction, such as tips, tricks, guides, builds, mods, hacks, cheats, etc., please visit our website at https://www.example.com. We have everything you need to know about this amazing game.

    -

    Thank you for reading this article and happy gaming!

    -

    FAQs

    -

    Here are some common questions and answers related to the topic:

    -
      -
    1. Q: What is the difference between Diablo 2 Classic and Diablo 2 Lord of Destruction?
    2. -
    3. A: Diablo 2 Classic is the original version of the game that was released in 2000. It has four acts, five character classes (Amazon, Barbarian, Necromancer, Paladin, Sorceress), seven skill trees (one per class plus two common ones), four difficulty levels (Normal, Nightmare, Hell), six item quality levels (Normal, Magic, Rare, Set, Unique), four item sockets (one per item type), three gem types (Chipped, Flawed, Normal), three rune types (El - Dol), three horadric cube recipes (gem upgrade, rune upgrade Continuing the FAQs: , item repair), and a level cap of 99. Diablo 2 Lord of Destruction is the expansion pack for the game that was released in 2001. It adds a fifth act, two new character classes (Assassin, Druid), two new skill trees per class (one for Assassin and Druid plus one common one), three new difficulty levels (Players X, Ladder Only, Uber Tristram), six new item quality levels (Crafted, Magic with 4 properties, Rare with 6 properties, Set with partial bonuses, Unique with variable stats, Ancient), six item sockets (two per item type), five gem types (Normal, Flawed, Flawless, Perfect, Skull), 33 rune types (El - Zod), 78 horadric cube recipes (including runewords, crafted items, item upgrade, etc.), and a level cap of 99.
    4. -
    5. Q: What is a map hack and why is it used?
    6. -
    7. A: A map hack is a cheat tool that reveals the entire map of the game area. It allows the player to see hidden areas, enemies, items, shrines, waypoints, etc. without exploring them. It is used to gain an unfair advantage over other players or to speed up the gameplay.
    8. -
    9. Q: What is a plugy and why is it used?
    10. -
    11. A: A plugy is a mod that enhances the game features and options. It allows the player to access some features that are normally available only on Battle.net or on Ladder mode, such as infinite stash space, shared stash between characters, respec skills and stats anytime, access to Uber Tristram and Pandemonium events, etc. It also adds some new features such as world events, extra pages for stats and skills, multiple save files, etc. It is used to improve the game experience and enjoyment.
    12. -
    13. Q: What is a version changer and why is it used?
    14. -
    15. A: A version changer is a tool that allows the player to switch between different versions of the game. It can be used to play on different realms or servers that have different patch versions or to access some features or mods that are compatible only with certain versions. It can also be used to downgrade the game version to avoid some bugs or errors.
    16. -
    17. Q: What is a pixel bot and why is it used?
    18. -
    19. A: A pixel bot is a tool that automates some actions in the game based on pixel detection. It can be used to perform tasks such as magic finding, leveling up, farming items or gold, etc. without human intervention. It is used to save time and effort or to gain an edge over other players.
    20. -
    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Etka Id Username Password A Comprehensive Review of the ETKA Software.md b/spaces/raedeXanto/academic-chatgpt-beta/Etka Id Username Password A Comprehensive Review of the ETKA Software.md deleted file mode 100644 index 7e8a49f8cebf4da6f186e735aca9a1a486b2992b..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Etka Id Username Password A Comprehensive Review of the ETKA Software.md +++ /dev/null @@ -1,120 +0,0 @@ - -
    - Benefits of using ETKA online service for car owners, mechanics and enthusiasts | | **H2: What is ETKA Online Service?** | - Definition and description of ETKA online service
    - History and evolution of ETKA online service
    - Supported brands and models by ETKA online service | | **H2: How to Access ETKA Online Service?** | - Requirements for using ETKA online service
    - Steps to register and login to ETKA online service
    - Troubleshooting tips for common login issues | | **H2: How to Search, Compare and Order Car Parts Using ETKA Online Service?** | - Tips and tricks for navigating ETKA online service
    - How to search by various criteria such as part number, VIN, model code, etc.
    - How to compare prices and availability of different parts
    - How to order online and track your delivery using ETKA online service | | **H2: How to Access Technical Documentation and Repair Manuals Using ETKA Online Service?** | - Types and categories of technical documentation and repair manuals available on ETKA online service
    - How to view, download and print PDF files of technical documentation and repair manuals using ETKA online service
    - How to search for specific keywords or phrases within technical documentation and repair manuals using ETKA online service | | **H2: How to Update ETKA Online Service Regularly?** | - Why it is important to update ETKA online service
    - How to check for updates and install them using ETKA online service
    - How to backup and restore your data using ETKA online service | | **H2: Conclusion** | - Summary of the main points of the article
    - Call to action for the readers to try ETKA online service | # Article with HTML formatting

    How to Use ETKA Online Service for Car Parts and Repair Information

    -

    If you are a car owner, a mechanic, or a car enthusiast, you probably know how important it is to have accurate and reliable information about car parts and repair procedures. You don't want to waste time and money on buying the wrong parts, or risk damaging your car by following incorrect instructions. That's why you need ETKA online service.

    -

    Etka Id Username Password


    Download Zip 🌟 https://tinourl.com/2uKZdb



    -

    ETKA online service is an electronic catalog system that provides comprehensive and up-to-date information about original parts and accessories for various car brands and models. It also includes technical documentation, repair manuals, wiring diagrams, service bulletins, and more. With ETKA online service, you can easily find the right parts for your car model, compare prices and availability, order online, and access detailed instructions on how to install and maintain them.

    -

    In this article, we will show you how to use ETKA online service effectively. We will cover the following topics:

    -
      -
    • What is ETKA online service?
    • -
    • How to access ETKA online service?
    • -
    • How to search, compare and order car parts using ETKA online service?
    • -
    • How to access technical documentation and repair manuals using ETKA online service?
    • -
    • How to update ETKA online service regularly?
    • -
    -

    By the end of this article, you will have a clear understanding of how to use ETKA online service for your car parts and repair needs. So let's get started!

    -

    What is ETKA Online Service?

    -

    ETKA online service is an electronic catalog system that provides comprehensive and up-to-date information about original parts and accessories for Volkswagen, Audi, Seat, Skoda, Porsche, Bentley, Lamborghini, Bugatti, MAN and Scania vehicles . It was developed by LexCom GmbH in Germany and has been in use since 1989. It is updated regularly with new data and features to keep up with the latest developments and innovations in the automotive industry. ETKA online service covers a wide range of car models from different years and generations, from classic to modern ones. You can find parts and information for cars such as Golf, Passat, Polo, A3, A4, A6, Q5, Q7, Octavia, Fabia, Leon, Ibiza, Cayenne, Panamera, Continental GT, Aventador, Chiron, TGX, R500 and many more .

    -

    How to Access ETKA Online Service?

    -

    ETKA online service is available as an online service that you can access from any device with an internet connection. You don't need to download or install any software on your computer or phone. However, you do need some requirements before you can use ETKA online service.

    -

    First, you need a valid company ID, username and password that you can obtain from your authorized dealer or distributor. If you don't have them yet, you can contact them and request them. You will also need to agree to the terms and conditions of use of ETKA online service.

    -

    Second, you need a screen resolution of at least 1024x768 pixels. This will ensure that you can view all the information clearly and comfortably on your screen. You can adjust your screen resolution settings on your device if needed.

    -

    Etka login page
    -Etka online service login
    -Etka password manager
    -Etka engine code
    -Etka hardlock password
    -Etka myetkainfo ID
    -Etka username and password
    -Etka login windows 8
    -Etka electronic parts catalogue
    -Etka Volkswagen and Audi
    -Etka update password
    -Etka forgot password
    -Etka screen resolution
    -Etka LexCom GmbH
    -Etka registered trademark
    -Etka login form
    -Etka online service accedi
    -Etka web form filler
    -Etka engine code page
    -Etka login do
    -Etka password protects
    -Etka complete electronic
    -Etka update until 644
    -Etka login forgot password
    -Etka electronic parts catalog EPC
    -Etka company ID username password
    -Etka login screen resolution 1280x1024
    -Etka login screen resolution 1024x768
    -Etka login screen resolution 1680x1050
    -Etka RoboForm password manager
    -Etka VAG codes ETKA engine code page 002 EC KW HP Ltr Cyl 03.08.201517:05 Mounting Time Model Remark 1B 147 200 2,20 502/88-11/90 AUDI 200 1T 88 120 2,50 501/90-12/90 AUDI 100 T-DIESEL

    -

    Once you have these requirements ready, you can follow these steps to register and login to ETKA online service:

    -
      -
    1. Go to https://www.etka.com/etka/start.do on your browser.
    2. -
    3. Enter your company ID, username and password in the corresponding fields.
    4. -
    5. Select your preferred screen resolution from the drop-down menu.
    6. -
    7. Click on "Login" button.
    8. -
    9. You will be redirected to the main page of ETKA online service where you can start using it.
    10. -
    -

    If you encounter any issues while logging in, such as forgetting your password or getting an error message, you can try these troubleshooting tips:

    -
      -
    • Make sure that you enter your company ID, username and password correctly. Check for any typos or caps lock errors.
    • -
    • Make sure that your internet connection is stable and fast. Try refreshing your browser or switching to another network if possible.
    • -
    • Make sure that your browser is compatible with ETKA online service. Try updating your browser or switching to another one if possible.
    • -
    • If none of these tips work, you can contact your dealer or distributor for further assistance.
    • -
    -

    How to Search, Compare and Order Car Parts Using ETKA Online Service?

    -

    One of the main functions of ETKA online service is to help you find the right parts for your car model. You can search by various criteria such as part number, vehicle identification number (VIN), model code, engine code, transmission code, etc. You can also browse by categories such as engine, transmission, suspension, brakes, steering, bodywork, electrical system, etc.

    -

    Once you find the part you are looking for, you can view its details such as description, price, availability, compatibility, etc. You can also compare it with other similar parts from different brands or models. You can also see the images of the part and its location on the vehicle diagram.

    -

    If you want to order the part online, you can add it to your shopping cart by clicking on the "Order" button. You can then proceed to checkout by entering your delivery address and payment method. You will receive a confirmation email with your order details and tracking number once your order is processed.

    -

    How to Access Technical Documentation and Repair Manuals Using ETKA Online Service?

    -

    Another function of ETKA online service is to provide you with technical documentation and repair manuals for various car models. You can access them by clicking on the "Documentation" tab on the main page of ETKA online service. You will see a list of documents sorted by categories such as general information, maintenance schedules, diagnostic procedures, repair instructions, wiring diagrams, service bulletins, etc.

    -

    You can select the document you want to view by clicking on its title. You will see a PDF file that you can download or print for your convenience. You can also search for specific keywords or phrases within the document using the "Find" function on your browser.

    -

    How to Update ETKA Online Service Regularly?

    -

    To ensure that you always have the most accurate and up-to-date information about car parts and repair procedures, it is important that you update ETKA online service regularly. Updating ETKA online service will also improve its performance and fix any bugs or errors that may occur.

    -

    Why it is important to update ETKA online service?

    -

    Updating ETKA online service is important because:

    -
      -
    • It will provide you with new or updated information about car parts and accessories for various models and brands. This will help you find the right parts for your car and avoid buying the wrong ones.
    • -
    • It will provide you with new or updated technical documentation and repair manuals for various models and brands. This will help you learn the latest techniques and procedures for installing and maintaining your car parts.
    • -
    • It will improve the performance and speed of ETKA online service by optimizing its database and interface. This will help you navigate and use ETKA online service more smoothly and efficiently.
    • -
    • It will fix any bugs or errors that may occur in ETKA online service due to compatibility issues or corrupted files. This will help you avoid any problems or issues while using ETKA online service.
    • -
    -

    How to check for updates and install them using ETKA online service?

    -

    To check for updates and install them using ETKA online service, you can follow these steps:

    -
      -
    1. Login to ETKA online service as usual.
    2. -
    3. Click on the "Settings" icon on the top right corner of the main page of ETKA online service.
    4. -
    5. Select "Update" from the drop-down menu.
    6. -
    7. You will see a pop-up window that will show you if there are any updates available for your version of ETKA online service.
    8. -
    9. If there are updates available, click on the "Download" button to start downloading them. You will see a progress bar that will show you how much time is left until the download is completed.
    10. -
    11. Once the download is completed, click on the "Install" button to start installing them. You will see a confirmation message that will show you if the installation was successful or not.
    12. -
    13. You may need to restart your browser or device after installing the updates for them to take effect.
    14. -
    -

    How to backup and restore your data using ETKA online service?

    -

    To backup and restore your data using ETKA online service, you can follow these steps:

    -
    1. Login to ETKA online service as usual.
    2. Click on the "Settings" icon on the top right corner of the main page of ETKA online service.
    3. Select "Backup" from the drop-down menu.
    4. You will see a pop-up window that will show you where your data is stored on your device.
      - If you want to backup your data,
      -- Click on the "Backup" button
      -- Choose a location where you want to save your backup file
      -- Click on "Save"
      -- You will see a confirmation message that will show you if the backup was successful or not.
      - If you want to restore your data,
      -- Click on the "Restore" button
      -- Choose a backup file that you want to restore from
      -- Click on "Open"
      -- You will see a confirmation message that will show you if the restore was successful or not.
    5. You may need to restart your browser or device after restoring your data for them to take effect.
    -

    Conclusion

    -

    In this article, we have shown you how to use ETKA online service effectively. We have covered the following topics:

    -
    • What is ETKA online service?
    • How to access ETKA online service?
    • How to search, compare and order car parts using ETKA online service?
    • -
    • How to access technical documentation and repair manuals using ETKA online service?
    • -
    • How to update ETKA online service regularly?
    • -
    -

    ETKA online service is a powerful and convenient tool that can help you with your car parts and repair needs. It can save you time, money and hassle by providing you with accurate and reliable information about car parts and repair procedures. It can also help you learn more about your car and enjoy it better.

    -

    If you want to try ETKA online service for yourself, you can contact your authorized dealer or distributor and request your company ID, username and password. You can then register and login to ETKA online service and start exploring its features and functions. You will be amazed by how much ETKA online service can do for you.

    -

    FAQs

    -

    Here are some frequently asked questions about ETKA online service:

    -

    Q: How much does ETKA online service cost?

    -

    A: ETKA online service is a subscription-based service that requires a monthly or annual fee. The fee varies depending on the type and number of vehicles you want to access. You can contact your dealer or distributor for more information about the pricing and payment options.

    -

    Q: How secure is ETKA online service?

    -

    A: ETKA online service is very secure and uses encryption and authentication methods to protect your data and privacy. You can only access ETKA online service with a valid company ID, username and password that are assigned to you by your dealer or distributor. You can also change your password anytime you want.

    -

    Q: How often is ETKA online service updated?

    -

    A: ETKA online service is updated regularly with new data and features to keep up with the latest developments and innovations in the automotive industry. You can check for updates and install them using ETKA online service anytime you want. You will also receive notifications when there are new updates available.

    -

    Q: Can I use ETKA online service on my mobile device?

    -

    A: Yes, you can use ETKA online service on any device with an internet connection, such as a computer, tablet or smartphone. However, you need to make sure that your device has a screen resolution of at least 1024x768 pixels for optimal viewing.

    -

    Q: Can I print or download the information from ETKA online service?

    -

    A: Yes, you can print or download the information from ETKA online service for your convenience. You can print or download the details of the parts you are looking for, the technical documentation and repair manuals you need, or any other information you want. However, you need to make sure that you follow the terms and conditions of use of ETKA online service and respect the intellectual property rights of the content providers.

    -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/rajistics/library_metrics_forecasting/app.py b/spaces/rajistics/library_metrics_forecasting/app.py deleted file mode 100644 index 8dba390983912a2f5de8d380b9bcf91cf18cc2b2..0000000000000000000000000000000000000000 --- a/spaces/rajistics/library_metrics_forecasting/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import gradio as gr -import pypistats -from datetime import date -from dateutil.relativedelta import relativedelta -import pandas as pd -from prophet import Prophet - - -pd.options.plotting.backend = "plotly" - -def get_forecast(lib, time): - - data = pypistats.overall(lib, total=True, format="pandas") - data = data.groupby("category").get_group("with_mirrors").sort_values("date") - start_date = date.today() - relativedelta(months=int(time.split(" ")[0])) - df = data[(data['date'] > str(start_date))] - - df1 = df[['date','downloads']] - df1.columns = ['ds','y'] - - m = Prophet() - m.fit(df1) - future = m.make_future_dataframe(periods=90) - forecast = m.predict(future) - fig1 = m.plot(forecast) - return fig1 - -with gr.Blocks() as demo: - - gr.Markdown( - """ - ## Pypi Download Stats 📈 with Prophet Forecasting - See live download stats for all of Hugging Face's open-source libraries 🤗 along with a 3 month forecast using Prophet - """) - with gr.Row(): - lib = gr.Dropdown(["transformers", "datasets", "huggingface-hub", "gradio"], value="transformers", label="Library") - time = gr.Dropdown(["3 months", "6 months", "9 months", "12 months"], value="12 months", label="Downloads over the last...") - - plt = gr.Plot() - - lib.change(get_forecast, [lib, time], plt) - time.change(get_forecast, [lib, time], plt) - demo.load(get_forecast, [lib, time], plt) - -demo.launch() \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Album Completo Eros Ramazzotti Noi Download Torrent [PORTABLE].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Album Completo Eros 
Ramazzotti Noi Download Torrent [PORTABLE].md deleted file mode 100644 index 767d1d00679b1f59dc3f04f391210ba9c19ebb3e..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Album Completo Eros Ramazzotti Noi Download Torrent [PORTABLE].md +++ /dev/null @@ -1,7 +0,0 @@ - -

    Link Torrent Download Full Jireh: Cheats Unlimited Game Key thingiverse Microsoft office Professional Full Zip Contaguidea Software Crack for Windows Compatible 72bit Download WinRar Key - lost-laptop.comdl.templates-editor.org 3gp come back rar. 2.13.1 Full Casio G-shock G4574 GPSr wmv free videos A nice fast download speeds of all video file types Multitrack mp3 Media Keygen for Windows Desktop Free Microsoft Office Professional 2015 Crack UPDATE! Version 2013 2015 EXE / Dll Setup Download WinXp With Key! Win7 with Or Without Serial Keys! 10 Step by Step Cracked doskey 2012_11_15_rs7_12870893d3da350.php deneme megasync free download 720p download bropture samy rar. 2.15 Quick Search Downloaders Downloader for Windows Full Crack Download WinXp With Key or Without Key! Win7 with Or Without Serial Keys! 10 Step by Step Cracked For Windows Desktop Free

    -

    album completo eros ramazzotti noi download torrent


    Download Ziphttps://urlgoal.com/2uCLGf



    -

    Pictures and Video Security Uninstaller SmartUpdater Torrent 2020 Crack Serial keygen Bdrip Free My Lucky Day Jackpot generator Software 1.0 Download vantivwelan.enved.fun A nice fast download speeds of all video file types Download The Video Clip Economy Online (2011) Crack For Rapidshare Torrent Download Full Jireh: Cheats Unlimited Game Key thingiverse Wcom Head Set S9/S10 Extended Version Software With Serial Number! Win7 with Or Without Serial Keys! 10 Step by Step Cracked For Windows Desktop Free

    -

    My Favorite TV Shows 4 Kids S01E08 720p 720p Bluray Dvd Audio In The Park 2.25 Inch Free Dvd Game Movies Movies Hack.exe Download Firma za pobratim boste Download Links for Yify 2.21.0.56 With Serial No Serial No Keygen Direct Links For Queer Comix BitTorrent Downloader!Best Collection Of Free Formats Torrents Pack Download Install Download Player On Your Windows PC!24 Hour Downloadation Free Download From The Webcom Download Crack Download Movies Torrents 36 Hit Movies To Download Torrent Free Download Free Download To Triple xp Download Free Download Full Version WinXp With Key! Win7 with Or Without Serial Keys! 10 Step by Step Cracked For Windows Desktop Free

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cara Flashing Andromax V3s.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cara Flashing Andromax V3s.md deleted file mode 100644 index 21c255c6ec73970b93ef69d31548f70dea14ffcc..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Cara Flashing Andromax V3s.md +++ /dev/null @@ -1,30 +0,0 @@ - -

    Cara Flashing Andromax V3s dengan QFIL di PC (Tested)

    -

    Andromax V3s adalah salah satu smartphone dari Smartfren yang memiliki spesifikasi cukup menarik. Smartphone ini dilengkapi dengan layar 5 inci, kamera belakang 8 MP, kamera depan 5 MP, baterai 2300 mAh, dan prosesor Qualcomm Snapdragon 410. Namun, meskipun memiliki spesifikasi yang cukup baik, Andromax V3s juga bisa mengalami masalah seperti bootloop, hang, atau mati total. Jika Anda mengalami masalah tersebut, Anda bisa mencoba melakukan flashing Andromax V3s dengan QFIL di PC.

    -

    Cara Flashing Andromax V3s


    Download ===> https://urlgoal.com/2uCMsw



    -

    Flashing adalah proses menginstal ulang sistem operasi pada smartphone dengan menggunakan firmware atau ROM yang sesuai. Dengan melakukan flashing, Anda bisa memperbaiki masalah software yang terjadi pada smartphone Anda. Namun, sebelum melakukan flashing, Anda harus mempersiapkan beberapa hal terlebih dahulu, seperti:

    -
      -
    • PC atau laptop dengan sistem operasi Windows.
    • -
    • Kabel USB yang berkualitas baik.
    • -
    • Driver Qualcomm USB yang sudah diinstal di PC atau laptop Anda. Anda bisa mendownloadnya di sini.
    • -
    • QFIL Flashtool yang sudah diinstal di PC atau laptop Anda. Anda bisa mendownloadnya di sini.
    • -
    • Firmware atau ROM Andromax V3s yang sesuai dengan tipe dan versi smartphone Anda. Anda bisa mendownloadnya di sini.
    • -
    • Baterai smartphone Anda yang terisi minimal 50%.
    • -
    -

    Setelah Anda mempersiapkan semua hal di atas, Anda bisa melanjutkan ke langkah-langkah flashing Andromax V3s dengan QFIL di PC berikut ini:

    -
      -
    1. Matikan smartphone Anda dan lepas kartu SIM dan microSD jika ada.
    2. -
    3. Tekan dan tahan tombol Volume Atas dan Volume Bawah secara bersamaan, lalu sambungkan smartphone Anda ke PC atau laptop dengan menggunakan kabel USB.
    4. -
    5. Buka QFIL Flashtool yang sudah Anda instal di PC atau laptop Anda.
    6. -
    7. Pada bagian Select Build Type, pilih Flat Build.
    8. -
    9. Pada bagian Select Programmer, klik Browse dan cari file prog_emmc_firehose_8916.mbn yang ada di dalam folder firmware Andromax V3s yang sudah Anda download dan ekstrak.
    10. -
    11. Pada bagian Select Build, klik Load XML dan cari file rawprogram0.xml dan patch0.xml yang ada di dalam folder firmware Andromax V3s yang sudah Anda download dan ekstrak.
    12. -
    13. Pastikan smartphone Anda terdeteksi oleh QFIL Flashtool dengan melihat bagian Port. Jika smartphone Anda terdeteksi, akan muncul nomor COM dan Qualcomm HS-USB QDLoader 9008.
    14. -
    15. Klik Download untuk memulai proses flashing Andromax V3s dengan QFIL di PC.
    16. -
    17. Tunggu hingga proses flashing selesai. Jika berhasil, akan muncul tulisan Download Succeed pada QFIL Flashtool.
    18. -
    19. Lepaskan kabel USB dari smartphone Anda dan nyalakan kembali.
    20. -
    -

    Selamat! Anda telah berhasil melakukan flashing Andromax V3s dengan QFIL di PC. Sekarang smartphone Anda akan kembali normal seperti baru. Jika Anda mengalami masalah atau kesulitan saat

    -

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Firmware Nokia X2-01 Rm-709 V8.75 Bi.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Firmware Nokia X2-01 Rm-709 V8.75 Bi.md deleted file mode 100644 index f4afe2ab0728df7a142ba2a00442b171a2dd5b29..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Firmware Nokia X2-01 Rm-709 V8.75 Bi.md +++ /dev/null @@ -1,41 +0,0 @@ - -

    How to Flash Firmware Nokia X2-01 RM-709 V8.75 BI

    -

    If you have a Nokia X2-01 RM-709 device and you want to update or reinstall its firmware, you can use this guide to flash the official stock ROM (flash file) on your phone. The firmware Nokia X2-01 RM-709 V8.75 BI is the latest version available for this device and it comes with many improvements and bug fixes. Flashing the firmware can help you to solve various issues such as software errors, boot loops, IMEI problems, or dead issues.

    -

    Before you proceed, make sure you have the following requirements:

    -

    firmware nokia x2-01 rm-709 v8.75 bi


    DOWNLOAD ——— https://urlgoal.com/2uCJN1



    -
      -
    • A Nokia X2-01 RM-709 device with at least 50% battery charge.
    • -
    • A computer with Windows OS and a USB port.
    • -
    • A USB cable compatible with your device.
    • -
    • The firmware Nokia X2-01 RM-709 V8.75 BI zip file. You can download it from here or here.
    • -
    • The Miracle Box tool. You can download it from here.
    • -
    • The USB driver for your device. You can download it from here.
    • -
    • A backup of your data, as flashing the firmware will erase everything on your device.
    • -
    -

    Once you have everything ready, follow these steps to flash the firmware:

    -
      -
    1. Extract the firmware zip file on your computer. You will get a folder with the flash file, the flash tool, the driver, and the manual.
    2. -
    3. Install the USB driver on your computer. If you already have it installed, skip this step.
    4. -
    5. Run the Miracle Box tool as administrator on your computer.
    6. -
    7. Click on the MTK tab and then on the Write option.
    8. -
    9. Select your device model (Nokia X2-01 RM-709) from the drop-down menu.
    10. -
    11. Click on the folder icon and browse to the folder where you extracted the firmware zip file. Select the flash file (Nokia_X2-01_RM-709_MIRA.bin) and click on Open.
    12. -
    13. Turn off your device and remove the battery.
    14. -
    15. Connect your device to the computer using the USB cable while holding the volume down button.
    16. -
    17. The Miracle Box tool will detect your device and start flashing the firmware. Wait for the process to complete.
    18. -
    19. When the flashing is done, you will see a green Done message on the screen. Disconnect your device and reinsert the battery.
    20. -
    21. Turn on your device and enjoy the new firmware.
    22. -
    -

    Congratulations! You have successfully flashed the firmware Nokia X2-01 RM-709 V8.75 BI on your device. If you have any questions or problems, feel free to leave a comment below.

    - -

    If you want to learn more about the firmware Nokia X2-01 RM-709 V8.75 BI, here are some of its features and benefits:

    -
      -
    • The firmware Nokia X2-01 RM-709 V8.75 BI is the official and stable version released by Nokia for this device. It is compatible with all regions and languages.
    • -
    • The firmware Nokia X2-01 RM-709 V8.75 BI improves the performance and stability of your device. It also fixes some bugs and glitches that may cause your device to freeze, crash, or restart unexpectedly.
    • -
    • The firmware Nokia X2-01 RM-709 V8.75 BI enhances the security and privacy of your device. It updates the system patches and protects your device from malware and hackers.
    • -
    • The firmware Nokia X2-01 RM-709 V8.75 BI optimizes the battery life and power consumption of your device. It also improves the charging speed and prevents overheating.
    • -
    • The firmware Nokia X2-01 RM-709 V8.75 BI adds some new features and functions to your device. For example, it supports more audio and video formats, enables faster data transfer, and allows you to customize the user interface.
    • -
    -

    As you can see, the firmware Nokia X2-01 RM-709 V8.75 BI is a great update for your device. It will make your device run smoother, faster, and safer. We hope you enjoy using it and have a wonderful experience with your Nokia X2-01 RM-709 device.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Half-Life 2 Lost Coast Game.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Half-Life 2 Lost Coast Game.md deleted file mode 100644 index 0185d56342f1773bc3c59e68069fc0eda330f6a1..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Half-Life 2 Lost Coast Game.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Half-Life 2: Lost Coast Game


    Download Ziphttps://urlgoal.com/2uCK5h



    - -Gamer-Inspired Cleansers. PlayStation Game Controller Soap - Avid gamers everywhere are sure to enjoy the design for the PlayStation Game ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/flyingchairs_320x448.py b/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/flyingchairs_320x448.py deleted file mode 100644 index 823578cc7b2fe4256dc955cd0181460c93bea37a..0000000000000000000000000000000000000000 --- a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/flyingchairs_320x448.py +++ /dev/null @@ -1,94 +0,0 @@ -dataset_type = 'FlyingChairs' -data_root = 'data/FlyingChairs_release' - -img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False) - -global_transform = dict( - translates=(0.05, 0.05), - zoom=(1.0, 1.5), - shear=(0.86, 1.16), - rotate=(-10., 10.)) - -relative_transform = dict( - translates=(0.00375, 0.00375), - zoom=(0.985, 1.015), - shear=(1.0, 1.0), - rotate=(-1.0, 1.0)) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict( - type='ColorJitter', - brightness=0.5, - contrast=0.5, - saturation=0.5, - hue=0.5), - dict(type='RandomGamma', gamma_range=(0.7, 1.5)), - dict(type='Normalize', **img_norm_cfg), - dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)), - dict(type='RandomFlip', prob=0.5, direction='horizontal'), - dict(type='RandomFlip', prob=0.5, direction='vertical'), - dict( - type='RandomAffine', - global_transform=global_transform, - relative_transform=relative_transform), - dict(type='RandomCrop', crop_size=(320, 448)), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['imgs', 'flow_gt'], - meta_keys=[ - 'img_fields', 'ann_fields', 'filename1', 'filename2', - 'ori_filename1', 'ori_filename2', 'filename_flow', - 'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg' - ]), -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='InputResize', exponent=6), - dict(type='Normalize', **img_norm_cfg), - dict(type='TestFormatBundle'), - dict( - 
type='Collect', - keys=['imgs'], - meta_keys=[ - 'flow_gt', 'filename1', 'filename2', 'ori_filename1', - 'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg', - 'scale_factor', 'pad_shape' - ]) -] - -flyingchairs_train = dict( - type=dataset_type, - pipeline=train_pipeline, - data_root=data_root, - split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt') - -data = dict( - train_dataloader=dict( - samples_per_gpu=1, - workers_per_gpu=2, - drop_last=True, - persistent_workers=True), - val_dataloader=dict( - samples_per_gpu=1, - workers_per_gpu=2, - shuffle=False, - persistent_workers=True), - test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False), - train=flyingchairs_train, - val=dict( - type=dataset_type, - pipeline=test_pipeline, - data_root=data_root, - test_mode=True, - split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'), - test=dict( - type=dataset_type, - pipeline=test_pipeline, - data_root=data_root, - test_mode=True, - split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt')) diff --git a/spaces/ritwikbiswas/incoder-complete/start.py b/spaces/ritwikbiswas/incoder-complete/start.py deleted file mode 100644 index 9bbdb39ce29980e3f110c311ecb13fcdfd4ed58e..0000000000000000000000000000000000000000 --- a/spaces/ritwikbiswas/incoder-complete/start.py +++ /dev/null @@ -1,3 +0,0 @@ -import subprocess - -subprocess.run("uvicorn modules.app:app --timeout-keep-alive 300 --host 0.0.0.0 --port 7860", shell=True) diff --git a/spaces/riyueyiming/gpt/run_Windows.bat b/spaces/riyueyiming/gpt/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- a/spaces/riyueyiming/gpt/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... 
- -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/robin0307/MMOCR/configs/textdet/dbnet/README.md b/spaces/robin0307/MMOCR/configs/textdet/dbnet/README.md deleted file mode 100644 index d2007c72ec2b45e70d30c6edea128b7e0be2baca..0000000000000000000000000000000000000000 --- a/spaces/robin0307/MMOCR/configs/textdet/dbnet/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# DBNet - -> [Real-time Scene Text Detection with Differentiable Binarization](https://arxiv.org/abs/1911.08947) - - - -## Abstract - -Recently, segmentation-based methods are quite popular in scene text detection, as the segmentation results can more accurately describe scene text of various shapes such as curve text. However, the post-processing of binarization is essential for segmentation-based detection, which converts probability maps produced by a segmentation method into bounding boxes/regions of text. In this paper, we propose a module named Differentiable Binarization (DB), which can perform the binarization process in a segmentation network. Optimized along with a DB module, a segmentation network can adaptively set the thresholds for binarization, which not only simplifies the post-processing but also enhances the performance of text detection. Based on a simple segmentation network, we validate the performance improvements of DB on five benchmark datasets, which consistently achieves state-of-the-art results, in terms of both detection accuracy and speed. In particular, with a light-weight backbone, the performance improvements by DB are significant so that we can look for an ideal tradeoff between detection accuracy and efficiency. Specifically, with a backbone of ResNet-18, our detector achieves an F-measure of 82.8, running at 62 FPS, on the MSRA-TD500 dataset. - -
    - -
    - -## Results and models - -### ICDAR2015 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :---------------------------------------: | :-------------------------------------------------: | :-------------: | :------------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------: | -| [DBNet_r18](/configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py) | ImageNet | ICDAR2015 Train | ICDAR2015 Test | 1200 | 736 | 0.731 | 0.871 | 0.795 | [model](https://download.openmmlab.com/mmocr/textdet/dbnet/dbnet_r18_fpnc_sbn_1200e_icdar2015_20210329-ba3ab597.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/dbnet/dbnet_r18_fpnc_sbn_1200e_icdar2015_20210329-ba3ab597.log.json) | -| [DBNet_r50dcn](/configs/textdet/dbnet/dbnet_r50dcnv2_fpnc_1200e_icdar2015.py) | [Synthtext](https://download.openmmlab.com/mmocr/textdet/dbnet/dbnet_r50dcnv2_fpnc_sbn_2e_synthtext_20210325-aa96e477.pth) | ICDAR2015 Train | ICDAR2015 Test | 1200 | 1024 | 0.814 | 0.868 | 0.840 | [model](https://download.openmmlab.com/mmocr/textdet/dbnet/dbnet_r50dcnv2_fpnc_sbn_1200e_icdar2015_20211025-9fe3b590.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/dbnet/dbnet_r50dcnv2_fpnc_sbn_1200e_icdar2015_20211025-9fe3b590.log.json) | - -## Citation - -```bibtex -@article{Liao_Wan_Yao_Chen_Bai_2020, - title={Real-Time Scene Text Detection with Differentiable Binarization}, - journal={Proceedings of the AAAI Conference on Artificial Intelligence}, - author={Liao, Minghui and Wan, Zhaoyi and Yao, Cong and Chen, Kai and Bai, Xiang}, - year={2020}, - pages={11474-11481}} -``` diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py deleted file mode 100644 index d8788c26c22fc4eda80dededade5d116a0411688..0000000000000000000000000000000000000000 
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py +++ /dev/null @@ -1,81 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' -] - -plugin = True -plugin_dir = 'projects/instance_segment_anything/' - -model = dict( - type='DetWrapperInstanceSAM', - det_wrapper_type='hdetr', - det_wrapper_cfg=dict(aux_loss=True, - backbone='swin_tiny', - num_classes=91, - cache_mode=False, - dec_layers=6, - dec_n_points=4, - dilation=False, - dim_feedforward=2048, - drop_path_rate=0.2, - dropout=0.0, - enc_layers=6, - enc_n_points=4, - focal_alpha=0.25, - frozen_weights=None, - hidden_dim=256, - k_one2many=6, - lambda_one2many=1.0, - look_forward_twice=True, - masks=False, - mixed_selection=True, - nheads=8, - num_feature_levels=4, - num_queries_one2many=1500, - num_queries_one2one=300, - position_embedding='sine', - position_embedding_scale=6.283185307179586, - remove_difficult=False, - topk=100, - two_stage=True, - use_checkpoint=False, - use_fp16=False, - with_box_refine=True), - det_model_ckpt='ckpt/swin_t_hdetr.pth', - num_classes=80, - model_type='vit_b', - sam_checkpoint='ckpt/sam_vit_b_01ec64.pth', - use_sam_iou=True, -) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. 
- -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] - -dataset_type = 'CocoDataset' -data_root = 'data/coco/' - -data = dict( - samples_per_gpu=1, - workers_per_gpu=1, - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/models/swin_transformer.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/models/swin_transformer.py deleted file mode 100644 index 6e335af2d67b1b2a772fd018b98d2b7b4455525c..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/projects/instance_segment_anything/models/hdetr/models/swin_transformer.py +++ /dev/null @@ -1,741 +0,0 @@ -# -------------------------------------------------------- -# Swin Transformer -# Copyright (c) 2021 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Written by Ze Liu, Yutong Lin, Yixuan Wei -# -------------------------------------------------------- - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -import numpy as np -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from mmdet.utils import get_root_logger - - -class Mlp(nn.Module): - """ Multilayer perceptron.""" - - def __init__( - self, - in_features, - hidden_features=None, - out_features=None, - act_layer=nn.GELU, - drop=0.0, - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or 
in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = ( - x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - ) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view( - B, H // window_size, W // window_size, window_size, window_size, -1 - ) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - """ Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 - """ - - def __init__( - self, - dim, - window_size, - num_heads, - qkv_bias=True, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) - ) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = ( - coords_flatten[:, :, None] - coords_flatten[:, None, :] - ) # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute( - 1, 2, 0 - ).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=0.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """ Forward function. 
- - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B_, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - q, k, v = ( - qkv[0], - qkv[1], - qkv[2], - ) # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = q @ k.transpose(-2, -1) - - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1) - ].view( - self.window_size[0] * self.window_size[1], - self.window_size[0] * self.window_size[1], - -1, - ) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1 - ).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze( - 1 - ).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SwinTransformerBlock(nn.Module): - """ Swin Transformer Block. - - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. 
Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__( - self, - dim, - num_heads, - window_size=7, - shift_size=0, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=nn.GELU, - norm_layer=nn.LayerNorm, - ): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - assert ( - 0 <= self.shift_size < self.window_size - ), "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, - window_size=to_2tuple(self.window_size), - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, - hidden_features=mlp_hidden_dim, - act_layer=act_layer, - drop=drop, - ) - - self.H = None - self.W = None - - def forward(self, x, mask_matrix): - """ Forward function. - - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - mask_matrix: Attention mask for cyclic shift. 
- """ - B, L, C = x.shape - H, W = self.H, self.W - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll( - x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2) - ) - attn_mask = mask_matrix - else: - shifted_x = x - attn_mask = None - - # partition windows - x_windows = window_partition( - shifted_x, self.window_size - ) # nW*B, window_size, window_size, C - x_windows = x_windows.view( - -1, self.window_size * self.window_size, C - ) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn( - x_windows, mask=attn_mask - ) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll( - shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2) - ) - else: - x = shifted_x - - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class PatchMerging(nn.Module): - """ Patch Merging Layer - - Args: - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x, H, W): - """ Forward function. 
- - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - """ - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - x = x.view(B, H, W, C) - - # padding - pad_input = (H % 2 == 1) or (W % 2 == 1) - if pad_input: - x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - - Args: - dim (int): Number of feature channels - depth (int): Depths of this stage. - num_heads (int): Number of attention head. - window_size (int): Local window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
- """ - - def __init__( - self, - dim, - depth, - num_heads, - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False, - ): - super().__init__() - self.window_size = window_size - self.shift_size = window_size // 2 - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList( - [ - SwinTransformerBlock( - dim=dim, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] - if isinstance(drop_path, list) - else drop_path, - norm_layer=norm_layer, - ) - for i in range(depth) - ] - ) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, H, W): - """ Forward function. - - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
- """ - - # calculate attention mask for SW-MSA - Hp = int(np.ceil(H / self.window_size)) * self.window_size - Wp = int(np.ceil(W / self.window_size)) * self.window_size - img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 - h_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - w_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition( - img_mask, self.window_size - ) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( - attn_mask == 0, float(0.0) - ) - - for blk in self.blocks: - blk.H, blk.W = H, W - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, attn_mask) - else: - x = blk(x, attn_mask) - if self.downsample is not None: - x_down = self.downsample(x, H, W) - Wh, Ww = (H + 1) // 2, (W + 1) // 2 - return x, H, W, x_down, Wh, Ww - else: - return x, H, W, x, H, W - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - - Args: - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. 
Default: None - """ - - def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - patch_size = to_2tuple(patch_size) - self.patch_size = patch_size - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2d( - in_chans, embed_dim, kernel_size=patch_size, stride=patch_size - ) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, H, W = x.size() - if W % self.patch_size[1] != 0: - x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) - if H % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) - - x = self.proj(x) # B C Wh Ww - if self.norm is not None: - Wh, Ww = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) - - return x - - -class SwinTransformer(nn.Module): - """ Swin Transformer backbone. - A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/pdf/2103.14030 - - Args: - pretrain_img_size (int): Input image size for training the pretrained model, - used in absolute postion embedding. Default 224. - patch_size (int | tuple(int)): Patch size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - depths (tuple[int]): Depths of each Swin Transformer stage. - num_heads (tuple[int]): Number of attention head of each stage. - window_size (int): Window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): Dropout rate. - attn_drop_rate (float): Attention dropout rate. Default: 0. 
- drop_path_rate (float): Stochastic depth rate. Default: 0.2. - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. - patch_norm (bool): If True, add normalization after patch embedding. Default: True. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__( - self, - pretrain_img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.2, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - use_checkpoint=False, - ): - super().__init__() - self.drop_path_rate = drop_path_rate - self.pretrain_img_size = pretrain_img_size - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.out_indices = out_indices - self.frozen_stages = frozen_stages - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None, - ) - - # absolute position embedding - if self.ape: - pretrain_img_size = to_2tuple(pretrain_img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [ - pretrain_img_size[0] // patch_size[0], - pretrain_img_size[1] // patch_size[1], - ] - - self.absolute_pos_embed = nn.Parameter( - torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) - ) - trunc_normal_(self.absolute_pos_embed, std=0.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [ - x.item() for x in 
torch.linspace(0, drop_path_rate, sum(depths)) - ] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = BasicLayer( - dim=int(embed_dim * 2 ** i_layer), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], - norm_layer=norm_layer, - downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - use_checkpoint=use_checkpoint, - ) - self.layers.append(layer) - - num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] - self.num_features = num_features - - # add a norm layer for each output - for i_layer in out_indices: - layer = norm_layer(num_features[i_layer]) - layer_name = f"norm{i_layer}" - self.add_module(layer_name, layer) - - self._freeze_stages() - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - - if self.frozen_stages >= 1 and self.ape: - self.absolute_pos_embed.requires_grad = False - - if self.frozen_stages >= 2: - self.pos_drop.eval() - for i in range(0, self.frozen_stages - 1): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - - def _init_weights(m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=0.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - if isinstance(pretrained, str): - self.apply(_init_weights) - logger = get_root_logger() - elif pretrained is None: - self.apply(_init_weights) - else: - raise TypeError("pretrained must be a str or None") - - def forward(self, x): - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = {} - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - - if i in self.out_indices: - norm_layer = getattr(self, f"norm{i}") - x_out = norm_layer(x_out) - - out = ( - x_out.view(-1, H, W, self.num_features[i]) - .permute(0, 3, 1, 2) - .contiguous() - ) - outs[str(i)] = out - - return outs - - def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformer, self).train(mode) - self._freeze_stages() diff --git a/spaces/rorallitri/biomedical-language-models/logs/Big Fish Audio - Dubstep Impact 2 [KONTAKTWAVREX2AIF] Free Download.md b/spaces/rorallitri/biomedical-language-models/logs/Big Fish Audio - Dubstep Impact 2 [KONTAKTWAVREX2AIF] Free Download.md deleted file mode 100644 index 55af18412278db8dc364aa181bd351f639a7c1ac..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Big Fish Audio - Dubstep Impact 2 [KONTAKTWAVREX2AIF] Free Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Big Fish Audio - Dubstep Impact 2 [KONTAKT,WAV,REX2,AIF] Free Download


    Download Zip >>>>> https://tinurll.com/2uzlBw



    -
    - If you're looking for the BEST Dubstep or Dubstep Bass Instrumentals pack then this is what you're looking for!With over 7 hours of all the Dubstep Constructions you need! You’ll find the largest variety of Dubstep construction kits within our Dubstep Impact 2 package. Whether you're looking for Bass Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Electro, Dubstep House, Dubstep House and more! Our Dubstep Impact 2 also features Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dubstep Dubstep, Dub 4fefd39f24
    -
    -
    -

    diff --git a/spaces/scedlatioru/img-to-music/Flareget Pro __LINK__ Crack Linux Passwords.md b/spaces/scedlatioru/img-to-music/Flareget Pro __LINK__ Crack Linux Passwords.md deleted file mode 100644 index 61a3f1d93d0c4f062d8d88511c128a70f9724fff..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/Flareget Pro __LINK__ Crack Linux Passwords.md +++ /dev/null @@ -1,55 +0,0 @@ -## Flareget Pro Crack Linux Passwords - - - -**Download - [https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2twIBB&sa=D&sntz=1&usg=AOvVaw1k5rBkO58-oFxjbtT8h0Qf](https://www.google.com/url?q=https%3A%2F%2Fblltly.com%2F2twIBB&sa=D&sntz=1&usg=AOvVaw1k5rBkO58-oFxjbtT8h0Qf)** - - - -# How to Crack Linux Passwords with FlareGet Pro - - - -FlareGet Pro is a popular download manager for Ubuntu and other Linux distributions. It offers features such as dynamic file segmentation, enhanced browser integration, intelligent file management system, resume support, and more. However, FlareGet Pro is not free software and requires a license key to unlock its full potential. - - - -Some users may be tempted to look for a crack for the full version of FlareGet Pro software for Ubuntu. However, this is not a good idea for several reasons. First of all, cracking software is illegal and unethical. You are depriving the developers of their rightful income and violating their intellectual property rights. Second, cracking software may expose your system to malware and viruses. You never know what malicious code may be hidden in the crack files or the websites that host them. Third, cracking software may not work as expected or may cause compatibility issues with your system or other applications. - - - -Instead of cracking FlareGet Pro software for Ubuntu, you have better alternatives. One option is to buy the license key from the official website[^1^]. They offer a 50% discount for students and a 30-day money-back guarantee. 
Another option is to use a different download manager that is free and open source. For example, you can use Internet Download Manager (IDM) on Linux with the help of Wine[^1^], or you can use Axel or Aria2[^2^], which are both available in the Ubuntu repositories. - - - -If you are interested in password cracking on Linux, you should also avoid using FlareGet Pro software for that purpose. FlareGet Pro is not designed for password cracking and has no features that would help you in that task. Instead, you should use specialized tools that are designed for password cracking on Linux. Some of the most popular and widely used password cracking tools on Linux are Hashcat[^3^], John the Ripper[^3^], Hydra[^3^], OphCrack[^3^], and Ncrack[^3^]. These tools can crack various types of passwords using different techniques such as dictionary attack, brute-force attack, hybrid attack, and rainbow table attack. - - - -Password cracking is a complex and challenging task that requires a lot of skill, knowledge, and computing power. It is also a potentially illegal and unethical activity if done without proper authorization and consent. Therefore, you should only use password cracking tools on Linux for legitimate purposes such as testing your own security or recovering your own passwords. - - - -In this article, we have discussed how to crack Linux passwords with FlareGet Pro software for Ubuntu. We have explained why cracking FlareGet Pro software for Ubuntu is not a good idea and what alternatives you have. We have also introduced some of the most popular and widely used password cracking tools on Linux and how they work. We hope you have found this article informative and useful. 
- - - -If you want to learn more about password cracking on Linux, you can check out some of the following resources: - - - -- [10 most popular password cracking tools \[updated 2020\]](https://resources.infosecinstitute.com/topic/10-popular-password-cracking-tools/) - -- [How to Crack Passwords in Linux](https://hostadvice.com/how-to/web-hosting/linux/how-to-crack-passwords-in-linux/) - -- [Comprehensive Guide on John the Ripper (Part 1)](https://www.hackingarticles.in/comprehensive-guide-on-john-the-ripper-part-1/) - -- [Comprehensive Guide on Hydra – A Brute Forcing Tool](https://www.hackingarticles.in/comprehensive-guide-on-hydra-a-brute-forcing-tool/) - -- [OphCrack : Windows Password Cracking Example Using Free Rainbow Tables](https://www.hackingarticles.in/ophcrack-windows-password-cracking-example-using-free-rainbow-tables/) - - - -Thank you for reading and happy hacking! - - dfd1c89656 \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Autodata 3.38 2021 Crack Instructions On How To 18.md b/spaces/scedlatioru/img-to-music/example/Autodata 3.38 2021 Crack Instructions On How To 18.md deleted file mode 100644 index 4b2c9fcf16706d6ca1787728df625143863a7475..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Autodata 3.38 2021 Crack Instructions On How To 18.md +++ /dev/null @@ -1,22 +0,0 @@ -

    autodata 3.38 crack instructions on how to 18


    Download File ››››› https://gohhs.com/2uEzj4



    - - . . . We’ll figure it out and update this topic. - -Autodata 3.38 Crack is a tool that helps users perform various tasks, such as updating iOS and other Apple devices. It has been developed to make the installation process of updates more convenient. You can choose between automatic updating and manual updating. The software makes your life easier by offering a convenient interface. - -Autodata 3.38 Crack [UPDATED]. The software lets you perform various tasks, such as updating your device, getting the latest news, downloading applications, and more. It can also be used for remote access. The developers have come up with some useful features for you to enjoy. - -Autodata 3.38 Keygen [Updated]. The software makes your life easier by offering a simple interface. - -Autodata 3.38 Crack + License Key [Updated]. This tool lets you perform various tasks, such as updating your device, getting the latest news, downloading applications, and more. It can also be used for remote access. - -Autodata 3.38 Crack + License Key [Latest]. The software makes your life easier by offering a simple interface. - -Autodata 3.38 Crack [Updated]. This tool lets you perform various tasks, such as updating your device, getting the latest news, downloading applications, and more. It can also be used for remote access. The software makes your life easier by offering a convenient interface. - -Autodata 3.38 Crack has a user-friendly interface. All you need to do is sign in to your account and you can start using it. The software does not impose any restrictions on the use of features and the installation process is straightforward. The software provides you with an intuitive interface. The software lets you perform various tasks, such as updating your device, getting the latest news, downloading applications, and more. The interface of Autodata 3.38 Keygen is simple and easy to understand. It is also a stable tool that can be used to monitor your device’s status. 
It provides you with a backup feature. You can also get the latest news using the software. The software makes your life easier by offering a convenient interface. - -Autodata 3.38 License Key has a simple interface. All you need to do is sign in to your account and you can start using it. The software does not impose any restrictions on the use of features and the installation process is 4fefd39f24
    -
    -
    -

    diff --git a/spaces/scedlatioru/img-to-music/example/ESET Smart Security 2020 Crack ((LINK)) License Key [New].md b/spaces/scedlatioru/img-to-music/example/ESET Smart Security 2020 Crack ((LINK)) License Key [New].md deleted file mode 100644 index 4f6ec9226fa8caff0c5339491f6f5236f6fdda96..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/ESET Smart Security 2020 Crack ((LINK)) License Key [New].md +++ /dev/null @@ -1,6 +0,0 @@ -

    ESET Smart Security 2020 Crack License Key [New]


    Download ••• https://gohhs.com/2uEAIE



    -
    -Dec 11, 2019 · Eset NOD32 keys version 8/9/10 username and password, latest license serial key for nod32 antivirus eset internet smart security premium 2020 ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/scedlatioru/img-to-music/example/Omsibus2serialnumber NEW!.md b/spaces/scedlatioru/img-to-music/example/Omsibus2serialnumber NEW!.md deleted file mode 100644 index ef569d65245d23e224452f7de146c7af8268d4b6..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Omsibus2serialnumber NEW!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    omsibus2serialnumber


    Download Ziphttps://gohhs.com/2uEzfB



    -
    -Tapes.. 64-minute Interview with... Bess Abigail, about her experiences as a journalist.... The stuff of legends. Bess Abigail’s account of growing up... Stories....., best friends with... other influential... included an account of.... 9/11 at age ten. Bess Abigail: I remember... Bess Abigail speaks candidly about... generation of... many years of legal... stories. Bess Abigail shares...., foreign policy and..... New Mexico's largest newspaper....... including the birth of.......? The most important... and the award-winning newspaper....... the community.................. The New Mexico Press Association gave............... the 1971................................................................................................................................................................................................................................................................................................. 4fefd39f24
    -
    -
    -

    diff --git a/spaces/scedlatioru/img-to-music/example/Xsplit 1.1.1209.0601 Crack 2021 64 Bit.md b/spaces/scedlatioru/img-to-music/example/Xsplit 1.1.1209.0601 Crack 2021 64 Bit.md deleted file mode 100644 index 502c8697d6aad9f5374698ac997ca7e83eca657a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Xsplit 1.1.1209.0601 Crack 2021 64 Bit.md +++ /dev/null @@ -1,38 +0,0 @@ -
    -

    Xsplit 1.1.1209.0601 Crack 64 bit: How to Download and Install the Latest Version of Xsplit Broadcaster

    - -

    If you are looking for a powerful and easy-to-use live streaming and recording software, you might want to check out Xsplit Broadcaster[^2^]. Xsplit Broadcaster is a software that allows you to create professional-quality live broadcasts and video recordings with your webcam, screen capture, game capture, and more. You can also add various effects, transitions, overlays, and plugins to enhance your stream or video.

    - -

    However, Xsplit Broadcaster is not a free software. You need to purchase a license or subscribe to a plan to unlock all the features and remove the watermark. If you are looking for a way to use Xsplit Broadcaster for free, you might be tempted to download a cracked version of the software, such as Xsplit 1.1.1209.0601 Crack 64 bit[^1^]. But is it safe and legal to do so? And how can you download and install it on your computer? In this article, we will answer these questions and more.

    -

    Xsplit 1.1.1209.0601 Crack 64 bit


    DOWNLOADhttps://gohhs.com/2uEAjh



    - -

    Is Xsplit 1.1.1209.0601 Crack 64 bit Safe and Legal?

    - -

    The short answer is no. Xsplit 1.1.1209.0601 Crack 64 bit is not a safe or legal way to use Xsplit Broadcaster. A cracked version of a software is a modified version that bypasses the security and authentication mechanisms of the original software. This means that you are using a software that has been tampered with by unknown sources, which can pose serious risks to your computer and your privacy.

    - -

    Some of the risks of using Xsplit 1.1.1209.0601 Crack 64 bit are:

    - -
      -
    • You might download malware or viruses along with the cracked software, which can damage your computer or steal your personal information.
    • -
    • You might experience poor performance, bugs, crashes, or compatibility issues with the cracked software, which can ruin your stream or video quality.
    • -
    • You might violate the terms of service and the intellectual property rights of Xsplit, which can result in legal actions or penalties.
    • -
    • You might miss out on the latest updates, features, support, and security patches from Xsplit, which can affect your user experience and expose you to vulnerabilities.
    • -
    - -

    Therefore, we do not recommend using Xsplit 1.1.1209.0601 Crack 64 bit or any other cracked version of Xsplit Broadcaster. Instead, we suggest that you purchase a license or subscribe to a plan from the official website of Xsplit[^2^], which will give you access to all the benefits and features of the software without any risks or limitations.

    - -

    How to Download and Install Xsplit 1.1.1209.0601 Crack 64 bit?

    - -

    If you still want to download and install Xsplit 1.1.1209.0601 Crack 64 bit despite the risks and consequences, here are the steps that you need to follow:

    - -
      -
    1. Go to a torrent site that hosts Xsplit 1.1.1209.0601 Crack 64 bit, such as frosty-chandrasekhar-99f6c1.netlify.app[^1^]. Be careful of fake or malicious sites that might harm your computer.
    2. -
    3. Download the torrent file of Xsplit 1.1.1209.0601 Crack 64 bit and open it with a torrent client, such as uTorrent or BitTorrent.
    4. -
    5. Wait for the download to finish and locate the folder where the cracked software is saved.
    6. -
    7. Run the setup file of Xsplit 1.1.1209.0601 Crack 64 bit and follow the installation instructions.
    8. -
    9. Launch Xsplit Broadcaster and enjoy using it for free.
    10. -
    - -

    Note: We do not endorse or support downloading or installing Xsplit 1.1.1209.0601 Crack 64 bit or any other cracked software. This article is for informational purposes only and we are not responsible for any damages or losses that may occur from using such software.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/sdhsdhk/bingosjj/src/app/page.tsx b/spaces/sdhsdhk/bingosjj/src/app/page.tsx deleted file mode 100644 index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/app/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import dynamic from 'next/dynamic' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { ssr: false } -) - -export default function IndexPage() { - return ( - <> -
    - - - ) -} diff --git a/spaces/seanpedrickcase/Light-PDF-Web-QA-Chatbot/app.py b/spaces/seanpedrickcase/Light-PDF-Web-QA-Chatbot/app.py deleted file mode 100644 index 622841fee84ac4f8dac5f14e72a0646d0d390f81..0000000000000000000000000000000000000000 --- a/spaces/seanpedrickcase/Light-PDF-Web-QA-Chatbot/app.py +++ /dev/null @@ -1,309 +0,0 @@ -# # Load in packages - -# + -import os - -# Need to overwrite version of gradio present in Huggingface spaces as it doesn't have like buttons/avatars (Oct 2023) -#os.system("pip uninstall -y gradio") -os.system("pip install gradio==3.42.0") - -from typing import TypeVar -from langchain.embeddings import HuggingFaceEmbeddings#, HuggingFaceInstructEmbeddings -from langchain.vectorstores import FAISS -import gradio as gr - -from transformers import AutoTokenizer - -# Alternative model sources -from ctransformers import AutoModelForCausalLM - -PandasDataFrame = TypeVar('pd.core.frame.DataFrame') - -# Disable cuda devices if necessary -#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' - -#from chatfuncs.chatfuncs import * -import chatfuncs.ingest as ing - -## Load preset embeddings, vectorstore, and model - -embeddings_name = "BAAI/bge-base-en-v1.5" - -def load_embeddings(embeddings_name = "BAAI/bge-base-en-v1.5"): - - - #if embeddings_name == "hkunlp/instructor-large": - # embeddings_func = HuggingFaceInstructEmbeddings(model_name=embeddings_name, - # embed_instruction="Represent the paragraph for retrieval: ", - # query_instruction="Represent the question for retrieving supporting documents: " - # ) - - #else: - embeddings_func = HuggingFaceEmbeddings(model_name=embeddings_name) - - global embeddings - - embeddings = embeddings_func - - return embeddings - -def get_faiss_store(faiss_vstore_folder,embeddings): - import zipfile - with zipfile.ZipFile(faiss_vstore_folder + '/' + faiss_vstore_folder + '.zip', 'r') as zip_ref: - zip_ref.extractall(faiss_vstore_folder) - - faiss_vstore = FAISS.load_local(folder_path=faiss_vstore_folder, 
embeddings=embeddings) - os.remove(faiss_vstore_folder + "/index.faiss") - os.remove(faiss_vstore_folder + "/index.pkl") - - global vectorstore - - vectorstore = faiss_vstore - - return vectorstore - -import chatfuncs.chatfuncs as chatf - -chatf.embeddings = load_embeddings(embeddings_name) -chatf.vectorstore = get_faiss_store(faiss_vstore_folder="faiss_embedding",embeddings=globals()["embeddings"]) - -def load_model(model_type, gpu_layers, gpu_config=None, cpu_config=None, torch_device=None): - print("Loading model") - - # Default values inside the function - if gpu_config is None: - gpu_config = chatf.gpu_config - if cpu_config is None: - cpu_config = chatf.cpu_config - if torch_device is None: - torch_device = chatf.torch_device - - if model_type == "Mistral Open Orca (larger, slow)": - if torch_device == "cuda": - gpu_config.update_gpu(gpu_layers) - else: - gpu_config.update_gpu(gpu_layers) - cpu_config.update_gpu(gpu_layers) - - print("Loading with", cpu_config.gpu_layers, "model layers sent to GPU.") - - print(vars(gpu_config)) - print(vars(cpu_config)) - - try: - #model = AutoModelForCausalLM.from_pretrained('Aryanne/Orca-Mini-3B-gguf', model_type='llama', model_file='q5_0-orca-mini-3b.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu()) - #model = AutoModelForCausalLM.from_pretrained('Aryanne/Wizard-Orca-3B-gguf', model_type='llama', model_file='q4_1-wizard-orca-3b.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu()) - model = AutoModelForCausalLM.from_pretrained('TheBloke/Mistral-7B-OpenOrca-GGUF', model_type='mistral', model_file='mistral-7b-openorca.Q4_K_M.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu()) - #model = AutoModelForCausalLM.from_pretrained('TheBloke/MistralLite-7B-GGUF', model_type='mistral', model_file='mistrallite.Q4_K_M.gguf', **vars(gpu_config)) # **asdict(CtransRunConfig_cpu()) - - except: - #model = AutoModelForCausalLM.from_pretrained('Aryanne/Orca-Mini-3B-gguf', model_type='llama', 
model_file='q5_0-orca-mini-3b.gguf', **vars(cpu_config)) #**asdict(CtransRunConfig_gpu()) - #model = AutoModelForCausalLM.from_pretrained('Aryanne/Wizard-Orca-3B-gguf', model_type='llama', model_file='q4_1-wizard-orca-3b.gguf', **vars(cpu_config)) # **asdict(CtransRunConfig_cpu()) - model = AutoModelForCausalLM.from_pretrained('TheBloke/Mistral-7B-OpenOrca-GGUF', model_type='mistral', model_file='mistral-7b-openorca.Q4_K_M.gguf', **vars(cpu_config)) # **asdict(CtransRunConfig_cpu()) - #model = AutoModelForCausalLM.from_pretrained('TheBloke/MistralLite-7B-GGUF', model_type='mistral', model_file='mistrallite.Q4_K_M.gguf', **vars(cpu_config)) # **asdict(CtransRunConfig_cpu()) - - tokenizer = [] - - if model_type == "Flan Alpaca (small, fast)": - # Huggingface chat model - hf_checkpoint = 'declare-lab/flan-alpaca-large'#'declare-lab/flan-alpaca-base' # # # - - def create_hf_model(model_name): - - from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM - - if torch_device == "cuda": - if "flan" in model_name: - model = AutoModelForSeq2SeqLM.from_pretrained(model_name, device_map="auto") - else: - model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto") - else: - if "flan" in model_name: - model = AutoModelForSeq2SeqLM.from_pretrained(model_name) - else: - model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True) - - tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length = chatf.context_length) - - return model, tokenizer, model_type - - model, tokenizer, model_type = create_hf_model(model_name = hf_checkpoint) - - chatf.model = model - chatf.tokenizer = tokenizer - chatf.model_type = model_type - - load_confirmation = "Finished loading model: " + model_type - - print(load_confirmation) - return model_type, load_confirmation, model_type - -# Both models are loaded on app initialisation so that users don't have to wait for the models to be downloaded -#model_type = "Mistral Open Orca (larger, slow)" 
-#load_model(model_type, chatf.gpu_layers, chatf.gpu_config, chatf.cpu_config, chatf.torch_device) - -model_type = "Flan Alpaca (small, fast)" -load_model(model_type, 0, chatf.gpu_config, chatf.cpu_config, chatf.torch_device) - -def docs_to_faiss_save(docs_out:PandasDataFrame, embeddings=embeddings): - - print(f"> Total split documents: {len(docs_out)}") - - print(docs_out) - - vectorstore_func = FAISS.from_documents(documents=docs_out, embedding=embeddings) - - - chatf.vectorstore = vectorstore_func - - out_message = "Document processing complete" - - return out_message, vectorstore_func - - # Gradio chat - -block = gr.Blocks(theme = gr.themes.Base())#css=".gradio-container {background-color: black}") - -with block: - ingest_text = gr.State() - ingest_metadata = gr.State() - ingest_docs = gr.State() - - model_type_state = gr.State(model_type) - embeddings_state = gr.State(globals()["embeddings"]) - vectorstore_state = gr.State(globals()["vectorstore"]) - - model_state = gr.State() # chatf.model (gives error) - tokenizer_state = gr.State() # chatf.tokenizer (gives error) - - chat_history_state = gr.State() - instruction_prompt_out = gr.State() - - gr.Markdown("

    Lightweight PDF / web page QA bot

    ") - - gr.Markdown("Chat with PDF, web page or (new) csv/Excel documents. The default is a small model (Flan Alpaca), that can only answer specific questions that are answered in the text. It cannot give overall impressions of, or summarise the document. The alternative (Mistral Open Orca (larger, slow)), can reason a little better, but is much slower (See Advanced tab).\n\nBy default the Lambeth Borough Plan '[Lambeth 2030 : Our Future, Our Lambeth](https://www.lambeth.gov.uk/better-fairer-lambeth/projects/lambeth-2030-our-future-our-lambeth)' is loaded. If you want to talk about another document or web page, please select from the second tab. If switching topic, please click the 'Clear chat' button.\n\nCaution: This is a public app. Please ensure that the document you upload is not sensitive is any way as other users may see it! Also, please note that LLM chatbots may give incomplete or incorrect information, so please use with care.") - - with gr.Row(): - current_source = gr.Textbox(label="Current data source(s)", value="Lambeth_2030-Our_Future_Our_Lambeth.pdf", scale = 10) - current_model = gr.Textbox(label="Current model", value=model_type, scale = 3) - - with gr.Tab("Chatbot"): - - with gr.Row(): - #chat_height = 500 - chatbot = gr.Chatbot(avatar_images=('user.jfif', 'bot.jpg'),bubble_full_width = False, scale = 1) # , height=chat_height - with gr.Accordion("Open this tab to see the source paragraphs used to generate the answer", open = False): - sources = gr.HTML(value = "Source paragraphs with the most relevant text will appear here", scale = 1) # , height=chat_height - - with gr.Row(): - message = gr.Textbox( - label="Enter your question here", - lines=1, - ) - with gr.Row(): - submit = gr.Button(value="Send message", variant="secondary", scale = 1) - clear = gr.Button(value="Clear chat", variant="secondary", scale=0) - stop = gr.Button(value="Stop generating", variant="secondary", scale=0) - - examples_set = gr.Radio(label="Examples for the Lambeth 
Borough Plan", - #value = "What were the five pillars of the previous borough plan?", - choices=["What were the five pillars of the previous borough plan?", - "What is the vision statement for Lambeth?", - "What are the commitments for Lambeth?", - "What are the 2030 outcomes for Lambeth?"]) - - - current_topic = gr.Textbox(label="Feature currently disabled - Keywords related to current conversation topic.", placeholder="Keywords related to the conversation topic will appear here") - - - - with gr.Tab("Load in a different file to chat with"): - with gr.Accordion("PDF file", open = False): - in_pdf = gr.File(label="Upload pdf", file_count="multiple", file_types=['.pdf']) - load_pdf = gr.Button(value="Load in file", variant="secondary", scale=0) - - with gr.Accordion("Web page", open = False): - with gr.Row(): - in_web = gr.Textbox(label="Enter web page url") - in_div = gr.Textbox(label="(Advanced) Web page div for text extraction", value="p", placeholder="p") - load_web = gr.Button(value="Load in webpage", variant="secondary", scale=0) - - with gr.Accordion("CSV/Excel file", open = False): - in_csv = gr.File(label="Upload CSV/Excel file", file_count="multiple", file_types=['.csv', '.xlsx']) - in_text_column = gr.Textbox(label="Enter column name where text is stored") - load_csv = gr.Button(value="Load in CSV/Excel file", variant="secondary", scale=0) - - ingest_embed_out = gr.Textbox(label="File/web page preparation progress") - - with gr.Tab("Advanced features"): - out_passages = gr.Slider(minimum=1, value = 2, maximum=10, step=1, label="Choose number of passages to retrieve from the document. 
Numbers greater than 2 may lead to increased hallucinations or input text being truncated.") - temp_slide = gr.Slider(minimum=0.1, value = 0.1, maximum=1, step=0.1, label="Choose temperature setting for response generation.") - with gr.Row(): - model_choice = gr.Radio(label="Choose a chat model", value="Flan Alpaca (small, fast)", choices = ["Flan Alpaca (small, fast)", "Mistral Open Orca (larger, slow)"]) - change_model_button = gr.Button(value="Load model", scale=0) - with gr.Accordion("Choose number of model layers to send to GPU (WARNING: please don't modify unless you are sure you have a GPU).", open = False): - gpu_layer_choice = gr.Slider(label="Choose number of model layers to send to GPU.", value=0, minimum=0, maximum=5, step = 1, visible=True) - - load_text = gr.Text(label="Load status") - - - gr.HTML( - "
    This app is based on the models Flan Alpaca and Mistral Open Orca. It powered by Gradio, Transformers, Ctransformers, and Langchain.
    " - ) - - examples_set.change(fn=chatf.update_message, inputs=[examples_set], outputs=[message]) - - change_model_button.click(fn=chatf.turn_off_interactivity, inputs=[message, chatbot], outputs=[message, chatbot], queue=False).\ - then(fn=load_model, inputs=[model_choice, gpu_layer_choice], outputs = [model_type_state, load_text, current_model]).\ - then(lambda: chatf.restore_interactivity(), None, [message], queue=False).\ - then(chatf.clear_chat, inputs=[chat_history_state, sources, message, current_topic], outputs=[chat_history_state, sources, message, current_topic]).\ - then(lambda: None, None, chatbot, queue=False) - - # Load in a pdf - load_pdf_click = load_pdf.click(ing.parse_file, inputs=[in_pdf], outputs=[ingest_text, current_source]).\ - then(ing.text_to_docs, inputs=[ingest_text], outputs=[ingest_docs]).\ - then(docs_to_faiss_save, inputs=[ingest_docs], outputs=[ingest_embed_out, vectorstore_state]).\ - then(chatf.hide_block, outputs = [examples_set]) - - # Load in a webpage - load_web_click = load_web.click(ing.parse_html, inputs=[in_web, in_div], outputs=[ingest_text, ingest_metadata, current_source]).\ - then(ing.html_text_to_docs, inputs=[ingest_text, ingest_metadata], outputs=[ingest_docs]).\ - then(docs_to_faiss_save, inputs=[ingest_docs], outputs=[ingest_embed_out, vectorstore_state]).\ - then(chatf.hide_block, outputs = [examples_set]) - - # Load in a csv/excel file - load_csv_click = load_csv.click(ing.parse_csv_or_excel, inputs=[in_csv, in_text_column], outputs=[ingest_text, current_source]).\ - then(ing.csv_excel_text_to_docs, inputs=[ingest_text, in_text_column], outputs=[ingest_docs]).\ - then(docs_to_faiss_save, inputs=[ingest_docs], outputs=[ingest_embed_out, vectorstore_state]).\ - then(chatf.hide_block, outputs = [examples_set]) - - # Load in a webpage - - # Click/enter to send message action - response_click = submit.click(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, 
embeddings_state, model_type_state, out_passages], outputs=[chat_history_state, sources, instruction_prompt_out], queue=False, api_name="retrieval").\ - then(chatf.turn_off_interactivity, inputs=[message, chatbot], outputs=[message, chatbot], queue=False).\ - then(chatf.produce_streaming_answer_chatbot, inputs=[chatbot, instruction_prompt_out, model_type_state, temp_slide], outputs=chatbot) - response_click.then(chatf.highlight_found_text, [chatbot, sources], [sources]).\ - then(chatf.add_inputs_answer_to_history,[message, chatbot, current_topic], [chat_history_state, current_topic]).\ - then(lambda: chatf.restore_interactivity(), None, [message], queue=False) - - response_enter = message.submit(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_state, model_type_state, out_passages], outputs=[chat_history_state, sources, instruction_prompt_out], queue=False).\ - then(chatf.turn_off_interactivity, inputs=[message, chatbot], outputs=[message, chatbot], queue=False).\ - then(chatf.produce_streaming_answer_chatbot, [chatbot, instruction_prompt_out, model_type_state, temp_slide], chatbot) - response_enter.then(chatf.highlight_found_text, [chatbot, sources], [sources]).\ - then(chatf.add_inputs_answer_to_history,[message, chatbot, current_topic], [chat_history_state, current_topic]).\ - then(lambda: chatf.restore_interactivity(), None, [message], queue=False) - - # Stop box - stop.click(fn=None, inputs=None, outputs=None, cancels=[response_click, response_enter]) - - # Clear box - clear.click(chatf.clear_chat, inputs=[chat_history_state, sources, message, current_topic], outputs=[chat_history_state, sources, message, current_topic]) - clear.click(lambda: None, None, chatbot, queue=False) - - # Thumbs up or thumbs down voting function - chatbot.like(chatf.vote, [chat_history_state, instruction_prompt_out, model_type_state], None) - -block.queue(concurrency_count=1).launch(debug=True) -# - - diff --git 
a/spaces/segments-tobias/conex/espnet2/bin/lm_train.py b/spaces/segments-tobias/conex/espnet2/bin/lm_train.py deleted file mode 100644 index f60e9f3b89162f347a44e95265f3ba4c9d615fb1..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/bin/lm_train.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python3 -from espnet2.tasks.lm import LMTask - - -def get_parser(): - parser = LMTask.get_parser() - return parser - - -def main(cmd=None): - """LM training. - - Example: - - % python lm_train.py asr --print_config --optim adadelta - % python lm_train.py --config conf/train_asr.yaml - """ - LMTask.main(cmd=cmd) - - -if __name__ == "__main__": - main() diff --git a/spaces/shgao/EditAnything/ldm/models/diffusion/ddim.py b/spaces/shgao/EditAnything/ldm/models/diffusion/ddim.py deleted file mode 100644 index 27ead0ea914c64c747b64e690662899fb3801144..0000000000000000000000000000000000000000 --- a/spaces/shgao/EditAnything/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,336 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor - - -class DDIMSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == 
self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - dynamic_threshold=None, - ucg_schedule=None, - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - elif isinstance(conditioning, list): - for ctmp in conditioning: - if ctmp.shape[0] != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - 
corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ucg_schedule=ucg_schedule - ) - return samples, intermediates - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, - ucg_schedule=None): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - if ucg_schedule is not None: - assert len(ucg_schedule) == len(time_range) - unconditional_guidance_scale = ucg_schedule[i] - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, - dynamic_threshold=None): - b, *_, device = *x.shape, x.device - - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - model_output = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - if isinstance(c, dict): - assert isinstance(unconditional_conditioning, dict) - c_in = dict() - for k in c: - if isinstance(c[k], list): - c_in[k] = [torch.cat([ - unconditional_conditioning[k][i], - c[k][i]]) for i in range(len(c[k]))] - else: - c_in[k] = torch.cat([ - unconditional_conditioning[k], - c[k]]) - elif isinstance(c, list): - c_in = list() - assert isinstance(unconditional_conditioning, list) - for i in range(len(c)): - c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) - else: - c_in = torch.cat([unconditional_conditioning, c]) - model_uncond, model_t = self.model.apply_model(x_in, t_in, 
c_in).chunk(2) - model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) - - if self.model.parameterization == "v": - e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) - else: - e_t = model_output - - if score_corrector is not None: - assert self.model.parameterization == "eps", 'not implemented' - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - if self.model.parameterization != "v": - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - else: - pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) - - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - - if dynamic_threshold is not None: - raise NotImplementedError() - - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - @torch.no_grad() - def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, - unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): - num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] - - assert t_enc <= num_reference_steps - num_steps = t_enc - - if use_original_steps: - alphas_next = self.alphas_cumprod[:num_steps] - alphas = self.alphas_cumprod_prev[:num_steps] - else: - alphas_next = self.ddim_alphas[:num_steps] - alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) - - x_next = x0 - intermediates = [] - inter_steps = [] - for i in tqdm(range(num_steps), desc='Encoding Image'): - t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) - if unconditional_guidance_scale == 1.: - noise_pred = self.model.apply_model(x_next, t, c) - else: - assert unconditional_conditioning is not None - e_t_uncond, noise_pred = torch.chunk( - self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), - torch.cat((unconditional_conditioning, c))), 2) - noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) - - xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next - weighted_noise_pred = alphas_next[i].sqrt() * ( - (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred - x_next = xt_weighted + weighted_noise_pred - if return_intermediates and i % ( - num_steps // return_intermediates) == 0 and i < num_steps - 1: - intermediates.append(x_next) - inter_steps.append(i) - elif return_intermediates and i >= num_steps - 2: - intermediates.append(x_next) - inter_steps.append(i) - if callback: callback(i) - - out = 
{'x_encoded': x_next, 'intermediate_steps': inter_steps} - if return_intermediates: - out.update({'intermediates': intermediates}) - return x_next, out - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + - extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) - - @torch.no_grad() - def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - use_original_steps=False, callback=None): - - timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) - x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - if callback: callback(i) - return x_dec \ No newline at end of file diff --git a/spaces/shivammehta25/Diff-TTSG/diff_ttsg/models/__init__.py b/spaces/shivammehta25/Diff-TTSG/diff_ttsg/models/__init__.py deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Best Permanent Unlock APK Tools for Android in 2023.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Best Permanent Unlock APK Tools for Android in 2023.md deleted file mode 100644 index 657b5bb104dd67b5d91648c293b9ebac4593b4fd..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Best Permanent Unlock APK Tools for Android in 2023.md +++ /dev/null @@ -1,137 +0,0 @@ -
    -

    What is Permanent Unlock APK and How to Use It?

    -

    If you are an Android user who wants to customize your device, install custom ROMs, or access advanced features, you may have heard of permanent unlock apk. This is a type of software that can help you unlock the bootloader, bypass the FRP lock, or root your device. But what exactly is permanent unlock apk and how can you use it safely? In this article, we will explain everything you need to know about permanent unlock apk and introduce some of the best tools for different Android devices.

    -

    permanent unlock apk


    Download File ->->->-> https://ssurll.com/2uNUxP



    -

    Introduction

    -

    Permanent unlock apk is a generic term that refers to any application that can permanently unlock your Android device from its original restrictions. Depending on the tool and the device, permanent unlock apk can perform different functions, such as:

    -
      -
    • Unlocking bootloader: This allows you to flash custom ROMs, kernels, or recovery images on your device. Bootloader is the software that runs before the operating system and decides what to load. By default, most Android devices have a locked bootloader that only accepts official firmware from the manufacturer.
    • -
    • Bypassing FRP lock: This allows you to remove the Google account verification that prevents you from using your device after a factory reset. FRP stands for Factory Reset Protection, which is a security feature introduced by Google in Android 5.0 Lollipop. It requires you to enter the Google account that was previously synced with your device before you can access it.
    • -
    • Rooting: This allows you to gain full control over your device and access system files and settings that are normally hidden or restricted. Rooting is the process of obtaining superuser privileges on your device, which means you can modify or delete anything on your device.
    • -
    -

    Using permanent unlock apk can have some benefits, such as:

    -
      -
    • Customizing your device according to your preferences and needs
    • -
    • Installing custom ROMs that offer better performance, features, or updates
    • -
    • Removing bloatware or unwanted apps that take up space and resources
    • -
    • Enhancing your device's security and privacy by installing ad blockers, firewalls, or VPNs
    • -
    • Backing up or restoring your device's data easily
    • -
    -

    However, using permanent unlock apk also comes with some risks, such as:

    -
      -
    • Voiding your device's warranty or violating its terms of service
    • -
    • Bricking your device or making it unusable if something goes wrong
    • -
    • Losing your data or settings if you don't backup properly
    • -
    • Exposing your device to malware or viruses if you download untrusted apps
    • -
    • Reducing your device's battery life or stability if you overclock or tweak it excessively
    • -
    -

    Permanent Unlock APK Tools for Different Android Devices

    -

    There are many permanent unlock apk tools available online, but not all of them are compatible with every Android device. Some tools are designed for specific brands or models, while others are more universal. Here are some of the most popular permanent unlock apk tools for different Android devices:

    Mi Unlock for Xiaomi Devices -

    If you have a Xiaomi device and you want to unlock its bootloader, you can use the official Mi Unlock tool. This tool is developed by Xiaomi and allows you to unlock the bootloader of your Xiaomi device with a few clicks. However, you need to have a Mi account with approved unlock permission and bind it to your device before using the tool. You also need to enable OEM unlocking and USB debugging on your device. Here are the steps to use Mi Unlock to unlock bootloader on Xiaomi devices:

    -
      -
    1. Download the Mi Unlock tool from [here](^1^) and install it on your PC.
    2. -
    3. Sign in with your Mi account on the tool and on your device.
    4. -
    5. Shut down your device manually, and hold volume down key and power button to enter Fastboot mode.
    6. -
    7. Connect your device to your PC using a USB cable and click \"Unlock\" on the Mi Unlock tool.
    8. -
    9. Wait for the process to complete and reboot your device.
    10. -
    -

    The pros of using Mi Unlock are:

    -

    * How to use Mi Unlock Bootloader APK to unlock Android bootloader
    -* Best device unlock APK tool - LockWiper (Android)
    -* Free download device unlock APK [2023 new] - iMyFone
    -* Top 3 bootloader unlock APKs to unlock Android bootloader easily
    -* Unlock your device by Google account bypass APK here - iMyFone
    -* How to bypass the FRP lock using LockWiper (Android)
    -* Device unlock APK for Samsung, LG, Sony, HTC, Huawei, etc.
    -* How to download and install device unlock APK on Android phone
    -* Benefits and risks of using bootloader unlock APKs on Android devices
    -* How to fix device unlock APK not working issues
    -* Device unlock APK vs official unlocking methods - which one is better?
    -* How to root Android device after using bootloader unlock APK
    -* How to customize and optimize Android device with bootloader unlock APK
    -* Device unlock APK for Android 9.0, 10.0, 11.0 and 12.0
    -* How to backup and restore data before using device unlock APK
    -* Device unlock APK for Mi MIX Alpha, Mi 10, Mi 9, Mi 8, etc.
    -* How to remove Google account verification (FRP) without password using device unlock APK
    -* Device unlock APK for different types of Android lock screen, such as PIN, pattern, password, fingerprint, etc.
    -* How to enable OEM unlock and USB debugging before using device unlock APK
    -* Device unlock APK for different models of Android phones from different brands
    -* How to download and extract device data package and firmware using device unlock APK
    -* How to enter fastboot mode and downloading mode with device unlock APK
    -* How to avoid being tracked or limited by previous Google account after using device unlock APK
    -* How to use a new Google account on the device after using device unlock APK
    -* How to get rid of the old Google account and FRP lock permanently with device unlock APK
    -* How to access all the features on the Android device with device unlock APK
    -* How to install custom ROM on Android device after using bootloader unlock APK
    -* How to update Android system and security patches after using bootloader unlock APK
    -* How to troubleshoot common problems and errors with device unlock APK
    -* How to uninstall device unlock APK from Android phone
    -* Device unlock APK reviews and ratings from real users
    -* Device unlock APK alternatives and comparisons with other tools
    -* Device unlock APK FAQs and tips for beginners
    -* Device unlock APK download links and sources for free and safe download
    -* Device unlock APK user guide and tutorial with screenshots and videos
    -* Device unlock APK features and specifications for different scenarios and purposes
    -* Device unlock APK compatibility and requirements for different Android versions and devices
    -* Device unlock APK support and customer service contact information
    -* Device unlock APK license and terms of use agreement
    -* Device unlock APK pros and cons analysis and evaluation

    -
      -
    • It is an official and reliable tool from Xiaomi.
    • -
    • It is easy to use and has a user-friendly interface.
    • -
    • It supports most Xiaomi devices running MIUI.
    • -
    -

    The cons of using Mi Unlock are:

    -
      -
    • You need to apply for unlocking permission and wait for approval, which may take up to 10 days.
    • -
    • You need to bind your Mi account to your device and wait for 72 hours before unlocking.
    • -
    • You may lose your data or settings if you don't backup properly.
    • -
    -

    LockWiper (Android) for Samsung Devices

    -

    If you have a Samsung device and you want to bypass the FRP lock or the screen lock, you can use LockWiper (Android). This tool is developed by iMyFone and allows you to remove any kind of lock from your Samsung device without losing data. You don't need to provide any password or Google account credentials to use this tool. Here are the steps to use LockWiper (Android) to bypass FRP lock and screen lock on Samsung devices:

    -
      -
    1. Download LockWiper (Android) from [here](^7^) and install it on your PC.
    2. -
    3. Launch the tool and choose \"Remove Google Lock (FRP)\" or \"Remove Screen Lock\" mode according to your need.
    4. -
    5. Connect your device to your PC using a USB cable and confirm your device information.
    6. -
    7. Follow the instructions on the screen to download and extract the relevant firmware package for your device.
    8. -
    9. Wait for the tool to install the firmware package on your device and remove the lock.
    10. -
    -

    The pros of using LockWiper (Android) are:

    -
      -
    • It is a powerful, efficient, and safe FRP removal tool.
    • -
    • It can remove FRP lock without password or Google account verification.
    • -
    • It can remove various Android screen lock, including PIN, pattern, password, fingerprint, and face lock.
    • -
    • It supports 6,000+ Android devices, including devices running on Android 12.0.
    • -
    -

    The cons of using LockWiper (Android) are:

    -
      -
    • You need to purchase the full version of the software to unlock all features.
    • -
    • You may void your warranty or violate the terms of service by using this tool.
    • -
    -

    KingoRoot for Other Android Devices

    -

    If you have an Android device other than Xiaomi or Samsung and you want to root it and unlock its bootloader, you can use KingoRoot. This tool is one of the most popular one-click root methods that can root and unlock most Android devices with ease. You can use either the PC version or the APK version of this tool. Here are the steps to use KingoRoot to root and unlock bootloader on other Android devices:

    -
  • Download KingoRoot from [here](^12^) and install it on your PC or your device.
  • Enable USB debugging and OEM unlocking on your device by going to Settings > Developer options.If you are using the PC version, connect your device to your PC using a USB cable. If you are using the APK version, launch the app on your device.Click \"Root\" on the KingoRoot tool or app and wait for the process to complete.The pros of using KingoRoot are:

    It is a simple and fast one-click root method that works for most Android devices.It can root and unlock bootloader simultaneously without wiping data. -
  • It supports various Android versions from 4.2.2 to 12.0.
  • -
-

The cons of using KingoRoot are:

-
    -
  • It may not work for some devices or cause bootloop or brick issues.
  • -
  • It may install unwanted apps or ads on your device without your consent.
  • -
  • It may expose your device to security risks or malware infections.
  • -
-

Conclusion

-

In conclusion, permanent unlock apk is a type of software that can help you unlock your Android device from its original restrictions, such as bootloader, FRP lock, or root. However, using permanent unlock apk also involves some benefits and risks that you should be aware of before proceeding. Depending on your device model and brand, you may need to use different permanent unlock apk tools to achieve your desired results. Some of the most popular tools are Mi Unlock for Xiaomi devices, LockWiper (Android) for Samsung devices, and KingoRoot for other Android devices. Before using any of these tools, make sure you backup your data and follow the instructions carefully. Also, be prepared to face the possible consequences of voiding your warranty or bricking your device.

-

FAQs

-

Here are some of the frequently asked questions about permanent unlock apk:

-

Q1: What is the difference between unlocking bootloader and rooting?

-

A1: Unlocking bootloader is the process of allowing your device to accept custom firmware or software, such as custom ROMs, kernels, or recovery images. Rooting is the process of obtaining superuser privileges on your device, which means you can modify or delete anything on your device. Unlocking bootloader is usually a prerequisite for rooting, but not vice versa.

-

Q2: Will using permanent unlock apk void my warranty?

-

A2: Yes, using permanent unlock apk will most likely void your warranty or violate the terms of service of your device manufacturer or carrier. This means that if you encounter any problems with your device after using permanent unlock apk, you will not be able to claim any warranty service or support from them.

-

Q3: Can I use permanent unlock apk on any Android device?

-

A3: No, not all Android devices are compatible with permanent unlock apk tools. Some devices have more strict security measures or encryption that prevent permanent unlock apk from working. Some devices may also require specific tools or methods to unlock them. Therefore, you should always check the compatibility and requirements of the tool and the device before using permanent unlock apk.

-

Q4: What are the advantages of unlocking bootloader and rooting?

-

A4: Unlocking bootloader and rooting can give you more freedom and control over your device. You can customize your device according to your preferences and needs, install custom ROMs that offer better performance, features, or updates, remove bloatware or unwanted apps that take up space and resources, enhance your device's security and privacy by installing ad blockers, firewalls, or VPNs, backup or restore your device's data easily, and more.

-

Q5: What are the disadvantages of unlocking bootloader and rooting?

-

A5: Unlocking bootloader and rooting can also have some drawbacks and risks. You can void your warranty or violate the terms of service of your device manufacturer or carrier, brick your device or make it unusable if something goes wrong, lose your data or settings if you don't backup properly, expose your device to malware or viruses if you download untrusted apps, reduce your device's battery life or stability if you overclock or tweak it excessively, and more.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call of Duty Mobile How to Install and Play on Your Android Device.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call of Duty Mobile How to Install and Play on Your Android Device.md deleted file mode 100644 index 0a47e4b998a308897090ee850017600821dda981..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Call of Duty Mobile How to Install and Play on Your Android Device.md +++ /dev/null @@ -1,102 +0,0 @@ -
-

How to Download Call of Duty Mobile on Play Store

-

Call of Duty Mobile is one of the most popular and exciting mobile games that you can play on your Android device. It offers you a thrilling experience of shooting, fighting, and surviving in various modes and maps. If you are a fan of Call of Duty franchise or just looking for a fun and action-packed game to play on your phone, then you should definitely try Call of Duty Mobile.

-

how to download call of duty mobile on play store


DOWNLOADhttps://ssurll.com/2uNZXm



-

What is Call of Duty Mobile?

-

Call of Duty Mobile is a free-to-play first-person shooter game developed by TiMi Studios and published by Activision. It was released globally in October 2019 and has since attracted millions of players from around the world. It is based on the Call of Duty series, which is one of the most successful video game franchises in history.

-

Features of Call of Duty Mobile

-

Call of Duty Mobile has many features that make it a great game to play on your mobile device. Some of these features are:

-
    -
  • Multiplayer modes: You can play various multiplayer modes such as Team Deathmatch, Domination, Kill Confirmed, Search and Destroy, Hardpoint, Free for All, Gunfight, Capture the Flag, and more. You can also play ranked matches to climb up the leaderboards and earn rewards.
  • -
  • Battle Royale mode: You can also play a 100-player battle royale mode where you have to parachute into a large map, loot weapons and items, fight against other players and zombies, and be the last one standing.
  • -
  • Classic maps: You can play on iconic maps from Call of Duty history such as Nuketown, Crash, Firing Range, Hijacked, Crossfire, Standoff, Raid, Summit, Rust, Shipment, Terminal, Highrise, and more.
  • -
  • Iconic characters: You can unlock and customize dozens of iconic characters from Call of Duty universe such as Captain Price, Ghost, Soap, Alex Mason, Frank Woods, John "Soap" MacTavish , Simon "Ghost" Riley , David "Section" Mason , Alex Mason , Frank Woods , Edward Richtofen , Nikto, Mara, Krueger, and more.
  • -
  • Powerful weapons: You can equip and upgrade a variety of weapons such as assault rifles, sniper rifles, shotguns, SMGs, LMGs, pistols, launchers, melee weapons, and more. You can also customize your weapons with skins, attachments, camos, and charms.
  • -
  • Operator skills and scorestreaks: You can unleash special abilities and rewards that can turn the tide of the battle. You can use operator skills such as Purifier, War Machine, Death Machine, Gravity Spikes, Sparrow, Tempest, and more. You can also use scorestreaks such as UAV, Counter-UAV, Predator Missile, Sentry Gun, Stealth Chopper, VTOL, and more.
  • -
-

Requirements for Call of Duty Mobile

-

Call of Duty Mobile is compatible with most Android devices that have at least 2 GB of RAM and run Android 5.1 or higher. However, for the best performance and experience, you may need a device with higher specifications. You can check the minimum and recommended requirements for Call of Duty Mobile on the official website. You will also need a stable internet connection to play the game online.

-

How to Download Call of Duty Mobile on Play Store

-

Downloading Call of Duty Mobile on Play Store is very easy and simple. You just need to follow these steps:

-

How to download call of duty mobile on play store in India
-How to download call of duty mobile on play store for free
-How to download call of duty mobile on play store without VPN
-How to download call of duty mobile on play store in Pakistan
-How to download call of duty mobile on play store on PC
-How to download call of duty mobile on play store in Philippines
-How to download call of duty mobile on play store not compatible
-How to download call of duty mobile on play store in Bangladesh
-How to download call of duty mobile on play store with OBB
-How to download call of duty mobile on play store in Nepal
-How to download call of duty mobile on play store in USA
-How to download call of duty mobile on play store 2023
-How to download call of duty mobile on play store latest version
-How to download call of duty mobile on play store in Malaysia
-How to download call of duty mobile on play store in Indonesia
-How to download call of duty mobile on play store after update
-How to download call of duty mobile on play store in Sri Lanka
-How to download call of duty mobile on play store in UAE
-How to download call of duty mobile on play store without wifi
-How to download call of duty mobile on play store faster
-How to download call of duty mobile on play store in Nigeria
-How to download call of duty mobile on play store in UK
-How to download call of duty mobile on play store in Canada
-How to download call of duty mobile on play store in Australia
-How to download call of duty mobile on play store in South Africa
-How to download call of duty mobile on play store error
-How to download call of duty mobile on play store step by step
-How to download call of duty mobile on play store using APKPure
-How to download call of duty mobile on play store using VPN Master
-How to download call of duty mobile on play store using QooApp
-How to download call of duty mobile on play store using TapTap
-How to download call of duty mobile on play store using Uptodown
-How to download call of duty mobile on play store using APKMirror
-How to download call of duty mobile on play store using APKMonk
-How to download call of duty mobile on play store using APKCombo
-How to download call of duty mobile on play store using APKFab
-How to download call of duty mobile on play store using APKPure.com
-How to download call of duty mobile on play store using VPN Proxy Master
-How to download call of duty mobile on play store using Turbo VPN Lite
-How to download call of duty mobile on play store using Panda VPN Pro
-How to download call of duty mobile on play store using NordVPN
-How to download call of duty mobile on play store using ExpressVPN
-How to download call of duty mobile on play store using Surfshark VPN
-How to download call of duty mobile on play store using CyberGhost VPN
-How to download call of duty mobile on play store using PureVPN
-How to download call of duty mobile on play store using IPVanish VPN
-How to download call of duty mobile on play store using Private Internet Access
-How to download call of duty mobile on play store using Hotspot Shield VPN

-

Step 1: Open Play Store app on your device

-

The first step is to open the Play Store app on your Android device. You can find it on your home screen or app drawer. If you don't have the Play Store app installed on your device, you can download it from here.

-

Step 2: Search for Call of Duty Mobile

-

The next step is to search for Call of Duty Mobile on the Play Store app. You can use the search bar at the top of the screen and type in "Call of Duty Mobile". You can also use voice search by tapping on the microphone icon and saying "Call of Duty Mobile".

-

Step 3: Tap on Install button

-

Once you find Call of Duty Mobile on the search results, tap on it to open its page. You will see some information about the game such as its rating, reviews, screenshots, videos, description, and more. You will also see a green Install button at the bottom right corner of the screen. Tap on it to start downloading the game.

-

Step 4: Wait for the download to finish

-

After you tap on the Install button, you will see a progress bar showing how much of the game has been downloaded. The game size is about 2 GB, so it may take some time depending on your internet speed and device storage. You can also pause or cancel the download at any time by tapping on the X button next to the progress bar.

-

Step 5: Launch Call of Duty Mobile and enjoy

-

Once the download is complete, you will see an Open button instead of the Install button. Tap on it to launch Call of Duty Mobile on your device. You will also see a shortcut icon for Call of Duty Mobile on your home screen or app drawer. You can use it to launch the game anytime you want.

-

Congratulations! You have successfully downloaded Call of Duty Mobile on Play Store. Now you can enjoy playing this amazing game on your Android device.

-

Tips and Tricks for Call of Duty Mobile

-

To help you get started with Call of Duty Mobile, here are some tips and tricks that you can use to improve your skills and have more fun:

-

Customize your controls and sensitivity

-

One of the first things you should do before playing Call of Duty Mobile is to customize your controls and sensitivity according to your preference and comfort. You can access the settings menu by tapping on the gear icon at the top right corner of the screen. There you can adjust various options such as layout, sensitivity, aim assist, gyroscope, auto fire, sound, graphics, and more. You can also test your settings in a practice mode before applying them.

-

Choose your preferred game mode and map

-

Call of Duty Mobile offers you a variety of game modes and maps to choose from. You can select your preferred game mode and map by tapping on the mode icon at the bottom left corner of the screen. There you can see all the available modes and maps that you can play. You can also filter them by category such as featured, core, casual, ranked, private, etc. You can also create your own custom matches with your own rules and settings. You can also invite your friends and join other players online.

-

Use your operator skills and scorestreaks wisely

-

Operator skills and scorestreaks are powerful abilities and rewards that you can use in Call of Duty Mobile. They can give you an edge over your enemies and help you win the game. However, you should use them wisely and strategically, as they have cooldowns and limitations. You can select your operator skill and scorestreaks by tapping on the loadout icon at the bottom right corner of the screen. There you can see all the available options and their descriptions. You can also unlock more operator skills and scorestreaks by leveling up and completing challenges.

-

Communicate with your teammates and friends

-

Call of Duty Mobile is a team-based game, so communication is very important. You can communicate with your teammates and friends by using the voice chat or text chat features. You can access them by tapping on the microphone or chat icons at the top left corner of the screen. You can also mute or unmute yourself or others by tapping on their names. Communication can help you coordinate your actions, share information, and have more fun.

-

Conclusion

-

Call of Duty Mobile is a fantastic game that you can download and play on your Android device. It offers you a lot of features, modes, maps, characters, weapons, and more that will keep you entertained for hours. It is also easy to download and install from Play Store. All you need to do is follow the steps in this article and you will be ready to enjoy this amazing game. We hope this article was helpful and informative for you. If you have any questions or feedback, please let us know in the comments below.

-

FAQs

-

Here are some frequently asked questions about Call of Duty Mobile:

-
    -
  1. Q: How much space does Call of Duty Mobile take on my device?
    A: Call of Duty Mobile requires about 2 GB of storage space on your device. However, this may vary depending on your device model and updates.
  2. -
  3. Q: How can I update Call of Duty Mobile?
    A: You can update Call of Duty Mobile by opening the Play Store app on your device and tapping on the My apps & games option. There you will see if there are any updates available for Call of Duty Mobile. You can also enable auto-update for Call of Duty Mobile by tapping on the three dots icon next to it and selecting Auto-update.
  4. -
  5. Q: How can I get free CP (COD Points) in Call of Duty Mobile?
    A: CP (COD Points) are the premium currency in Call of Duty Mobile that you can use to buy various items such as crates, bundles, battle passes, skins, etc. You can get free CP by completing tasks, watching ads, participating in events, or using third-party apps or websites. However, be careful as some of these methods may be illegal, unsafe, or fraudulent.
  6. -
  7. Q: How can I play Call of Duty Mobile on PC?
    A: You can play Call of Duty Mobile on PC by using an Android emulator such as BlueStacks, NoxPlayer, LDPlayer, etc. These emulators allow you to run Android apps and games on your PC with keyboard and mouse support. However, you may face some issues such as lag, compatibility, or bans.
  8. -
  9. Q: How can I report a hacker or cheater in Call of Duty Mobile?
    A: You can report a hacker or cheater in Call of Duty Mobile by tapping on their name in the match results screen or the leaderboard screen and selecting Report Player. You can also report them by contacting the customer support team via email or social media.
  10. -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download HiLook Software and App for Live View Playback and Alarm Notification.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download HiLook Software and App for Live View Playback and Alarm Notification.md deleted file mode 100644 index 87dd90240a585afcaeb02753b43d146c31b387d8..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download HiLook Software and App for Live View Playback and Alarm Notification.md +++ /dev/null @@ -1,206 +0,0 @@ -
-

What is HiLookVision and Why You Need It

-

If you are looking for a versatile video management software for your DVRs, NVRs, IP cameras, encoders, decoders, etc., you might want to check out HiLookVision. HiLookVision is a software developed by Hikvision, a leading provider of security products and solutions. HiLookVision allows you to monitor your cameras remotely, record and playback video, backup files, receive motion detection alerts, and more. In this article, we will show you how to download and use HiLookVision for Windows, Mac, Android, and iOS devices. We will also compare HiLookVision with other video management software and highlight its benefits and drawbacks.

-

hilook download


Download Filehttps://ssurll.com/2uNSfp



-

How to Download HiLookVision for Windows and Mac

-

HiLookVision is available for both Windows and Mac operating systems. You can download it from the official website of Hikvision or from the links below:

- -

To install HiLookVision on your PC, follow these steps:

-
    -
  1. Run the downloaded file and follow the instructions on the screen.
  2. -
  3. Choose the language and accept the license agreement.
  4. -
  5. Select the destination folder and click Next.
  6. -
  7. Wait for the installation process to complete and click Finish.
  8. -
  9. Launch HiLookVision from the desktop shortcut or the Start menu.
  10. -
-

How to Add Devices to HiLookVision

-

To add devices to HiLookVision, you need to use the Cloud P2P function. This function allows you to connect your devices without configuring the network settings or port forwarding. To use the Cloud P2P function, you need to create an account and register your devices on the Hik-Connect app or website. Then, you can add them to HiLookVision by following these steps:

-
    -
  1. Click on the Device Management icon on the toolbar.
  2. -
  3. Click on Add Device and select Cloud P2P Device.
  4. -
  5. Enter your Hik-Connect account information and click Login.
  6. -
  7. Select the devices you want to add and click Add.
  8. -
  9. Click OK to finish adding the devices.
  10. -
-

How to View Live Video from HiLookVision

-

To view live video from your devices on HiLookVision, follow these steps:

-
    -
  1. Click on the Live View icon on the toolbar.
  2. -
  3. Select the device or group you want to view and drag them to the desired window.
  4. -
  5. Adjust the layout and screen division as needed.
  6. -
  7. Use the toolbar buttons to control the live view, such as capture, record, audio, PTZ, etc.
  8. -
-

How to Playback Recorded Video from HiLookVision

-

To playback recorded video from your devices on HiLookVision, follow these steps:

-
    -
  1. Click on the Playback icon on the toolbar.
  2. -
  3. Select the device or group you want to playback and drag them to the desired window.
  4. -
  5. Select the date and time range you want to search for the video files.
  6. -
  7. Click on the Search button and wait for the results to appear.
  8. -
  9. Double-click on the file you want to play or drag it to the playback window.
  10. -
  11. Use the toolbar buttons to control the playback, such as pause, resume, speed, volume, etc.
  12. -
-

How to Receive Motion Detection Alarm Notification from HiLookVision

-

To receive motion detection alarm notification from your devices on HiLookVision, you need to enable and customize the motion detection settings on your devices first. You can do this by using the web browser or the Hik-Connect app. Then, you can follow these steps to receive the notification on HiLookVision:

-

hilook download for pc
-hilook download for windows 10
-hilook download for mac
-hilook download apk
-hilook download app
-hilook download software
-hilook download for android
-hilook download for iphone
-hilook download for ipad
-hilook download for laptop
-hilook download free
-hilook download latest version
-hilook download windows 7
-hilook download windows 8
-hilook download windows xp
-hilook download linux
-hilook download chromebook
-hilook download ios
-hilook download online
-hilook download offline
-hilook download setup
-hilook download exe
-hilook download zip
-hilook download rar
-hilook download filehippo
-hilook download softonic
-hilook download uptodown
-hilook download cnet
-hilook download old version
-hilook download new version
-hilook download 32 bit
-hilook download 64 bit
-hilook download macbook pro
-hilook download macbook air
-hilook download imac
-hilook download mac mini
-hilook download mac os x
-hilook download mac os catalina
-hilook download mac os big sur
-hilook download mac os monterey
-hilook download samsung galaxy s21
-hilook download samsung galaxy note 20 ultra
-hilook download huawei p40 pro
-hilook download oneplus 9 pro
-hilook download google pixel 5
-hilook download iphone 12 pro max
-hilook download iphone 13 pro max
-hilook download ipad pro 2021
-hilook download ipad air 2020

-
    -
  1. Click on the Alarm Management icon on the toolbar.
  2. -
  3. Click on Alarm Settings and select Motion Detection Alarm.
  4. -
  5. Check the box of Enable Alarm Notification and click OK.
  6. -
  7. Click on Alarm Information and select Motion Detection Alarm.
  8. -
  9. You will see a list of devices that have triggered motion detection alarms and their details.
  10. -
  11. You can click on View Live Video or View Playback Video to see the corresponding video of the alarm event.
  12. -
-

How to Download HiLookVision for Android and iOS

-

HiLookVision is also available for Android and iOS devices. You can download it from the Google Play Store or the App Store or from the links below:

- -

To install HiLookVision on your mobile device, follow these steps:

-
    -
  1. Open the app store and search for HiLookVision or scan the QR code below.
  2. -QR code for HiLookVision app -
  3. Tap on Install and wait for the app to download and install.
  4. -
  5. Open HiLookVision and accept the terms of service and privacy policy.
  6. -
  7. Allow HiLookVision to access your device's camera, microphone, storage, location, etc.
  8. -
-

How to Add Devices to HiLookVision

-

To add devices to HiLookVision on your mobile device, you need to use the Cloud P2P function as well. To use this function, you need to create an account and register your devices on the Hik-Connect app or website. Then, you can add them to HiLookVision by following these steps:

-
    -
  1. Tap on the Menu icon on the top left corner and select Device Management.
  2. -
  3. Tap on Add Device and select Cloud P2P Device.
  4. -
  5. Enter your Hik-Connect account information and tap Login.
  6. -
  7. Select the devices you want to add and tap Add.
  8. -
  9. Tap OK to finish adding the devices.
  10. -
-

How to View Live Video from HiLookVision

-

To view live video from your devices on HiLookVision on your mobile device, follow these steps:

-
    -
  1. Tap on Live View at the bottom of the screen.
  2. -
  3. Select the device or group you want to view and tap on the play icon.
  4. -
  5. Adjust the layout and screen division as needed.
  6. -
  7. Use the toolbar buttons to control the live view, such as capture, record, audio, PTZ, etc.
  8. -
-

How to Playback Recorded Video from HiLookVision

-

To playback recorded video from your devices on HiLookVision on your mobile device, follow these steps:

-
    -
  1. Tap on Playback at the bottom of the screen.
  2. -
  3. Select the device or group you want to playback and tap on the play icon.
  4. -
  5. Select the date and time range you want to search for the video files.
  6. -
  7. Tap on the Search button and wait for the results to appear.
  8. -
  9. Tap on the file you want to play or drag it to the playback window.
  10. -
  11. Use the toolbar buttons to control the playback, such as pause, resume, speed, volume, etc.
  12. -
-

How to Receive Motion Detection Alarm Notification from HiLookVision

-

To receive motion detection alarm notification from your devices on HiLookVision on your mobile device, you need to enable and customize the motion detection settings on your devices first. You can do this by using the web browser or the Hik-Connect app. Then, you can follow these steps to receive the notification on HiLookVision:

-
    -
  1. Tap on Alarm at the bottom of the screen.
  2. -
  3. Tap on Alarm Settings and select Motion Detection Alarm.
  4. -
  5. Check the box of Enable Alarm Notification and tap OK.
  6. -
  7. Tap on Alarm Information and select Motion Detection Alarm.
  8. -
  9. You will see a list of devices that have triggered motion detection alarms and their details.
  10. -
  11. You can tap on View Live Video or View Playback Video to see the corresponding video of the alarm event.
  12. -
-

Benefits of Using HiLookVision

-

HiLookVision is not the only video management software available in the market. There are other alternatives, such as iVMS-4200, iVMS-4500, EZVIZ Studio, etc. However, HiLookVision has some advantages and disadvantages that make it different from others. Let's take a look at them below:

-

Advantages of HiLookVision

-
    -
  • HiLookVision is compatible with various devices from Hikvision and other brands. You can use it to manage your DVRs, NVRs, IP cameras, encoders, decoders, etc.
  • -
  • HiLookVision supports Cloud P2P function, which makes it easy to connect your devices without configuring the network settings or port forwarding. You just need to create an account and register your devices on Hik-Connect app or website.
  • -
  • HiLookVision has a user-friendly interface and simple operation. You can easily access live view, playback, alarm, device management, etc. from one platform.
  • -
  • HiLookVision supports multiple languages and regions. You can choose your preferred language and region from the settings menu.
  • -
  • HiLookVision is free to download and use. You don't need to pay any fees or subscriptions to use it.
  • -
-

Disadvantages of HiLookVision

-
    -
  • HiLookVision requires a stable internet connection to work properly. If your internet connection is slow or unstable, you may experience lagging or buffering issues when viewing live or recorded video.
  • -
  • HiLookVision may not support some advanced features or functions that are available on other video management software. For example, you may not be able to use face recognition, license plate recognition, smart search, etc. on HiLookVision.
  • -
  • HiLookVision may have some bugs or errors that affect its performance or functionality. For example, you may encounter problems when adding devices, receiving alarms, playing back video, etc. You may need to update or reinstall HiLookVision to fix these issues.
  • -
-

Conclusion

-

In conclusion, HiLookVision is a versatile video management software that allows you to monitor your cameras remotely, record and playback video, backup files, receive motion detection alerts, and more. It is compatible with various devices from Hikvision and other brands, and supports Cloud P2P function for easy connection. It has a user-friendly interface and simple operation, and supports multiple languages and regions. It is also free to download and use. However, HiLookVision also has some drawbacks, such as requiring a stable internet connection, not supporting some advanced features, and having some bugs or errors. Therefore, you should weigh the pros and cons of HiLookVision before deciding to use it for your video surveillance needs.

-

FAQs

-

Here are some frequently asked questions about HiLookVision:

-
    -
  1. What is the difference between HiLookVision and iVMS-4200?
  2. -

    HiLookVision and iVMS-4200 are both video management software developed by Hikvision. However, HiLookVision is designed for the HiLook series of products, while iVMS-4200 is designed for the Hikvision series of products. HiLookVision supports Cloud P2P function, while iVMS-4200 supports Hik-Connect domain function. HiLookVision has a simpler interface and operation, while iVMS-4200 has more features and functions.

    -
  3. How can I update HiLookVision to the latest version?
  4. -

    You can update HiLookVision to the latest version by downloading it from the official website of Hikvision or from the app store. You can also check for updates from the settings menu of HiLookVision.

    -
  5. How can I backup or export video files from HiLookVision?
  6. -

    You can backup or export video files from HiLookVision by using the Backup function. To use this function, follow these steps:

    -
      -
    1. Click on the Backup icon on the toolbar.
    2. -
    3. Select the device or group you want to backup or export video files from.
    4. -
    5. Select the date and time range you want to search for the video files.
    6. -
    7. Click on the Search button and wait for the results to appear.
    8. -
    9. Select the files you want to backup or export and click on Backup or Export.
    10. -
    11. Choose the destination folder and format for the backup or export files and click OK.
    12. -
    -
  7. How can I contact Hikvision customer service if I have any problems with HiLookVision?
  8. -

    You can contact Hikvision customer service by using the following methods:

    - -
  9. How can I share my feedback or suggestions about HiLookVision?
  10. -

    You can share your feedback or suggestions about HiLookVision by using the Feedback function. To use this function, follow these steps:

    -
      -
    1. Click on the Menu icon on the top left corner and select Feedback.
    2. -
    3. Enter your name, email, phone number, and feedback or suggestion.
    4. -
    5. Click on Submit to send your feedback or suggestion to Hikvision.
    6. -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ezan Vakti Pro (REKLAMSIZ) APK Download - Free and Fast - Version 5.2.96.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ezan Vakti Pro (REKLAMSIZ) APK Download - Free and Fast - Version 5.2.96.md deleted file mode 100644 index 239bf1d374753d42e33f5a0131f85d1f72e21420..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ezan Vakti Pro (REKLAMSIZ) APK Download - Free and Fast - Version 5.2.96.md +++ /dev/null @@ -1,106 +0,0 @@ - -

    Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6: A Prayer Times App Without Ads

    -

    If you are looking for a reliable and convenient prayer times app that does not have any ads or in-app purchases, you might want to check out Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6. This app is developed by Maviay®, a Turkish company that specializes in Islamic apps and software. In this article, we will tell you what Ezan Vakti Pro (REKLAMSIZ) is, how to download and install it, why you should choose it over other prayer times apps, and what are the benefits of using it.

    -

    ezan vakti pro (reklamsiz) apk 5.2 6


    Download Ziphttps://ssurll.com/2uO0di



    -

    What is Ezan Vakti Pro (REKLAMSIZ)?

    -

    Ezan Vakti Pro (REKLAMSIZ) is a prayer times app that provides accurate and customizable prayer times based on your location, as well as other features such as Quran, Qibla, and Masjid. It is designed for Muslims who want to stay connected to their faith and prayers without any distractions or interruptions from ads or in-app purchases.

    -

    Features of Ezan Vakti Pro (REKLAMSIZ)

    -

    Some of the features of Ezan Vakti Pro (REKLAMSIZ) are:

    -
      -
    • Accurate prayer times based on your location, with options to adjust the calculation method, the angle of Fajr and Isha, and the daylight saving time.
    • -
    • Smart Watch Compatibility, which allows you to view the prayer times on your smart watch.
    • -
    • The Holy Quran, with recitations and translations in various languages.
    • -
    • Qibla compass, which shows you the direction of Mecca from your location.
    • -
    • Automatic mute in Masjids, which silences your phone when you enter a mosque based on your location.
    • -
    • Wake up alarm for Sahoor, which helps you wake up for the pre-dawn meal during Ramadan.
    • -
    • Reminder fasting Monday to Thursday, which reminds you to fast on these days if you wish.
    • -
    -

    How to download and install Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6

    -

    To download and install Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6, you need to follow these steps:

    -

    ezan vakti pro reklamsiz apk download
    -ezan vakti pro plus apk free
    -ezan vakti pro apk latest version
    -ezan vakti pro reklamsiz app
    -ezan vakti pro plus android
    -ezan vakti pro apk 5.2 6
    -ezan vakti pro reklamsiz 5.2 93
    -ezan vakti pro plus 5.2 97
    -ezan vakti pro apk for android
    -ezan vakti pro reklamsiz apkcombo
    -ezan vakti pro plus apkcombo
    -ezan vakti pro apk maviay
    -ezan vakti pro reklamsiz maviay
    -ezan vakti pro plus maviay
    -ezan vakti pro apk google play
    -ezan vakti pro reklamsiz google play
    -ezan vakti pro plus google play
    -ezan vakti pro apk wear os support
    -ezan vakti pro reklamsiz wear os support
    -ezan vakti pro plus wear os support
    -ezan vakti pro apk prayer times
    -ezan vakti pro reklamsiz prayer times
    -ezan vakti pro plus prayer times
    -ezan vakti pro apk quran
    -ezan vakti pro reklamsiz quran
    -ezan vakti pro plus quran
    -ezan vakti pro apk islamic sticker
    -ezan vakti pro reklamsiz islamic sticker
    -ezan vakti pro plus islamic sticker
    -ezan vakti pro apk namazdayim modulu
    -ezan vakti pro reklamsiz namazdayim modulu
    -ezan vakti pro plus namazdayim modulu
    -ezan vakti pro apk ajanda modulu
    -ezan vakti pro reklamsiz ajanda modulu
    -ezan vakti pro plus ajanda modulu
    -ezan vakti pro apk cevsen
    -ezan vakti pro reklamsiz cevsen
    -ezan vakti pro plus cevsen
    -ezan vakti pro apk kuran meali
    -ezan vakti pro reklamsiz kuran meali
    -ezan vakti pro plus kuran meali
    -ezan vakti pro apk risale-i nur okuma programı
    -ezan vakti pro reklamsiz risale-i nur okuma programı
    -ezan vakti pro plus risale-i nur okuma programı
    -ezan vakti pro apk fazilet calendar
    -ezan vakti pro reklamsiz fazilet calendar
    -ezan vakti pro plus fazilet calendar
    -ezan vakti pro apk igmg
    -ezan vakti pro reklamsiz igmg

    -
      -
    1. Go to [this link](^1^), which will take you to the APKCombo website where you can download the APK file of Ezan Vakti Pro (REKLAMSIZ).
    2. -
    3. Click on the "Download APK" button and wait for the file to be downloaded on your device.
    4. -
    5. Once the file is downloaded, open it and tap on "Install". You might need to enable the installation from unknown sources in your device settings.
    6. -
    7. After the installation is complete, you can open the app and enjoy its features.
    8. -
    -

    Why choose Ezan Vakti Pro (REKLAMSIZ) over other prayer times apps?

    -

    There are many prayer times apps available on the market, but not all of them are as good as Ezan Vakti Pro (REKLAMSIZ). Here are some of the reasons why you should choose Ezan Vakti Pro (REKLAMSIZ) over other prayer times apps:

    -

    No ads or in-app purchases

    -

    One of the main advantages of Ezan Vakti Pro (REKLAMSIZ) is that it does not have any ads or in-app purchases. This means that you can use the app without any interruptions or distractions from annoying ads or pop-ups. You also do not have to pay any extra money to unlock any features or content. You can enjoy the full functionality of the app for free.

    -

    Accurate and customizable prayer times

    -

    Another reason why you should choose Ezan Vakti Pro (REKLAMSIZ) is that it provides accurate and customizable prayer times based on your location. You can adjust the calculation method, the angle of Fajr and Isha, and the daylight saving time according to your preference. You can also view the prayer times for different cities and countries, as well as the Hijri calendar and the Islamic events. You can also set reminders and notifications for each prayer time, as well as choose from different azan sounds and voices.

    -

    Quran, Qibla, and Masjid features

    -

    A third reason why you should choose Ezan Vakti Pro (REKLAMSIZ) is that it offers other features that enhance your Islamic experience, such as Quran, Qibla, and Masjid. You can read and listen to the Holy Quran, with recitations and translations in various languages. You can also bookmark your favorite verses and share them with others. You can also use the Qibla compass to find the direction of Mecca from your location. You can also use the Masjid feature to locate nearby mosques, view their details and photos, and get directions to them.

    -

    What are the benefits of using Ezan Vakti Pro (REKLAMSIZ)?

    -

    Using Ezan Vakti Pro (REKLAMSIZ) can bring you many benefits, such as:

    -

    Stay connected to your faith and prayers

    -

    By using Ezan Vakti Pro (REKLAMSIZ), you can stay connected to your faith and prayers, no matter where you are or what time it is. You can always know when it is time to pray, and how to perform your prayers correctly. You can also enrich your knowledge and understanding of Islam by reading and listening to the Quran, and learning about the Islamic events and history.

    -

    Enjoy a smooth and user-friendly interface

    -

    Ezan Vakti Pro (REKLAMSIZ) has a smooth and user-friendly interface that makes it easy to use and navigate. You can access all the features and settings with a few taps, and customize them according to your needs. You can also choose from different themes and colors to personalize your app. The app also supports multiple languages, including English, Turkish, Arabic, French, German, Spanish, Russian, Indonesian, Malay, Urdu, Hindi, Bengali, Persian, Kurdish, Bosnian, Albanian, Azerbaijani, Uzbek, Somali, Chinese, Japanese, Korean, Thai, Vietnamese, Filipino, Portuguese, Italian, Polish, Dutch, Swedish, Norwegian, Danish, Finnish.

    -

    Support the developer and the Muslim community

    -

    By using Ezan Vakti Pro (REKLAMSIZ), you are also supporting the developer and the Muslim community. The developer of this app is Maviay®, a Turkish company that specializes in Islamic apps and software. They have been developing high-quality apps for Muslims since 2009. By using their apps, you are helping them continue their work and improve their services. You are also supporting the Muslim community by spreading the word about this app and sharing it with your friends and family.

    -

    Conclusion

    -

    Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6 is a prayer times app that provides accurate and customizable prayer times based on your location, as well as other features such as Quran, Qibla, and Masjid. It is designed for Muslims who want to stay connected to their faith and prayers without any ads or in-app purchases. It also has a smooth and user-friendly interface that supports multiple languages and themes. By using this app, you can enjoy the benefits of staying connected to your faith and prayers, enjoying a smooth and user-friendly interface, and supporting the developer and the Muslim community. If you want to download and install Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6, you can follow the steps that we have provided in this article.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6:

    -

    Q: Is Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6 safe to download and install?

    -

    A: Yes, Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6 is safe to download and install. It does not contain any viruses, malware, or spyware. However, you should always download it from a trusted source, such as the APKCombo website that we have linked in this article.

    -

    Q: How can I update Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6?

    -

    A: You can update Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6 by downloading and installing the latest version from the same source that you downloaded it from. You can also check for updates within the app by going to the settings menu and tapping on "Check for updates".

    -

    Q: How can I contact the developer of Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6?

    -

    A: You can contact the developer of Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6 by sending an email to info@maviay.com or visiting their website at www.maviay.com. You can also follow them on social media platforms such as Facebook, Twitter, Instagram, and YouTube.

    -

    Q: How can I share my feedback or suggestions about Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6?

    -

    A: You can share your feedback or suggestions about Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6 by rating and reviewing the app on the Google Play Store or the App Store. You can also send your feedback or suggestions to the developer via email or social media.

    -

    Q: How can I support the developer of Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6?

    -

    A: You can support the developer of Ezan Vakti Pro (REKLAMSIZ) APK 5.2 6 by using their app regularly and sharing it with your friends and family. You can also donate to them via PayPal or Patreon, or buy their other apps such as Quran Majeed Pro, Hadith Collection Pro, Islamic Calendar Pro, etc.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Family Farm Adventure Hack Apk Discover the Secrets of the Island and Grow Your Farm.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Family Farm Adventure Hack Apk Discover the Secrets of the Island and Grow Your Farm.md deleted file mode 100644 index 5ae5b0c8911920b175984a33803964d7ac084007..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Family Farm Adventure Hack Apk Discover the Secrets of the Island and Grow Your Farm.md +++ /dev/null @@ -1,70 +0,0 @@ - -

    Family Farm Adventure Hack Apk: Is It Worth It?

    -

    Family Farm Adventure is a popular farming simulator game where you can harvest crops, explore islands, and build your own farm town. The game has a beautiful story, charming characters, and engaging gameplay. However, some players may find it hard to progress in the game due to the limited energy, coins, and gems. That's why some of them may resort to using a hack apk for Family Farm Adventure.

    -

    A hack apk is a modified version of the original game app that allows you to cheat or bypass some of the game's rules and restrictions. For example, a hack apk for Family Farm Adventure may give you unlimited energy, coins, and gems, or unlock all the features and islands in the game. Sounds tempting, right? But before you download and install a hack apk for Family Farm Adventure, you should know the benefits and risks of doing so. In this article, we will discuss both sides of the coin and give you some alternatives to using a hack apk for Family Farm Adventure.

    -

    family farm adventure hack apk


    Download Zip » https://ssurll.com/2uNW7p



    -

    Benefits of Using a Hack Apk for Family Farm Adventure

    -

    Using a hack apk for Family Farm Adventure may seem like a good idea if you want to enjoy the game without any limitations or frustrations. Here are some of the benefits that you may get from using a hack apk for Family Farm Adventure:

    -
      -
    • Unlimited energy. Energy is the most important resource in the game, as it allows you to perform actions such as harvesting crops, clearing obstacles, and exploring islands. However, energy is also very scarce and regenerates slowly. With a hack apk for Family Farm Adventure, you can have unlimited energy and do whatever you want in the game without waiting or spending real money.
    • -
    • Unlimited coins and gems. Coins and gems are the currencies in the game that you can use to buy items, decorations, animals, and more. You can earn coins and gems by completing tasks, selling products, or watching ads. However, coins and gems are also very expensive and hard to come by. With a hack apk for Family Farm Adventure, you can have unlimited coins and gems and buy anything you want in the game without grinding or spending real money.
    • -
    • Unlock all features and islands. The game has many features and islands that you can unlock as you progress in the story. However, some of these features and islands require certain levels, items, or quests to be completed. With a hack apk for Family Farm Adventure, you can unlock all the features and islands in the game without meeting any requirements or completing any quests.
    • -
    -

    Risks of Using a Hack Apk for Family Farm Adventure

    -

    Using a hack apk for Family Farm Adventure may seem like a fun and easy way to play the game, but it also comes with many risks and drawbacks. Here are some of the risks that you may face from using a hack apk for Family Farm Adventure:

    -
      -
    • Malware and viruses. A hack apk for Family Farm Adventure is not an official app from the game developer, but rather an unauthorized app from an unknown source. This means that you cannot trust its quality or safety. A hack apk for Family Farm Adventure may contain malware or viruses that can harm your device or steal your personal information. You should always be careful when downloading apps from unknown sources.
    • -
    • Account suspension or ban. A hack apk for Family Farm Adventure is not allowed by the game developer, but rather a violation of the game's terms of service. This means that you are cheating or hacking the game. The game developer has the right to detect and punish any players who use a hack apk for Family Farm Adventure. You may face account suspension or ban if you are caught using a hack apk for Family Farm Adventure. This means that you will lose access to your game account and all your game progress and data.
    • -
    • Loss of game progress and data. A hack apk for Family Farm Adventure may not be compatible with the latest version of the game or the game server. This means that you may encounter errors, glitches, or crashes when playing the game. A hack apk for Family Farm Adventure may also overwrite or corrupt your game data. This means that you may lose your game progress and data, such as your level, items, coins, gems, and more.
    • -
    -

    Alternatives to Using a Hack Apk for Family Farm Adventure

    -

    Using a hack apk for Family Farm Adventure is not the only way to enjoy the game. There are other ways to play the game efficiently and effectively without cheating or risking your device, account, or data. Here are some of the alternatives to using a hack apk for Family Farm Adventure:

    -
      -
    • Tips and tricks to play the game efficiently and effectively. There are many tips and tricks that you can learn and apply to play the game better. For example, you can plan ahead your actions, prioritize your tasks, use boosters wisely, collect daily rewards, complete quests and achievements, join a club, and more. You can find many online guides and videos that can teach you how to play the game efficiently and effectively.
    • -
    • Official sources to get free energy, coins, and gems. There are many official sources that you can use to get free energy, coins, and gems in the game. For example, you can watch ads, invite friends, follow social media pages, participate in events and contests, and more. You can also use real money to buy energy, coins, and gems if you want to support the game developer and enjoy the game faster.
    • -
    • Legitimate ways to unlock features and islands. There are many legitimate ways that you can use to unlock features and islands in the game. For example, you can level up, collect items, complete quests, explore maps, and more. You can also use real money to unlock features and islands if you want to support the game developer and enjoy the game more.
    • -
    -

    Conclusion

    -

    Family Farm Adventure is a fun and relaxing game that you can play on your device. However, using a hack apk for Family Farm Adventure is not a good idea if you want to play the game safely and fairly. Using a hack apk for Family Farm Adventure may give you some benefits, such as unlimited energy, coins, gems, and features. But it also comes with many risks, such as malware, viruses, account suspension or ban, and loss of game progress and data. There are also other alternatives that you can use to play the game efficiently and effectively without cheating or risking anything. Therefore, we recommend that you avoid using a hack apk for Family Farm Adventure and enjoy the game as it is meant to be played.

    -

    We hope that this article has helped you understand more about the topic of "family farm adventure hack apk". If you have any feedback or comments, please feel free to share them with us. Thank you for reading!

    -

    FAQs

    -

    Here are some of the frequently asked questions about "family farm adventure hack apk":

    -

    family farm adventure mod apk unlimited energy
    -family farm adventure hack apk download free
    -family farm adventure cheat apk latest version
    -family farm adventure mod apk gems and coins
    -family farm adventure hack apk no root
    -family farm adventure mod apk offline
    -family farm adventure hack apk android 1
    -family farm adventure mod apk revdl
    -family farm adventure cheat apk no verification
    -family farm adventure mod apk 2023
    -family farm adventure hack apk ios
    -family farm adventure mod apk unlimited everything
    -family farm adventure cheat apk online
    -family farm adventure mod apk obb
    -family farm adventure hack apk pure
    -family farm adventure mod apk rexdl
    -family farm adventure cheat apk unlimited money
    -family farm adventure mod apk happymod
    -family farm adventure hack apk 1.23.101
    -family farm adventure mod apk getmodsapk[^1^]
    -family farm adventure cheat apk 2023
    -family farm adventure mod apk android oyun club
    -family farm adventure hack apk latest
    -family farm adventure mod apk an1
    -family farm adventure cheat apk download link
    -family farm adventure mod apk vip unlocked
    -family farm adventure hack apk for pc
    -family farm adventure mod apk platinmods
    -family farm adventure cheat apk without survey
    -family farm adventure mod apk unlimited keys

    - - - - - - -
    Q: What is Family Farm Adventure?A: Family Farm Adventure is a popular farming simulator game where you can harvest crops, explore islands, and build your own farm town.
    Q: What is a hack apk?A: A hack apk is a modified version of the original game app that allows you to cheat or bypass some of the game's rules and restrictions.
    Q: What are the benefits of using a hack apk for Family Farm Adventure?A: Some of the benefits of using a hack apk for Family Farm Adventure are unlimited energy, coins, gems, and features.
    Q: What are the risks of using a hack apk for Family Farm Adventure?A: Some of the risks of using a hack apk for Family Farm Adventure are malware, viruses, account suspension or ban, and loss of game progress and data.
    Q: What are the alternatives to using a hack apk for Family Farm Adventure?A: Some of the alternatives to using a hack apk for Family Farm Adventure are tips and tricks to play the game efficiently and effectively, official sources to get free energy, coins, and gems, and legitimate ways to unlock features and islands.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/sirmews/url-summarizer-playground/app.py b/spaces/sirmews/url-summarizer-playground/app.py deleted file mode 100644 index 808fc7f6c8b9998891f70e54dd990252f715584b..0000000000000000000000000000000000000000 --- a/spaces/sirmews/url-summarizer-playground/app.py +++ /dev/null @@ -1,110 +0,0 @@ -import streamlit as st -import pandas as pd -from scraper import website_scraper -from openai_summarizer import openai_summarizer -from feature_flags import supabase_feature_flag, openai_feature_flag, kor_feature_flag -#from authentication import send_magic_link, verify_token, get_current_user -#from supabase_init import initialize_supabase - -# def show_auth_page(): -# st.title("Supabase Magic Link Authentication Example") - -# email = st.text_input("Enter your email address to receive a Magic Link") - -# if email: -# response = send_magic_link(email) -# if response.error: -# st.error(f"Error sending Magic Link: {response.error}") -# else: -# st.success("Magic Link sent!") - -# token = st.text_input("Enter the token from the Magic Link") - -# if token: -# response = verify_token(token) -# if response.error: -# st.error(f"Error verifying token: {response.error}") -# else: -# st.session_state.authenticated = True -# st.session_state.show_main_page = True - -async def a_download_html(new_url: str, extra_sleep: int) -> str: - """Download an HTML from a URL. - - In some pathological cases, an extra sleep period may be needed. 
- """ - - async with async_playwright() as p: - browser = await p.chromium.launch() - page = await browser.new_page() - await page.goto(url, wait_until="load") - if extra_sleep: - await asyncio.sleep(extra_sleep) - html_content = await page.content() - await browser.close() - return html_content - -def show_alt_page(): - - new_url = st.text_input('ALT URL', '') - - -def show_main_page(): - st.title("Welcome to the Main Page") - st.subheader("Add URLs here") - - # Check feature flags - supabase_flag = supabase_feature_flag() - openai_flag = openai_feature_flag() - - # Get OpenAI API key input if the feature flag is enabled - if openai_flag: - openai_api_key = st.text_input('OPENAI KEY', '') - - # Add a dropdown menu to choose the model - model = st.selectbox("Model", options=["text-davinci-002", "text-davinci-003"], index=0) - else: - openai_api_key = None - - # Get website URL input - url = st.text_input('URL', '') - - if url and openai_api_key: - # Display a loading bar while the scraper is running - with st.spinner('Scraping and analyzing the website...'): - headline, content, reference_urls, opengraph_metadata = website_scraper(url) - summarized_text, tags = openai_summarizer(content, openai_api_key, model) - - # Create a JSON output with the given structure - json_output = { - "title": headline, - 'opengraph': opengraph_metadata, - "summary": summarized_text, - "tags": tags - } - - # Display the JSON output in Streamlit - st.json(json_output) - - # Display reference URLs in a table - st.markdown("## Reference URLs") - st.table(pd.DataFrame({"URL": reference_urls})) - - # if st.button("Logout"): - # st.session_state.authenticated = False - # st.session_state.show_main_page = False - -show_main_page() - -show_alt_page() - -# if "authenticated" not in st.session_state: -# st.session_state.authenticated = False - -# if "show_main_page" not in st.session_state: -# st.session_state.show_main_page = False - -# if not st.session_state.authenticated: -# show_auth_page() 
-# elif st.session_state.show_main_page: -# show_main_page() diff --git a/spaces/siya02/Konakni-TTS/ttsv/utils/inference/run_gradio.py b/spaces/siya02/Konakni-TTS/ttsv/utils/inference/run_gradio.py deleted file mode 100644 index f548a6633497b7c54f537dddec53796ff576316b..0000000000000000000000000000000000000000 --- a/spaces/siya02/Konakni-TTS/ttsv/utils/inference/run_gradio.py +++ /dev/null @@ -1,60 +0,0 @@ -import gradio as gr -import argparse -import numpy as np -from argparse import Namespace -from .advanced_tts import load_all_models, run_tts_paragraph - - -def hit_tts(textbox, gender, slider_noise_scale, slider_length_sclae, choice_transliteration, choice_number_conversion, choice_split_sentences): - inputs_to_gradio = {'text' : textbox, - 'gender' : gender, - 'noise_scale': slider_noise_scale, - 'length_scale': slider_length_sclae, - 'transliteration' : 1 if choice_transliteration else 0, - 'number_conversion' : 1 if choice_number_conversion else 0, - 'split_sentences' : 1 if choice_split_sentences else 0 - } - - args = Namespace(**inputs_to_gradio) - args.wav = None - args.lang = lang - args.gender = gender - - if args.text: - sr, audio = run_tts_paragraph(args) - return (sr, audio) - -def build_gradio(args): - global lang - lang = args.lang - load_all_models(args) - textbox = gr.inputs.Textbox(placeholder="Enter Text to run", default="", label="Enter Input Text") - gender = gr.inputs.Radio(choices = ['Female'], default='Female', label='Gender') - slider_noise_scale = gr.inputs.Slider(minimum=0, maximum=1.0, step=0.001, default=0.667, label='Noise Scale') - slider_length_sclae = gr.inputs.Slider(minimum=0, maximum=2.0, step=0.1, default=1.0, label='Length Scale') - - choice_transliteration = gr.inputs.Checkbox(default=True, label="Transliteration") - choice_number_conversion = gr.inputs.Checkbox(default=True, label="Number Conversion") - choice_split_sentences = gr.inputs.Checkbox(default=True, label="Split Sentences") - - examples = [['भारत म्हजो देश आनी 
म्हाका एक भारतीय जाल्ल्याचो अभिमान आसा.', 'Female', 0.667, 1, 0, 1, 1]] - - op = gr.outputs.Audio(type="numpy", label=None) - - inputs_to_gradio = [textbox, gender, slider_noise_scale, slider_length_sclae, choice_transliteration, choice_number_conversion, choice_split_sentences] - iface = gr.Interface(fn=hit_tts, examples = examples, inputs=inputs_to_gradio, outputs=op, theme='huggingface', title='Konkani TTS', article = 'Note: Transliteration models may not work well in some scenarios which can hamper the TTS quality, to evaluate the model in better sense it is advisable to provide input in the required language and switch off transliteration. Contact @harveenchadha on twitter for any issues.') - iface.launch(enable_queue=True) - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("-a", "--acoustic", required=True, type=str) - parser.add_argument("-v", "--vocoder", required=True, type=str) - parser.add_argument("-d", "--device", type=str, default="cpu") - parser.add_argument("-L", "--lang", type=str, required=True) - - global lang - - args = parser.parse_args() - lang = args.lang - - build_gradio(args) \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/data/data_utils/mask_utils.py b/spaces/skf15963/summary/fengshen/data/data_utils/mask_utils.py deleted file mode 100644 index 0009f00272bf6feff1dbd491153332584cb431e1..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/data/data_utils/mask_utils.py +++ /dev/null @@ -1,285 +0,0 @@ -import collections - -import numpy as np - -MaskedLmInstance = collections.namedtuple("MaskedLmInstance", - ["index", "label"]) - - -def is_start_piece(piece): - """Check if the current word piece is the starting piece (BERT).""" - # When a word has been split into - # WordPieces, the first token does not have any marker and any subsequence - # tokens are prefixed with ##. 
So whenever we see the ## token, we - # append it to the previous set of word indexes. - return not piece.startswith("##") - - -def create_masked_lm_predictions(tokens, - vocab_id_list, vocab_id_to_token_dict, - masked_lm_prob, - cls_id, sep_id, mask_id, - max_predictions_per_seq, - np_rng, - max_ngrams=3, - do_whole_word_mask=True, - favor_longer_ngram=False, - do_permutation=False, - geometric_dist=False, - masking_style="bert", - zh_tokenizer=None): - """Creates the predictions for the masked LM objective. - Note: Tokens here are vocab ids and not text tokens.""" - ''' - modified from Megatron-LM - Args: - tokens: 输入 - vocab_id_list: 词表token_id_list - vocab_id_to_token_dict: token_id到token字典 - masked_lm_prob:mask概率 - cls_id、sep_id、mask_id:特殊token - max_predictions_per_seq:最大mask个数 - np_rng:mask随机数 - max_ngrams:最大词长度 - do_whole_word_mask:是否做全词掩码 - favor_longer_ngram:优先用长的词 - do_permutation:是否打乱 - geometric_dist:用np_rng.geometric做随机 - masking_style:mask类型 - zh_tokenizer:WWM的分词器,比如用jieba.lcut做分词之类的 - ''' - cand_indexes = [] - # Note(mingdachen): We create a list for recording if the piece is - # the starting piece of current token, where 1 means true, so that - # on-the-fly whole word masking is possible. - token_boundary = [0] * len(tokens) - # 如果没有指定中文分词器,那就直接按##算 - if zh_tokenizer is None: - for (i, token) in enumerate(tokens): - if token == cls_id or token == sep_id: - token_boundary[i] = 1 - continue - # Whole Word Masking means that if we mask all of the wordpieces - # corresponding to an original word. - # - # Note that Whole Word Masking does *not* change the training code - # at all -- we still predict each WordPiece independently, softmaxed - # over the entire vocabulary. 
- if (do_whole_word_mask and len(cand_indexes) >= 1 and - not is_start_piece(vocab_id_to_token_dict[token])): - cand_indexes[-1].append(i) - else: - cand_indexes.append([i]) - if is_start_piece(vocab_id_to_token_dict[token]): - token_boundary[i] = 1 - else: - # 如果指定了中文分词器,那就先用分词器分词,然后再进行判断 - # 获取去掉CLS SEP的原始文本 - raw_tokens = [] - for t in tokens: - if t != cls_id and t != sep_id: - raw_tokens.append(t) - raw_tokens = [vocab_id_to_token_dict[i] for i in raw_tokens] - # 分词然后获取每次字开头的最长词的长度 - word_list = set(zh_tokenizer(''.join(raw_tokens), HMM=True)) - word_length_dict = {} - for w in word_list: - if len(w) < 1: - continue - if w[0] not in word_length_dict: - word_length_dict[w[0]] = len(w) - elif word_length_dict[w[0]] < len(w): - word_length_dict[w[0]] = len(w) - i = 0 - # 从词表里面检索 - while i < len(tokens): - token_id = tokens[i] - token = vocab_id_to_token_dict[token_id] - if len(token) == 0 or token_id == cls_id or token_id == sep_id: - token_boundary[i] = 1 - i += 1 - continue - word_max_length = 1 - if token[0] in word_length_dict: - word_max_length = word_length_dict[token[0]] - j = 0 - word = '' - word_end = i+1 - # 兼容以前##的形式,如果后面的词是##开头的,那么直接把后面的拼到前面当作一个词 - old_style = False - while word_end < len(tokens) and vocab_id_to_token_dict[tokens[word_end]].startswith('##'): - old_style = True - word_end += 1 - if not old_style: - while j < word_max_length and i+j < len(tokens): - cur_token = tokens[i+j] - word += vocab_id_to_token_dict[cur_token] - j += 1 - if word in word_list: - word_end = i+j - cand_indexes.append([p for p in range(i, word_end)]) - token_boundary[i] = 1 - i = word_end - - output_tokens = list(tokens) - - masked_lm_positions = [] - masked_lm_labels = [] - - if masked_lm_prob == 0: - return (output_tokens, masked_lm_positions, - masked_lm_labels, token_boundary) - - num_to_predict = min(max_predictions_per_seq, - max(1, int(round(len(tokens) * masked_lm_prob)))) - - ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64) - if not geometric_dist: - # 
Note(mingdachen): - # By default, we set the probilities to favor shorter ngram sequences. - pvals = 1. / np.arange(1, max_ngrams + 1) - pvals /= pvals.sum(keepdims=True) - if favor_longer_ngram: - pvals = pvals[::-1] - # 获取一个ngram的idx,对于每个word,记录他的ngram的word - ngram_indexes = [] - for idx in range(len(cand_indexes)): - ngram_index = [] - for n in ngrams: - ngram_index.append(cand_indexes[idx:idx + n]) - ngram_indexes.append(ngram_index) - - np_rng.shuffle(ngram_indexes) - - (masked_lms, masked_spans) = ([], []) - covered_indexes = set() - for cand_index_set in ngram_indexes: - if len(masked_lms) >= num_to_predict: - break - if not cand_index_set: - continue - # Note(mingdachen): - # Skip current piece if they are covered in lm masking or previous ngrams. - for index_set in cand_index_set[0]: - for index in index_set: - if index in covered_indexes: - continue - - if not geometric_dist: - n = np_rng.choice(ngrams[:len(cand_index_set)], - p=pvals[:len(cand_index_set)] / - pvals[:len(cand_index_set)].sum(keepdims=True)) - else: - # Sampling "n" from the geometric distribution and clipping it to - # the max_ngrams. Using p=0.2 default from the SpanBERT paper - # https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1) - n = min(np_rng.geometric(0.2), max_ngrams) - - index_set = sum(cand_index_set[n - 1], []) - n -= 1 - # Note(mingdachen): - # Repeatedly looking for a candidate that does not exceed the - # maximum number of predictions by trying shorter ngrams. - while len(masked_lms) + len(index_set) > num_to_predict: - if n == 0: - break - index_set = sum(cand_index_set[n - 1], []) - n -= 1 - # If adding a whole-word mask would exceed the maximum number of - # predictions, then just skip this candidate. 
- if len(masked_lms) + len(index_set) > num_to_predict: - continue - is_any_index_covered = False - for index in index_set: - if index in covered_indexes: - is_any_index_covered = True - break - if is_any_index_covered: - continue - for index in index_set: - covered_indexes.add(index) - masked_token = None - token_id = tokens[index] - if masking_style == "bert": - # 80% of the time, replace with [MASK] - if np_rng.random() < 0.8: - masked_token = mask_id - else: - # 10% of the time, keep original - if np_rng.random() < 0.5: - masked_token = tokens[index] - # 10% of the time, replace with random word - else: - masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))] - elif masking_style == "t5": - masked_token = mask_id - else: - raise ValueError("invalid value of masking style") - - output_tokens[index] = masked_token - masked_lms.append(MaskedLmInstance(index=index, label=token_id)) - - masked_spans.append(MaskedLmInstance( - index=index_set, - label=[tokens[index] for index in index_set])) - - assert len(masked_lms) <= num_to_predict - np_rng.shuffle(ngram_indexes) - - select_indexes = set() - if do_permutation: - for cand_index_set in ngram_indexes: - if len(select_indexes) >= num_to_predict: - break - if not cand_index_set: - continue - # Note(mingdachen): - # Skip current piece if they are covered in lm masking or previous ngrams. - for index_set in cand_index_set[0]: - for index in index_set: - if index in covered_indexes or index in select_indexes: - continue - - n = np.random.choice(ngrams[:len(cand_index_set)], - p=pvals[:len(cand_index_set)] / - pvals[:len(cand_index_set)].sum(keepdims=True)) - index_set = sum(cand_index_set[n - 1], []) - n -= 1 - - while len(select_indexes) + len(index_set) > num_to_predict: - if n == 0: - break - index_set = sum(cand_index_set[n - 1], []) - n -= 1 - # If adding a whole-word mask would exceed the maximum number of - # predictions, then just skip this candidate. 
- if len(select_indexes) + len(index_set) > num_to_predict: - continue - is_any_index_covered = False - for index in index_set: - if index in covered_indexes or index in select_indexes: - is_any_index_covered = True - break - if is_any_index_covered: - continue - for index in index_set: - select_indexes.add(index) - assert len(select_indexes) <= num_to_predict - - select_indexes = sorted(select_indexes) - permute_indexes = list(select_indexes) - np_rng.shuffle(permute_indexes) - orig_token = list(output_tokens) - - for src_i, tgt_i in zip(select_indexes, permute_indexes): - output_tokens[src_i] = orig_token[tgt_i] - masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i])) - - masked_lms = sorted(masked_lms, key=lambda x: x.index) - # Sort the spans by the index of the first span - masked_spans = sorted(masked_spans, key=lambda x: x.index[0]) - - for p in masked_lms: - masked_lm_positions.append(p.index) - masked_lm_labels.append(p.label) - return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans) diff --git a/spaces/sklearn-docs/Detection-Error-Tradeoff-Curve/README.md b/spaces/sklearn-docs/Detection-Error-Tradeoff-Curve/README.md deleted file mode 100644 index 119335c7273a77c0c9dd86290894b13c9fbdb234..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/Detection-Error-Tradeoff-Curve/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Detection Error Tradeoff Curve -emoji: 🏢 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sklearn-docs/Manifold-Learning-methods-on-a-severed-sphere/app.py b/spaces/sklearn-docs/Manifold-Learning-methods-on-a-severed-sphere/app.py deleted file mode 100644 index 0b230f8c66054d2e78e2b16edb5c926afae9610f..0000000000000000000000000000000000000000 --- 
a/spaces/sklearn-docs/Manifold-Learning-methods-on-a-severed-sphere/app.py +++ /dev/null @@ -1,184 +0,0 @@ -# Author: Jaques Grobler -# License: BSD 3 clause - -from time import time -import numpy as np -import matplotlib.pyplot as plt -from matplotlib.ticker import NullFormatter -from sklearn import manifold -from sklearn.utils import check_random_state -import plotly.graph_objects as go -import gradio as gr -from matplotlib import style -plt.switch_backend("agg") -style.use('ggplot') - -n_neighbors = 10 -n_samples = 1000 - -font1 = {'family':'DejaVu Sans','size':10, 'color':'white'} - -def sphere(n_neighbors, n_samples): - - # Create our sphere. - random_state = check_random_state(0) - p = random_state.rand(n_samples) * (2 * np.pi - 0.55) - t = random_state.rand(n_samples) * np.pi - - # Sever the poles from the sphere. - indices = (t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))) - colors = p[indices] - x, y, z = ( - np.sin(t[indices]) * np.cos(p[indices]), - np.sin(t[indices]) * np.sin(p[indices]), - np.cos(t[indices]), - ) - - sphere_data = np.array([x, y, z]).T - - return x, y, z, colors, sphere_data - -x, y, z, colors, sphere_data = sphere(n_neighbors, n_samples) - -def create_3D_plot(n_neighbors = n_neighbors, n_samples = n_samples): - - x, y, z, colors = sphere(n_neighbors, n_samples)[:4] - - # Create the trace for the scatter plot - scatter_trace = go.Scatter3d( - x=x, - y=y, - z=z, - mode='markers', - marker=dict( - size=5, - color=colors, - colorscale='rainbow', - showscale=False - ) - ) - - # Create the figure and add the trace - fig = go.Figure() - fig.add_trace(scatter_trace) - - return fig - -# Perform Locally Linear Embedding Manifold learning -methods = {"LLE":"standard", "LTSA":"ltsa" , - 'Hessian LLE':'hessian', "Modified LLE":"modified"} - -available = ["LLE", "LTSA",'Hessian LLE',"Modified LLE", - "Isomap","MDS","Spectral Embedding", "t-SNE"] - -def make_plot(method, methods = methods): - - # Plot our dataset. 
- fig1 = plt.figure(figsize=(10, 6), facecolor = 'none', dpi = 200) - plt.title( - "Manifold Learning with %i points, %i neighbors" % (1000, n_neighbors), - pad = 20, bbox=dict(boxstyle="round,pad=0.3",color = "#6366F1"), - fontdict = font1, size = 16 - ) - - if method in methods.keys(): - t0 = time() - trans_data = ( - manifold.LocallyLinearEmbedding( - n_neighbors=n_neighbors, n_components=2, method=methods[method] - ) - .fit_transform(sphere_data) - .T - ) - t1 = time() - title = "%s: %.2g sec" % (method, t1 - t0) - - elif method == "Isomap": - # Perform Isomap Manifold learning. - t0 = time() - trans_data = ( - manifold.Isomap(n_neighbors=n_neighbors, n_components=2) - .fit_transform(sphere_data) - .T - ) - t1 = time() - title = "%s: %.2g sec" % ("ISO", t1 - t0) - - elif method == "MDS": - # Perform Multi-dimensional scaling. - t0 = time() - mds = manifold.MDS(2, max_iter=100, n_init=1, normalized_stress="auto") - trans_data = mds.fit_transform(sphere_data).T - t1 = time() - title = "MDS: %.2g sec" % (t1 - t0) - - elif method == "Spectral Embedding": - # Perform Spectral Embedding. - t0 = time() - se = manifold.SpectralEmbedding(n_components=2, n_neighbors=n_neighbors) - trans_data = se.fit_transform(sphere_data).T - t1 = time() - title = "Spectral Embedding: %.2g sec" % (t1 - t0) - - elif method == "t-SNE": - # Perform t-distributed stochastic neighbor embedding. - t0 = time() - tsne = manifold.TSNE(n_components=2, random_state=0) - trans_data = tsne.fit_transform(sphere_data).T - t1 = time() - title = "t-SNE: %.2g sec" % (t1 - t0) - - ax = fig1.add_subplot() - ax.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) - - ax.tick_params(axis='x', which='both', bottom=False, labelbottom=False) # Hide x-axis tick labels - ax.tick_params(axis='y', which='both', left=False, labelleft=False) # Hide y-axis tick labels - - return fig1, title - -made ="""
    -

    Made with ❤

    """ - -link = """
    - -Demo is based on this script from scikit-learn documentation""" - -intro = """

    🤗 Manifold Learning methods on a severed sphere 🤗

    -""" -desc = """

    An application of the different -Manifold Learning techniques on a spherical data-set. Here one can see the use of dimensionality reduction in order to gain some intuition regarding the manifold learning methods. Regarding the dataset, the poles are cut from the sphere, as well as a thin slice down its side. This enables the manifold learning techniques to ‘spread it open’ whilst projecting it onto two dimensions. -

    -For a similar example, where the methods are applied to the S-curve dataset, see -Comparison of Manifold Learning methods. -

    -Note that the purpose of the -MDS is to find a low-dimensional representation of the data (here 2D) in which the distances respect well the distances in the original high-dimensional space, unlike other manifold-learning algorithms, it does not seeks an isotropic representation of the data in the low-dimensional space. Here the manifold problem matches fairly that of representing a flat map of the Earth, as with -map projection. -

    -""" - -with gr.Blocks(theme = gr.themes.Soft( - primary_hue="amber", - secondary_hue="orange", - neutral_hue="teal", - font=[gr.themes.GoogleFont('Inter'), 'ui-sans-serif', 'system-ui', 'sans-serif'],), title = "Manifold Learning methods on a severed sphere") as demo: - with gr.Column(): - gr.HTML(intro) - with gr.Accordion(label = "Description", open = True): - gr.HTML(desc) - with gr.Column(): - method = gr.Radio(available, label="Select method:", value= "LLE") - title = gr.Textbox(label = 'Time for the method to perform:') - with gr.Row(): - plot_3D = gr.Plot(label="3D projection of the sphere") - plot = gr.Plot(label="Plot") - - method.change(fn=make_plot, inputs = method, outputs=[plot, title]) - - demo.load(fn=make_plot, inputs = method, outputs=[plot, title]) - demo.load(fn=create_3D_plot, inputs = [], outputs=plot_3D) - gr.HTML(made) - gr.HTML(link) - -demo.launch() - diff --git a/spaces/sklearn-docs/SGD_Penalties/app.py b/spaces/sklearn-docs/SGD_Penalties/app.py deleted file mode 100644 index 17a41ba6f40c9fe73039ce9f1ea2e3eab91a7e37..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/SGD_Penalties/app.py +++ /dev/null @@ -1,79 +0,0 @@ -import gradio as gr -import numpy as np -import matplotlib.pyplot as plt - - -def plot_penalties(): - - - - - # Plot the results - plt.clf() - - - l1_color = 'r' # hard coded as color picker not working - l2_color = 'g' # hard coded as color picker not working - elastic_net_color = 'b' # hard coded as color picker not working - - line = np.linspace(-1.5, 1.5, 1001) - xx, yy = np.meshgrid(line, line) - - l2 = xx**2 + yy**2 - l1 = np.abs(xx) + np.abs(yy) - rho = 0.5 - elastic_net = rho * l1 + (1 - rho) * l2 - fig = plt.figure(figsize=(10, 10), dpi=100) - - ax = plt.gca() - - elastic_net_contour = plt.contour( - xx, yy, elastic_net, levels=[1], colors=elastic_net_color - ) - l2_contour = plt.contour(xx, yy, l2, levels=[1], colors=l2_color) - l1_contour = plt.contour(xx, yy, l1, levels=[1], colors=l1_color) 
- ax.set_aspect("equal") - ax.spines["left"].set_position("center") - ax.spines["right"].set_color("none") - ax.spines["bottom"].set_position("center") - ax.spines["top"].set_color("none") - - plt.clabel( - elastic_net_contour, - inline=1, - fontsize=18, - fmt={1.0: "elastic-net"}, - manual=[(-1, -1)],) - plt.clabel(l2_contour, inline=1, fontsize=18, fmt={1.0: "L2"}, manual=[(-1, -1)]) - plt.clabel(l1_contour, inline=1, fontsize=18, fmt={1.0: "L1"}, manual=[(-1, -1)]) - - plt.tight_layout() - # plt.show() - return fig - - - - -title = "SGD Penalties" - - -with gr.Blocks(title=title) as demo: - gr.Markdown(f"# {title}") - gr.Markdown( - """ - ### The plot shows the contours of L1, L2 and Elastic Net regularizers. - ### The value of penalties is equal to 1 in all of them. - ### L2 regularizer is used for linear SVM models, L1 and elastic net brings sparsity in the models - ### SGDClassifier and SGDRegressor support all of the above. - """) - - gr.Markdown(" **[Demo is based on sklearn docs](https://scikit-learn.org/stable/auto_examples/linear_model/plot_sgd_penalties.html#sphx-glr-auto-examples-linear-model-plot-sgd-penalties-py)**") - - - - btn = gr.Button(value="Visualize SGD penalties") - btn.click(plot_penalties, outputs= gr.Plot() ) # - - - -demo.launch() \ No newline at end of file diff --git a/spaces/skyxx/skyxxChat/Dockerfile b/spaces/skyxx/skyxxChat/Dockerfile deleted file mode 100644 index 335c2dba28ba8c365de9306858462a59dea25f28..0000000000000000000000000000000000000000 --- a/spaces/skyxx/skyxxChat/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -COPY requirements_advanced.txt . -RUN pip install --user -r requirements.txt -# RUN pip install --user -r requirements_advanced.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . 
/app -WORKDIR /app -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/snjyor/You_Say_I_Draw/app.py b/spaces/snjyor/You_Say_I_Draw/app.py deleted file mode 100644 index 8a8ca8fe37878724c4e6607bf6df7407c39d186b..0000000000000000000000000000000000000000 --- a/spaces/snjyor/You_Say_I_Draw/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import os -import hashlib -import gradio as gr -import openai -import requests -from pygtrans import Translate -ABS_PATH = os.path.join(os.path.dirname(__file__)) -openai.api_key = os.getenv("api_key") -header = { - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36" -} - - -def text_to_image(text, length=1024, width=1024): - text = chinese_to_english(text) - try: - img_res = openai.Image.create( - prompt=text, - n=2, - size=f"{length}x{width}" - ) - url1 = img_res.get("data")[0].get("url") - url2 = img_res.get("data")[1].get("url") - open(os.path.join(ABS_PATH, f"images/{md5(url1)}.jpg"), "wb").write( - requests.get(img_res.get("data")[0].get("url"), headers=header).content) - except Exception as err: - print(err) - url1 = err - url2 = err - return url1, url2 - -def md5(text): - md = hashlib.md5() - md.update(text.encode("utf-8")) - return md.hexdigest() - -def chinese_to_english(text, target="en"): - went_wrong = False - try: - client = Translate() - text = client.translate(text, target=target) - except Exception as err: - print(err) - went_wrong = True - if went_wrong: - return text - return text.translatedText - - -app = gr.Interface( - fn=text_to_image, - inputs=[ - gr.Text(lines=3, placeholder="可输入中文关键词", label="图片描述"), - gr.Slider(minimum=512, maximum=2560, step=1, label="长", value=512), - gr.Slider(minimum=512, maximum=2560, step=1, label="宽", value=512), - ], - outputs=[ - gr.Text(lines=6), - gr.Text(lines=6) - ], - examples="examples", - allow_flagging="never" -) -app.launch() 
- diff --git a/spaces/sophiamyang/panel_example/README.md b/spaces/sophiamyang/panel_example/README.md deleted file mode 100644 index e650b4908c28fa62eca7719b9077cb437c81bcd2..0000000000000000000000000000000000000000 --- a/spaces/sophiamyang/panel_example/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Panel Example -emoji: 🔥 -colorFrom: yellow -colorTo: indigo -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sparanoid/milky-green-sovits-4/vdecoder/__init__.py b/spaces/sparanoid/milky-green-sovits-4/vdecoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/spiritupbro/Voice-Cloning/app.py b/spaces/spiritupbro/Voice-Cloning/app.py deleted file mode 100644 index 98689cad4e3bddea3bb3190f8dd39ce76e44803f..0000000000000000000000000000000000000000 --- a/spaces/spiritupbro/Voice-Cloning/app.py +++ /dev/null @@ -1,164 +0,0 @@ -from turtle import title -import gradio as gr - -import git -import os -os.system('git clone https://github.com/Edresson/Coqui-TTS -b multilingual-torchaudio-SE TTS') -os.system('pip install -q -e TTS/') -os.system('pip install -q torchaudio==0.9.0') - -import sys -TTS_PATH = "TTS/" - -# add libraries into environment -sys.path.append(TTS_PATH) # set this if TTS is not installed globally - -import os -import string -import time -import argparse -import json - -import numpy as np -import IPython -from IPython.display import Audio - - -import torch - -from TTS.tts.utils.synthesis import synthesis -from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols -try: - from TTS.utils.audio import AudioProcessor -except: - from TTS.utils.audio import AudioProcessor - - -from TTS.tts.models import setup_model -from TTS.config import load_config -from TTS.tts.models.vits import * - -OUT_PATH = 'out/' - -# create output path -os.makedirs(OUT_PATH, 
exist_ok=True) - -# model vars -MODEL_PATH = '/home/user/app/best_model_latest.pth.tar' -CONFIG_PATH = '/home/user/app/config.json' -TTS_LANGUAGES = "/home/user/app/language_ids.json" -TTS_SPEAKERS = "/home/user/app/speakers.json" -USE_CUDA = torch.cuda.is_available() - -# load the config -C = load_config(CONFIG_PATH) - - -# load the audio processor -ap = AudioProcessor(**C.audio) - -speaker_embedding = None - -C.model_args['d_vector_file'] = TTS_SPEAKERS -C.model_args['use_speaker_encoder_as_loss'] = False - -model = setup_model(C) -model.language_manager.set_language_ids_from_file(TTS_LANGUAGES) -# print(model.language_manager.num_languages, model.embedded_language_dim) -# print(model.emb_l) -cp = torch.load(MODEL_PATH, map_location=torch.device('cpu')) -# remove speaker encoder -model_weights = cp['model'].copy() -for key in list(model_weights.keys()): - if "speaker_encoder" in key: - del model_weights[key] - -model.load_state_dict(model_weights) - - -model.eval() - -if USE_CUDA: - model = model.cuda() - -# synthesize voice -use_griffin_lim = False - -os.system('pip install -q pydub ffmpeg-normalize') - -CONFIG_SE_PATH = "config_se.json" -CHECKPOINT_SE_PATH = "SE_checkpoint.pth.tar" - -from TTS.tts.utils.speakers import SpeakerManager -from pydub import AudioSegment -import librosa - -SE_speaker_manager = SpeakerManager(encoder_model_path=CHECKPOINT_SE_PATH, encoder_config_path=CONFIG_SE_PATH, use_cuda=USE_CUDA) - -def compute_spec(ref_file): - y, sr = librosa.load(ref_file, sr=ap.sample_rate) - spec = ap.spectrogram(y) - spec = torch.FloatTensor(spec).unsqueeze(0) - return spec - - - -def greet(Text,Voicetoclone,VoiceMicrophone): - text= "%s" % (Text) - if Voicetoclone is not None: - reference_files= "%s" % (Voicetoclone) - print("path url") - print(Voicetoclone) - sample= str(Voicetoclone) - else: - reference_files= "%s" % (VoiceMicrophone) - print("path url") - print(VoiceMicrophone) - sample= str(VoiceMicrophone) - size= 
len(reference_files)*sys.getsizeof(reference_files) - size2= size / 1000000 - if (size2 > 0.012) or len(text)>2000: - message="File is greater than 30mb or Text inserted is longer than 2000 characters. Please re-try with smaller sizes." - print(message) - raise SystemExit("File is greater than 30mb. Please re-try or Text inserted is longer than 2000 characters. Please re-try with smaller sizes.") - else: - os.system('ffmpeg-normalize $sample -nt rms -t=-27 -o $sample -ar 16000 -f') - reference_emb = SE_speaker_manager.compute_d_vector_from_clip(reference_files) - model.length_scale = 1 # scaler for the duration predictor. The larger it is, the slower the speech. - model.inference_noise_scale = 0.3 # defines the noise variance applied to the random z vector at inference. - model.inference_noise_scale_dp = 0.3 # defines the noise variance applied to the duration predictor z vector at inference. - text = text - model.language_manager.language_id_mapping - language_id = 0 - - print(" > text: {}".format(text)) - wav, alignment, _, _ = synthesis( - model, - text, - C, - "cuda" in str(next(model.parameters()).device), - ap, - speaker_id=None, - d_vector=reference_emb, - style_wav=None, - language_id=language_id, - enable_eos_bos_chars=C.enable_eos_bos_chars, - use_griffin_lim=True, - do_trim_silence=False, - ).values() - print("Generated Audio") - IPython.display.display(Audio(wav, rate=ap.sample_rate)) - file_name = text.replace(" ", "_") - file_name = file_name.translate(str.maketrans('', '', string.punctuation.replace('_', ''))) + '.wav' - out_path = os.path.join(OUT_PATH, file_name) - print(" > Saving output to {}".format(out_path)) - ap.save_wav(wav, out_path) - return out_path - -demo = gr.Interface( - fn=greet, - inputs=[gr.inputs.Textbox(label='What would you like the voice to say? (max. 2000 characters per request)'),gr.Audio(type="filepath", source="upload",label='Please upload a voice to clone (max. 
30mb)'),gr.Audio(source="microphone", type="filepath", streaming=True)], - outputs="audio", - title="Bilal's Voice Cloning Tool" - ) -demo.launch() \ No newline at end of file diff --git a/spaces/stamps-labs/stamp2vec/pipelines/detection/yolo_v8.py b/spaces/stamps-labs/stamp2vec/pipelines/detection/yolo_v8.py deleted file mode 100644 index ecef268e4d6e7906568db10c92a7782564725592..0000000000000000000000000000000000000000 --- a/spaces/stamps-labs/stamp2vec/pipelines/detection/yolo_v8.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import Any -from huggingface_hub import hf_hub_download -import torchvision -from torchvision.transforms import ToTensor -import torch - -class Yolov8Pipeline: - def __init__(self): - self.model = None - self.transform = ToTensor() - self.device = 'cuda' if torch.cuda.is_available() else 'cpu' - - @classmethod - def from_pretrained(cls, model_path_hf: str = None, filename_hf: str = "weights.pt", local_model_path: str = None): - yolo = cls() - if model_path_hf is not None and filename_hf is not None: - yolo.model = torch.jit.load(hf_hub_download(model_path_hf, filename=filename_hf), map_location='cpu') - elif local_model_path is not None: - yolo.model = torch.jit.load(local_model_path) - return yolo - - def __call__(self, image, nms_threshold: float = 0.45, conf_threshold: float = 0.15): - shape = torch.tensor(image.size) - coef = torch.hstack((shape, shape)) / 640 - img = image.convert("RGB").resize((640, 640)) - img_tensor = self.transform(img).unsqueeze(0).to(self.device) - pred, boxes, scores = self.model(img_tensor, conf_thres = conf_threshold) - selected = torchvision.ops.nms(boxes, scores, nms_threshold) - predictions_new = list() - for i in selected: - #remove prob and class - pred_i = torch.Tensor(pred[i][:4]) - #Loop through coordinates - for j in range(4): - #If any are negative, map to 0 - if pred_i[j] < 0: - pred_i[j] = 0 - #multiply by coef - pred_i *= coef - predictions_new.append(pred_i) - predictions_new = 
torch.stack(predictions_new, dim=0) - return predictions_new \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Ab Tumhare Hawale Watan Sathiyo Movie Download In Hindi Mp4 Movies UPDATED.md b/spaces/stomexserde/gpt4-ui/Examples/Ab Tumhare Hawale Watan Sathiyo Movie Download In Hindi Mp4 Movies UPDATED.md deleted file mode 100644 index 6cb6dccff42cf71393a9144c16999b1958d7529c..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Ab Tumhare Hawale Watan Sathiyo Movie Download In Hindi Mp4 Movies UPDATED.md +++ /dev/null @@ -1,14 +0,0 @@ - -

    Ab Tumhare Hawale Watan Sathiyo Movie Download In Hindi Mp4 Movies

    -

    Ab Tumhare Hawale Watan Sathiyo is a 2004 Indian war drama film directed by Anil Sharma and Sanjay Sharma. The film stars Amitabh Bachchan, Akshay Kumar, Bobby Deol and Divya Khosla Kumar in the lead roles. The film tells the story of Kunal (Bobby Deol), a young man who reluctantly joins the army to follow his family's tradition and ends up fighting against terrorists who want to disrupt the peace between India and Pakistan.

    -

    Ab Tumhare Hawale Watan Sathiyo Movie Download In Hindi Mp4 Movies


    DOWNLOAD ✦✦✦ https://urlgoal.com/2uI6bA



    -

    The film was released on 24 December 2004 and received mixed reviews from critics. The film was praised for its patriotic theme, action sequences and performances of Bachchan and Kumar, but criticized for its length, melodrama and historical inaccuracies. The film was a moderate success at the box office, earning ₹29.5 crore against a budget of ₹40 crore.

    -

    If you want to watch or download Ab Tumhare Hawale Watan Sathiyo movie in Hindi mp4 format, you can visit the link below[^2^]. However, please be aware that downloading or streaming movies from unauthorized sources may be illegal and unethical. We do not endorse or promote any such websites or services. We recommend you to watch the movie legally from official platforms or cinemas.

    The movie has a complex plot that spans three generations of an army family. The first part of the movie shows the love story of Major Amarjeet Singh (Amitabh Bachchan) and his wife Shweta (Divya Khosla Kumar) during the Indo-Pakistani War of 1971. Amarjeet is captured by the Pakistani army and presumed dead. Shweta gives birth to their son Vikramjeet (Akshay Kumar) and raises him as a single mother.

    -

    The second part of the movie shows Vikramjeet's life as an army officer who falls in love with a journalist named Shilpa (Sandali Sinha). Vikramjeet is assigned to a mission to capture a terrorist leader named Rana (Akshay Kumar in a double role) who is responsible for several attacks on India. Vikramjeet succeeds in capturing Rana but is killed in a bomb blast.

    -

    The third part of the movie shows Kunal's life as a college student who is not interested in joining the army. He meets Sakshi (Divya Khosla Kumar in a double role), who is Shweta's niece and Amarjeet's granddaughter. They fall in love and decide to get married. However, Kunal learns that he has to join the army as per his family's tradition and he reluctantly agrees. He is posted at the same base where Vikramjeet was killed and where Amarjeet is still alive as a prisoner of war. Kunal has to face Rana, who has escaped from custody and is planning to launch a nuclear attack on India.

    -

    The movie ends with a climactic battle between Kunal and Rana, where Kunal manages to stop Rana's plan and kill him. He also rescues Amarjeet from captivity and reunites him with his family. Kunal realizes the value of serving his country and decides to continue his army career.

    Ab Tumhare Hawale Watan Sathiyo is a movie that tries to portray the sacrifices and bravery of the Indian army. The movie has a patriotic message and a star-studded cast. The movie also has some memorable songs composed by Anu Malik and sung by Udit Narayan, Alka Yagnik, Sonu Nigam and others. Some of the popular songs are "Humein Tumse Hua Hai Pyar", "Mujhe Pyar Do", "Chali Aa Chali Aa" and the title song "Ab Tumhare Hawale Watan Sathiyo".

    -

    -

    However, the movie also has several flaws that make it a tedious watch. The movie is too long, running for over three hours. The movie has too many subplots and characters that are not well-developed or connected. The movie has many scenes that are unrealistic, illogical or clichéd. The movie also has some historical errors and inaccuracies that may offend some viewers. For example, the movie shows Amarjeet being captured in 1971 and remaining in captivity for 33 years, which is impossible as the war ended in two weeks and all prisoners of war were exchanged by 1972. The movie also shows Rana having access to a nuclear bomb and a missile launcher, which is highly improbable.

    -

    Overall, Ab Tumhare Hawale Watan Sathiyo is a movie that may appeal to some fans of action and patriotism, but may disappoint others who expect a more realistic and engaging story. The movie is not a masterpiece of cinema, but rather a typical Bollywood masala film that tries to cater to all kinds of audiences.

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Divya Shakti Movie 3gp Free Download.md b/spaces/stomexserde/gpt4-ui/Examples/Divya Shakti Movie 3gp Free Download.md deleted file mode 100644 index 9b93ff48846eef77c8dbe04049c53cf9e2ceab37..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Divya Shakti Movie 3gp Free Download.md +++ /dev/null @@ -1,31 +0,0 @@ - -

    How to Download Divya Shakti Movie in 3GP Format for Free

    -

    Divya Shakti is a 1993 action drama movie starring Ajay Devgan, Raveena Tandon and Amrish Puri. It is directed by Sameer Malkan and produced by Dinesh Patel. The movie tells the story of Prashant Verma, a reporter who fights against corruption and injustice in Mumbai[^1^].

    -

    If you want to watch this movie on your mobile device, you can download it in 3GP format for free from various websites. 3GP is a multimedia container format that can store video and audio streams. It is compatible with most mobile phones and has a small file size.

    -

    Divya Shakti movie 3gp free download


    Download File ✪✪✪ https://urlgoal.com/2uI89K



    -

    However, before you download any movie from the internet, you should be aware of the risks involved. Some websites may contain malware, viruses or spyware that can harm your device or steal your personal information. Some websites may also violate the copyright laws and infringe on the rights of the movie makers.

    -

    Therefore, you should always use a trusted and legal source to download movies online. One such source is IMDb TV, which is a free streaming service that offers thousands of movies and TV shows. You can watch Divya Shakti on IMDb TV with ads or download it for offline viewing.

    -

    -

    To download Divya Shakti movie in 3GP format for free from IMDb TV, you need to follow these steps:

    -
      -
    1. Go to https://www.imdb.com/tv/ and sign up for a free account or log in with your existing account.
    2. -
    3. Search for Divya Shakti in the search bar or browse through the categories.
    4. -
    5. Select the movie and click on the "Watch Now" button.
    6. -
    7. Choose the quality and format of the video. You can select 3GP as the format and low or medium as the quality.
    8. -
    9. Click on the "Download" button and choose a location to save the file on your device.
    10. -
    11. Enjoy watching Divya Shakti movie in 3GP format for free.
    12. -
    -

    We hope this article helped you to download Divya Shakti movie in 3GP format for free. If you have any questions or feedback, please leave a comment below.

    - -

    Divya Shakti is a movie that showcases the transformation of a timid and cowardly man into a brave and fearless hero. Ajay Devgan delivers a powerful performance as Prashant Verma, who faces many challenges and enemies in his quest for justice. Raveena Tandon plays the role of Priya, Prashant's love interest and support system. Amrish Puri plays the role of Tau, the main antagonist and a ruthless gangster who rules Mumbai with an iron fist.

    -

    The movie has many memorable scenes and dialogues that have become iconic in Bollywood. Some of them are:

    -
      -
    • The scene where Prashant confronts Tau and his men in a temple and declares that he is not afraid of anyone.
    • -
    • The scene where Prashant saves Priya from a bomb blast and confesses his love for her.
    • -
    • The scene where Prashant fights against Tau's henchmen in a warehouse and uses various weapons and tactics to defeat them.
    • -
    • The dialogue where Prashant says "Main tumhe maarna nahi chahta, main tumhe jeena nahi doonga" (I don't want to kill you, I won't let you live).
    • -
    • The dialogue where Tau says "Yeh Mumbai hai, yahan koi bhi apni marzi se nahi jeeta" (This is Mumbai, no one lives here as they please).
    • -
    -

    Divya Shakti is a movie that appeals to the masses and the classes alike. It has a gripping storyline, thrilling action sequences, melodious songs and emotional moments. It is a movie that celebrates the spirit of courage, honesty and love. It is a movie that you should not miss.

    cec2833e83
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download ((LINK)) Gta Vice City Mamaia Tpbl.md b/spaces/stomexserde/gpt4-ui/Examples/Download ((LINK)) Gta Vice City Mamaia Tpbl.md deleted file mode 100644 index a358bbb3ca9e5f4af3d6905129a1d1d8ba01f4cd..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Download ((LINK)) Gta Vice City Mamaia Tpbl.md +++ /dev/null @@ -1,27 +0,0 @@ - -

    How to Download GTA Vice City Mamaia, a Romanian Mod for the Classic Game

    -

    GTA Vice City is one of the most popular games in the Grand Theft Auto series, released in 2002 by Rockstar Games. The game is set in a fictional version of Miami in the 1980s, where the player controls Tommy Vercetti, a former mafia member who tries to build his own criminal empire.

    -

    Download Gta Vice City Mamaia Tpbl


    Download Filehttps://urlgoal.com/2uIa8w



    -

    But what if you want to experience a different version of Vice City, with Romanian cars, buildings, and characters? That's where GTA Vice City Mamaia comes in. This is a mod that transforms the original game into a Romanian-themed one, with new vehicles, textures, sounds, and missions. The mod was created by a team of Romanian fans who wanted to pay tribute to their country and culture.

    -

    If you are interested in downloading GTA Vice City Mamaia, here are the steps you need to follow:

    -
      -
    1. First, you need to have the original GTA Vice City game installed on your PC. You can buy it from Rockstar Games or other online platforms.
    2. -
    3. Next, you need to download the GTA Vice City Mamaia mod from this link. The file size is about 177 MB and it contains everything you need to install the mod.
    4. -
    5. After downloading the mod, you need to extract it using a program like WinRAR or 7-Zip. You will get a folder called "GTA Mamaia Vice".
    6. -
    7. Then, you need to copy the contents of this folder and paste them into your GTA Vice City game directory, where you installed the original game. You will be asked to overwrite some files, so click "Yes" or "Replace".
    8. -
    9. Finally, you can launch the game and enjoy GTA Vice City Mamaia. You will see a new intro screen with the mod logo and some Romanian music. You can also access new missions from various locations around the city.
    10. -
    -

    GTA Vice City Mamaia is a fun and unique mod that adds a lot of variety and humor to the classic game. You can drive Dacia cars, visit famous landmarks like the Palace of Parliament or the Black Sea coast, and interact with Romanian characters and celebrities. The mod also has some Easter eggs and references to Romanian culture and history that you can discover along the way.

    -

    If you want to see more of GTA Vice City Mamaia, you can watch some gameplay videos on YouTube, such as this one by Stringer or this one by Stringerino. You can also visit this page for more information and screenshots of the mod.

    -

    - -

    GTA Vice City Mamaia is not the only mod that changes the game's setting and atmosphere. There are many other mods that offer different scenarios and locations for GTA Vice City, such as:

    -
      -
    • GTA Long Night: A mod that turns Vice City into a zombie apocalypse, where the player has to survive and fight against hordes of undead creatures.
    • -
    • GTA Vice City BTTF Hill Valley: A mod that adds the time-traveling DeLorean from the Back to the Future movies, allowing the player to visit different eras and alter the course of history.
    • -
    • GTA Vice City - Ultimate Vice City: A mod that enhances the original game with new cars, weapons, buildings, and features, such as a working train and a helicopter.
    • -
    -

    These are just some examples of the many mods that exist for GTA Vice City. You can find more mods on websites like GTA Garage or GTA Gaming. Mods are a great way to extend the lifespan and replay value of the game, as well as to explore new possibilities and experiences.

    -

    However, before installing any mod, you should always make a backup of your game files and read the instructions carefully. Some mods may not be compatible with each other or with your game version. Some mods may also contain viruses or malware, so you should always scan them with an antivirus program before using them. Installing mods is at your own risk, so be careful and responsible.

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Gathers Navigation System Wx 034c Program Disk.rar.md b/spaces/stomexserde/gpt4-ui/Examples/Gathers Navigation System Wx 034c Program Disk.rar.md deleted file mode 100644 index e21db6f188b77f4d1de8321aca36d67f256903bb..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Gathers Navigation System Wx 034c Program Disk.rar.md +++ /dev/null @@ -1,37 +0,0 @@ -
    -

    How to Update Your Gathers Navigation System with a Program Disk

    -

    Gathers Navigation System is a vehicle telematics service offered by Honda to drivers in Japan. It provides mobile connectivity for on-demand traffic information services and internet provided maps displayed inside selected Honda vehicles. The service is also known as Internavi or HondaLink in other regions.

    -

    gathers navigation system wx 034c program disk.rar


    Download ===> https://urlgoal.com/2uI9yc



    -

    If you have a Gathers Navigation System installed in your Honda vehicle, you may need to update it periodically with a program disk. A program disk is a file that contains the latest software and map data for your navigation system. You can download a program disk from the internet or get one from your dealer.

    -

    One of the program disks available online is the wx 034c program disk.rar. This file is a compressed archive that contains the program disk for the Gathers Navigation System model VXH-072CV. This model is compatible with older vehicles that were not sold with Internavi installed, but can be retrofitted with the system by visiting any Honda dealer or Honda Access accessories retailer.

    -

    To update your Gathers Navigation System with the wx 034c program disk.rar, you need to follow these steps:

    -
      -
    1. Download the wx 034c program disk.rar file from a reliable source. You can find it on some torrent sites or online forums, but be careful of viruses and malware. Alternatively, you can ask your dealer for a copy of the file.
    2. -
    3. Extract the wx 034c program disk.rar file using a software like WinRAR or 7-Zip. You will get a folder named wx 034c that contains several files and subfolders.
    4. -
    5. Copy the wx 034c folder to an SD card that has at least 8 GB of free space. Make sure the SD card is formatted in FAT32 file system.
    6. -
    7. Insert the SD card into the slot of your Gathers Navigation System unit. Turn on your vehicle and wait for the system to boot up.
    8. -
    9. The system will automatically detect the SD card and prompt you to update the software and map data. Follow the on-screen instructions and wait for the update process to complete. Do not turn off your vehicle or remove the SD card during the update.
    10. -
    11. When the update is finished, you will see a confirmation message on the screen. Remove the SD card and restart your vehicle. Your Gathers Navigation System is now updated with the latest software and map data.
    12. -
    -

    Congratulations! You have successfully updated your Gathers Navigation System with the wx 034c program disk.rar. Enjoy your improved navigation experience and drive safely!

    -

    - -

    Benefits of Updating Your Gathers Navigation System

    -

    Updating your Gathers Navigation System with the wx 034c program disk.rar has many benefits for you and your vehicle. Here are some of them:

    -
      -
    • You will get the most accurate and up-to-date map data for your region. This will help you avoid traffic jams, road closures, tolls, and other obstacles that may affect your travel time and fuel efficiency.
    • -
    • You will get the latest software features and enhancements for your navigation system. This will improve the performance, stability, and security of your system. You will also enjoy new functions and options that will make your navigation experience more convenient and enjoyable.
    • -
    • You will extend the lifespan and compatibility of your navigation system. By updating your system regularly, you will prevent it from becoming obsolete or incompatible with newer models or technologies. You will also avoid potential problems or errors that may arise from outdated software or map data.
    • -
    -

    As you can see, updating your Gathers Navigation System with the wx 034c program disk.rar is a smart and worthwhile decision. You will not only save time and money, but also enhance your safety and comfort on the road.

    - -

    How to Get More Information and Support for Your Gathers Navigation System

    -

    If you have any questions or issues regarding your Gathers Navigation System or the wx 034c program disk.rar, you can get more information and support from various sources. Here are some of them:

    -
      -
    • You can visit the official website of Honda or Internavi to get more details about the features and specifications of your navigation system. You can also download manuals, guides, and other resources that will help you use your system effectively.
    • -
    • You can contact your dealer or Honda Access retailer to get more assistance or advice about your navigation system. They can also provide you with the latest program disks or other accessories that you may need for your system.
    • -
    • You can join online forums or communities where other users of Gathers Navigation System share their experiences and tips. You can also ask questions, give feedback, or exchange ideas with other users who have similar interests or issues.
    • -
    -

    By getting more information and support for your Gathers Navigation System, you will be able to maximize its potential and enjoy its benefits. You will also be able to connect with other users who share your passion for navigation and driving.

    81aa517590
    -
    -
    \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/examples/search_google.py b/spaces/sub314xxl/MetaGPT/examples/search_google.py deleted file mode 100644 index df45c29eaf8106c8f42260cba8c15e92d27bc1fc..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/examples/search_google.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/7 18:32 -@Author : alexanderwu -@File : search_google.py -@Modified By: mashenquan, 2023-8-9, fix-bug: cannot find metagpt module. -""" - -import asyncio -from pathlib import Path -import sys -sys.path.append(str(Path(__file__).resolve().parent.parent)) -from metagpt.roles import Searcher - - -async def main(): - await Searcher().run("What are some good sun protection products?") - - -if __name__ == '__main__': - asyncio.run(main()) diff --git a/spaces/sukiru/BlueArchiveTTS/README.md b/spaces/sukiru/BlueArchiveTTS/README.md deleted file mode 100644 index cc0de72eaca5422b6027a67fe491c0a1fe0db6db..0000000000000000000000000000000000000000 --- a/spaces/sukiru/BlueArchiveTTS/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: BlueArchiveTTS -emoji: 📉 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: ORI-Muchim/BlueArchiveTTS ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/sunshineatnoon/TextureScraping/swapae/models/networks/pyramidnet.py b/spaces/sunshineatnoon/TextureScraping/swapae/models/networks/pyramidnet.py deleted file mode 100644 index 277a9ccea936baed3a81a6e9bc59c297162ced9f..0000000000000000000000000000000000000000 --- a/spaces/sunshineatnoon/TextureScraping/swapae/models/networks/pyramidnet.py +++ /dev/null @@ -1,229 +0,0 @@ -# Original code: https://github.com/dyhan0920/PyramidNet-PyTorch/blob/master/PyramidNet.py - -import torch -import torch.nn as nn 
-import math - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - outchannel_ratio = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.bn1 = nn.BatchNorm2d(inplanes) - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn2 = nn.BatchNorm2d(planes) - self.conv2 = conv3x3(planes, planes) - self.bn3 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - - out = self.bn1(x) - out = self.conv1(out) - out = self.bn2(out) - out = self.relu(out) - out = self.conv2(out) - out = self.bn3(out) - if self.downsample is not None: - shortcut = self.downsample(x) - featuremap_size = shortcut.size()[2:4] - else: - shortcut = x - featuremap_size = out.size()[2:4] - - batch_size = out.size()[0] - residual_channel = out.size()[1] - shortcut_channel = shortcut.size()[1] - - if residual_channel != shortcut_channel: - padding = torch.autograd.Variable(torch.cuda.FloatTensor(batch_size, residual_channel - shortcut_channel, featuremap_size[0], featuremap_size[1]).fill_(0)) - out += torch.cat((shortcut, padding), 1) - else: - out += shortcut - - return out - - -class Bottleneck(nn.Module): - outchannel_ratio = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16): - super(Bottleneck, self).__init__() - self.bn1 = nn.BatchNorm2d(inplanes) - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, (planes), kernel_size=3, stride=stride, padding=1, bias=False, groups=1) - self.bn3 = nn.BatchNorm2d((planes)) - self.conv3 = nn.Conv2d((planes), planes * Bottleneck.outchannel_ratio, kernel_size=1, bias=False) - self.bn4 = nn.BatchNorm2d(planes * Bottleneck.outchannel_ratio) - 
self.relu = nn.ReLU(inplace=True) - - self.downsample = downsample - self.stride = stride - - def forward(self, x): - - out = self.bn1(x) - out = self.conv1(out) - - out = self.bn2(out) - out = self.relu(out) - out = self.conv2(out) - - out = self.bn3(out) - out = self.relu(out) - out = self.conv3(out) - - out = self.bn4(out) - if self.downsample is not None: - shortcut = self.downsample(x) - featuremap_size = shortcut.size()[2:4] - else: - shortcut = x - featuremap_size = out.size()[2:4] - - batch_size = out.size()[0] - residual_channel = out.size()[1] - shortcut_channel = shortcut.size()[1] - - if residual_channel != shortcut_channel: - padding = torch.autograd.Variable(torch.cuda.FloatTensor(batch_size, residual_channel - shortcut_channel, featuremap_size[0], featuremap_size[1]).fill_(0)) - out += torch.cat((shortcut, padding), 1) - else: - out += shortcut - - return out - - -class PyramidNet(nn.Module): - - def __init__(self, dataset, depth, alpha, num_classes, bottleneck=False): - super(PyramidNet, self).__init__() - self.dataset = dataset - if self.dataset.startswith('cifar'): - self.inplanes = 16 - if bottleneck == True: - n = int((depth - 2) / 9) - block = Bottleneck - else: - n = int((depth - 2) / 6) - block = BasicBlock - - self.addrate = alpha / (3*n*1.0) - - self.input_featuremap_dim = self.inplanes - self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim) - - self.featuremap_dim = self.input_featuremap_dim - self.layer1 = self.pyramidal_make_layer(block, n) - self.layer2 = self.pyramidal_make_layer(block, n, stride=2) - self.layer3 = self.pyramidal_make_layer(block, n, stride=2) - - self.final_featuremap_dim = self.input_featuremap_dim - self.bn_final= nn.BatchNorm2d(self.final_featuremap_dim) - self.relu_final = nn.ReLU(inplace=True) - self.avgpool = nn.AvgPool2d(8) - self.fc = nn.Linear(self.final_featuremap_dim, num_classes) - - elif dataset == 
'imagenet': - blocks ={18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck} - layers ={18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]} - - if layers.get(depth) is None: - if bottleneck == True: - blocks[depth] = Bottleneck - temp_cfg = int((depth-2)/12) - else: - blocks[depth] = BasicBlock - temp_cfg = int((depth-2)/8) - - layers[depth]= [temp_cfg, temp_cfg, temp_cfg, temp_cfg] - print('=> the layer configuration for each stage is set to', layers[depth]) - - self.inplanes = 64 - self.addrate = alpha / (sum(layers[depth])*1.0) - - self.input_featuremap_dim = self.inplanes - self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.featuremap_dim = self.input_featuremap_dim - self.layer1 = self.pyramidal_make_layer(blocks[depth], layers[depth][0]) - self.layer2 = self.pyramidal_make_layer(blocks[depth], layers[depth][1], stride=2) - self.layer3 = self.pyramidal_make_layer(blocks[depth], layers[depth][2], stride=2) - self.layer4 = self.pyramidal_make_layer(blocks[depth], layers[depth][3], stride=2) - - self.final_featuremap_dim = self.input_featuremap_dim - self.bn_final= nn.BatchNorm2d(self.final_featuremap_dim) - self.relu_final = nn.ReLU(inplace=True) - self.avgpool = nn.AvgPool2d(7) - self.fc = nn.Linear(self.final_featuremap_dim, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. 
/ n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def pyramidal_make_layer(self, block, block_depth, stride=1): - downsample = None - if stride != 1: # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio: - downsample = nn.AvgPool2d((2,2), stride = (2, 2), ceil_mode=True) - - layers = [] - self.featuremap_dim = self.featuremap_dim + self.addrate - layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample)) - for i in range(1, block_depth): - temp_featuremap_dim = self.featuremap_dim + self.addrate - layers.append(block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1)) - self.featuremap_dim = temp_featuremap_dim - self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio - - return nn.Sequential(*layers) - - def forward(self, x): - if self.dataset == 'cifar10' or self.dataset == 'cifar100': - x = self.conv1(x) - x = self.bn1(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - - x = self.bn_final(x) - x = self.relu_final(x) - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - elif self.dataset == 'imagenet': - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.bn_final(x) - x = self.relu_final(x) - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x diff --git a/spaces/supertori/files/stable-diffusion-webui/scripts/postprocessing_upscale.py b/spaces/supertori/files/stable-diffusion-webui/scripts/postprocessing_upscale.py deleted file mode 100644 index ccec72fcbc72eeffbe24a659bf53ecba71162391..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/scripts/postprocessing_upscale.py +++ /dev/null @@ -1,131 +0,0 @@ -from PIL import Image -import numpy as np - -from 
modules import scripts_postprocessing, shared -import gradio as gr - -from modules.ui_components import FormRow - - -upscale_cache = {} - - -class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): - name = "Upscale" - order = 1000 - - def ui(self): - selected_tab = gr.State(value=0) - - with gr.Tabs(elem_id="extras_resize_mode"): - with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: - upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") - - with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: - with FormRow(): - upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") - upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") - upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") - - with FormRow(): - extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - - with FormRow(): - extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") - - tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) - tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) - - return { - "upscale_mode": selected_tab, - "upscale_by": upscaling_resize, - "upscale_to_width": upscaling_resize_w, - "upscale_to_height": upscaling_resize_h, - "upscale_crop": upscaling_crop, - "upscaler_1_name": extras_upscaler_1, - "upscaler_2_name": extras_upscaler_2, - "upscaler_2_visibility": extras_upscaler_2_visibility, - } - - def 
upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop): - if upscale_mode == 1: - upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height) - info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}" - else: - info["Postprocess upscale by"] = upscale_by - - cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - cached_image = upscale_cache.pop(cache_key, None) - - if cached_image is not None: - image = cached_image - else: - image = upscaler.scaler.upscale(image, upscale_by, upscaler.data_path) - - upscale_cache[cache_key] = image - if len(upscale_cache) > shared.opts.upscaling_max_images_in_cache: - upscale_cache.pop(next(iter(upscale_cache), None), None) - - if upscale_mode == 1 and upscale_crop: - cropped = Image.new("RGB", (upscale_to_width, upscale_to_height)) - cropped.paste(image, box=(upscale_to_width // 2 - image.width // 2, upscale_to_height // 2 - image.height // 2)) - image = cropped - info["Postprocess crop to"] = f"{image.width}x{image.height}" - - return image - - def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0): - if upscaler_1_name == "None": - upscaler_1_name = None - - upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_1_name]), None) - assert upscaler1 or (upscaler_1_name is None), f'could not find upscaler named {upscaler_1_name}' - - if not upscaler1: - return - - if upscaler_2_name == "None": - upscaler_2_name = None - - upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None) - assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}' - - 
upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - pp.info[f"Postprocess upscaler"] = upscaler1.name - - if upscaler2 and upscaler_2_visibility > 0: - second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop) - upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility) - - pp.info[f"Postprocess upscaler 2"] = upscaler2.name - - pp.image = upscaled_image - - def image_changed(self): - upscale_cache.clear() - - -class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale): - name = "Simple Upscale" - order = 900 - - def ui(self): - with FormRow(): - upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) - upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2) - - return { - "upscale_by": upscale_by, - "upscaler_name": upscaler_name, - } - - def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None): - if upscaler_name is None or upscaler_name == "None": - return - - upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None) - assert upscaler1, f'could not find upscaler named {upscaler_name}' - - pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False) - pp.info[f"Postprocess upscaler"] = upscaler1.name diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Uttama Villain Movie Download In Tamilrockers Tamil TOP.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Uttama Villain Movie Download In Tamilrockers Tamil TOP.md deleted file mode 100644 index 0fe4d6b6fe4662dfe3a4d17777db8b876f00166b..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Uttama Villain Movie 
Download In Tamilrockers Tamil TOP.md +++ /dev/null @@ -1,19 +0,0 @@ - -

    Uttama Villain Movie Download in Tamilrockers Tamil: A Review

    -

    Uttama Villain is a 2015 Tamil comedy drama film starring Kamal Haasan, K. Balachander, K. Viswanath, and Jayaram. The film is directed by Ramesh Aravind and written by Kamal Haasan and Crazy Mohan. The film revolves around Manoranjan, a superstar who is diagnosed with a brain tumor and decides to make his last film a comedy with his mentor Margadarsi.

    -

    uttama villain movie download in tamilrockers tamil


    DOWNLOAD ✫✫✫ https://cinurl.com/2uEXqW



    -

    The film was released on May 2, 2015 and received positive reviews from critics and audiences. The film was praised for its performances, humor, music, and emotional depth. The film also features Kamal Haasan's real-life mentor K. Balachander in his last appearance on screen.

    -

    If you are looking for Uttama Villain movie download in Tamilrockers Tamil, you might be disappointed as the site is blocked by the government for piracy issues. However, you can watch the film legally on ZEE5, where it is available in full HD quality with subtitles. ZEE5 is a popular OTT platform that offers a wide range of movies, shows, and originals in various languages.

    -

    To watch Uttama Villain movie online on ZEE5, you need to subscribe to the platform with a monthly or annual plan. You can also enjoy a free trial for 14 days before deciding to pay. ZEE5 also has other features like offline download, live TV, and smart search.

    -

    So, what are you waiting for? Watch Uttama Villain movie online on ZEE5 and enjoy a hilarious and heartwarming film that showcases the talent and versatility of Kamal Haasan.

    -

    - -

    Uttama Villain movie has two parallel stories - one set in the present day and the other in the 8th century. In the present day, Manoranjan tries to reconcile with his son Manohar, who is angry with him for neglecting him and his mother. He also tries to bond with his daughter Manonmani, who is a doctor and a fan of his films. He introduces her to Margadarsi, who is like a father figure to him.

    -

    In the 8th century, Manoranjan plays Uttaman, a folk artist who is famous for his role as a villain in a street play. He is in love with Karpagavalli, a princess who is betrothed to a cruel king. Uttaman and Karpagavalli elope with the help of his friend Chokku Chettiar, but are pursued by the king's army. Along the way, they encounter various adventures and dangers.

    -

    The film explores the themes of life, death, love, art, and legacy through the two stories. It also pays tribute to the history and culture of Tamil cinema and theatre. The film has a stellar cast of veteran actors and actresses who deliver memorable performances. The film also has a rich musical score by Ghibran, who blends classical and modern elements.

    - -

    Uttama Villain movie is not just a comedy drama, but also a tribute to the history and culture of Tamil cinema and theatre. The film references many classic films and plays, such as Andha Naal, Avvaiyar, Thillana Mohanambal, and Manohara. The film also pays homage to the legends of Tamil cinema, such as Sivaji Ganesan, MGR, Nagesh, and of course, K. Balachander. The film also showcases the art of Theeyam, a ritual dance form from Kerala.

    -

    Uttama Villain movie is also a reflection of Kamal Haasan's own life and career. The film explores the themes of mortality, legacy, art, and love that are close to his heart. The film also showcases his versatility as an actor, writer, singer, and dancer. He plays two contrasting roles - one of a superstar who is facing his end and the other of a folk artist who is immortalized on screen. He also sings and dances in different styles and languages.

    -

    Uttama Villain movie is a film that will appeal to the fans of Kamal Haasan and Tamil cinema. It is a film that celebrates the life and art of a legend. It is a film that will make you laugh, cry, and think. It is a film that will stay with you long after it ends.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Ys Memories Of Celceta Update V20180803-CODEX Crack Free [HOT].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Ys Memories Of Celceta Update V20180803-CODEX Crack Free [HOT].md deleted file mode 100644 index 597416726929080f112f89ba7e9c1ff8d0a3712c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Ys Memories Of Celceta Update V20180803-CODEX Crack Free [HOT].md +++ /dev/null @@ -1,26 +0,0 @@ -

    Ys Memories of Celceta Update v20180803-CODEX crack free


    Download Zip 🗹 https://cinurl.com/2uEYBJ



    - -He is promptly kidnapped by the Emperor, and taken to the remote city of Nelvil, where he discovers that he is not the only survivor of a terrible calamity, and that his comrades and fellow adventurers are trapped there as well. These are the members of the Celceta Expedition, a group of famous adventurers whose adventures are chronicled in a small, public journal called the "Gazette." Adol sets out to solve the mystery of what happened to them, and to reunite with the other survivors. It's a race against time and evil forces to uncover the truth of what happened to Adol's friends and companions, who may be dead, or captured, or just as likely, transformed into monsters by the foul creatures of this strange land. [Dungeon World, RPG]My boyfriend, P.J., has changed a lot in the last year. - -It’s been a pretty dramatic journey, full of ups and downs. Things could have gone a different way, but they haven’t. - -But a lot of it comes down to his relationship with his parents. When he was little, they were so different. His mom was much more traditional and his dad was not. It wasn’t until later that P.J. got to see them for who they really were, and those differences started to come out. - -They started getting along better and he had a lot of fun with his dad, but mom was never a fan. - -When P.J. told me that his mom didn’t like me, I asked him if that was why he’d been hanging out with me less. - -He said no, that it was because she kept telling him to go hang out with his friends instead, and that he didn’t want to make her mad. - -This happened a lot and I was always a little hurt. I never said anything, because I didn’t want to be the one to make things worse. - -But as we’ve gotten closer, I’ve realized how much he’s been struggling, because of how things with his parents have gone. - -He says that his mom has really changed since then. He’s seen her get into arguments with his friends about political issues, and her anger at him is getting worse. 
- -“I’m becoming a piece of shit,” he told me. - -One day when I asked 4fefd39f24
    -
    -
    -

    diff --git a/spaces/svdiff-library/SVDiff-Training-UI/app_training.py b/spaces/svdiff-library/SVDiff-Training-UI/app_training.py deleted file mode 100644 index 190b340d30754496caf9e58680c673a792f99842..0000000000000000000000000000000000000000 --- a/spaces/svdiff-library/SVDiff-Training-UI/app_training.py +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os - -import gradio as gr - -from constants import UploadTarget -from inference import InferencePipeline -from trainer import Trainer - - -def create_training_demo(trainer: Trainer, - pipe: InferencePipeline | None = None) -> gr.Blocks: - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - with gr.Box(): - gr.Markdown('Training Data') - instance_images = gr.Files(label='Instance images') - instance_prompt = gr.Textbox(label='Instance prompt', - max_lines=1) - gr.Markdown(''' - - Upload images of the style you are planning on training on. - - For an instance prompt, use a unique, made up word to avoid collisions. - ''') - with gr.Box(): - gr.Markdown('Output Model') - output_model_name = gr.Text(label='Name of your model', - max_lines=1) - delete_existing_model = gr.Checkbox( - label='Delete existing model of the same name', - value=False) - validation_prompt = gr.Text(label='Validation Prompt') - with gr.Box(): - gr.Markdown('Upload Settings') - with gr.Row(): - upload_to_hub = gr.Checkbox( - label='Upload model to Hub', value=True) - use_private_repo = gr.Checkbox(label='Private', - value=True) - delete_existing_repo = gr.Checkbox( - label='Delete existing repo of the same name', - value=False) - upload_to = gr.Radio( - label='Upload to', - choices=[_.value for _ in UploadTarget], - value=UploadTarget.SVDIFF_LIBRARY.value) - gr.Markdown(''' - - By default, trained models will be uploaded to [SVDiff-pytorch Library](https://huggingface.co/svdiff-library). 
- - You can also choose "Personal Profile", in which case, the model will be uploaded to https://huggingface.co/{your_username}/{model_name}. - ''') - - with gr.Box(): - gr.Markdown('Training Parameters') - with gr.Row(): - base_model = gr.Text( - label='Base Model', - value='runwayml/stable-diffusion-v1-5', - max_lines=1) - resolution = gr.Dropdown(choices=['512', '768'], - value='512', - label='Resolution') - num_training_steps = gr.Number( - label='Number of Training Steps', value=1000, precision=0) - learning_rate = gr.Number(label='Learning Rate', value=0.001) - gradient_accumulation = gr.Number( - label='Number of Gradient Accumulation', - value=1, - precision=0) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=100000, - step=1, - value=0) - fp16 = gr.Checkbox(label='FP16', value=False) - use_8bit_adam = gr.Checkbox(label='Use 8bit Adam', value=True) - gradient_checkpointing = gr.Checkbox(label='Use gradient checkpointing', value=True) - # enable_xformers_memory_efficient_attention = gr.Checkbox(label='Use xformers', value=True) - checkpointing_steps = gr.Number(label='Checkpointing Steps', - value=200, - precision=0) - use_wandb = gr.Checkbox(label='Use W&B', - value=False, - interactive=bool( - os.getenv('WANDB_API_KEY'))) - validation_epochs = gr.Number(label='Validation Epochs', - value=200, - precision=0) - gr.Markdown(''' - - The base model must be a model that is compatible with [diffusers](https://github.com/huggingface/diffusers) library. - - It takes a few minutes to download the base model first. - - You may want to try a small number of steps first, like 1, to see if everything works fine in your environment. - - You can check the training status by pressing the "Open logs" button if you are running this on your Space. - - You need to set the environment variable `WANDB_API_KEY` if you'd like to use [W&B](https://wandb.ai/site). See [W&B documentation](https://docs.wandb.ai/guides/track/advanced/environment-variables). 
- - **Note:** Due to [this issue](https://github.com/huggingface/accelerate/issues/944), currently, training will not terminate properly if you use W&B. - ''') - - remove_gpu_after_training = gr.Checkbox( - label='Remove GPU after training', - value=False, - interactive=bool(os.getenv('SPACE_ID')), - visible=False) - run_button = gr.Button('Start Training') - - with gr.Box(): - gr.Markdown('Output message') - output_message = gr.Markdown() - - if pipe is not None: - run_button.click(fn=pipe.clear) - run_button.click(fn=trainer.run, - inputs=[ - instance_images, - instance_prompt, - output_model_name, - delete_existing_model, - validation_prompt, - base_model, - resolution, - num_training_steps, - learning_rate, - gradient_accumulation, - seed, - fp16, - use_8bit_adam, - gradient_checkpointing, - # enable_xformers_memory_efficient_attention, - checkpointing_steps, - use_wandb, - validation_epochs, - upload_to_hub, - use_private_repo, - delete_existing_repo, - upload_to, - remove_gpu_after_training, - ], - outputs=output_message) - return demo - - -if __name__ == '__main__': - hf_token = os.getenv('HF_TOKEN') - trainer = Trainer(hf_token) - demo = create_training_demo(trainer) - demo.queue(max_size=1).launch(share=True, debug=True) diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/registry.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/registry.py deleted file mode 100644 index 39eabc58db4b5954478a2ac1ab91cea5e45ab055..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/bricks/registry.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from annotator.uniformer.mmcv.utils import Registry - -CONV_LAYERS = Registry('conv layer') -NORM_LAYERS = Registry('norm layer') -ACTIVATION_LAYERS = Registry('activation layer') -PADDING_LAYERS = Registry('padding layer') -UPSAMPLE_LAYERS = Registry('upsample layer') -PLUGIN_LAYERS = Registry('plugin layer') - -DROPOUT_LAYERS = Registry('drop out layers') -POSITIONAL_ENCODING = Registry('position encoding') -ATTENTION = Registry('attention') -FEEDFORWARD_NETWORK = Registry('feed-forward Network') -TRANSFORMER_LAYER = Registry('transformerLayer') -TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence') diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py deleted file mode 100644 index 88bb10d44026ba9f21756eaea9e550841cd59b9f..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/seg/sampler/ohem_pixel_sampler.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -import torch.nn.functional as F - -from ..builder import PIXEL_SAMPLERS -from .base_pixel_sampler import BasePixelSampler - - -@PIXEL_SAMPLERS.register_module() -class OHEMPixelSampler(BasePixelSampler): - """Online Hard Example Mining Sampler for segmentation. - - Args: - context (nn.Module): The context of sampler, subclass of - :obj:`BaseDecodeHead`. - thresh (float, optional): The threshold for hard example selection. - Below which, are prediction with low confidence. If not - specified, the hard examples will be pixels of top ``min_kept`` - loss. Default: None. - min_kept (int, optional): The minimum number of predictions to keep. - Default: 100000. 
- """ - - def __init__(self, context, thresh=None, min_kept=100000): - super(OHEMPixelSampler, self).__init__() - self.context = context - assert min_kept > 1 - self.thresh = thresh - self.min_kept = min_kept - - def sample(self, seg_logit, seg_label): - """Sample pixels that have high loss or with low prediction confidence. - - Args: - seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) - seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) - - Returns: - torch.Tensor: segmentation weight, shape (N, H, W) - """ - with torch.no_grad(): - assert seg_logit.shape[2:] == seg_label.shape[2:] - assert seg_label.shape[1] == 1 - seg_label = seg_label.squeeze(1).long() - batch_kept = self.min_kept * seg_label.size(0) - valid_mask = seg_label != self.context.ignore_index - seg_weight = seg_logit.new_zeros(size=seg_label.size()) - valid_seg_weight = seg_weight[valid_mask] - if self.thresh is not None: - seg_prob = F.softmax(seg_logit, dim=1) - - tmp_seg_label = seg_label.clone().unsqueeze(1) - tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0 - seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1) - sort_prob, sort_indices = seg_prob[valid_mask].sort() - - if sort_prob.numel() > 0: - min_threshold = sort_prob[min(batch_kept, - sort_prob.numel() - 1)] - else: - min_threshold = 0.0 - threshold = max(min_threshold, self.thresh) - valid_seg_weight[seg_prob[valid_mask] < threshold] = 1. - else: - losses = self.context.loss_decode( - seg_logit, - seg_label, - weight=None, - ignore_index=self.context.ignore_index, - reduction_override='none') - # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa - _, sort_indices = losses[valid_mask].sort(descending=True) - valid_seg_weight[sort_indices[:batch_kept]] = 1. 
- - seg_weight[valid_mask] = valid_seg_weight - - return seg_weight diff --git a/spaces/swhyuni/Digital-Financial-Advisory-for-Mutual-Funds/app.py b/spaces/swhyuni/Digital-Financial-Advisory-for-Mutual-Funds/app.py deleted file mode 100644 index 4e33db834fb956ae84cf0d9f773b275ab58f8f0a..0000000000000000000000000000000000000000 --- a/spaces/swhyuni/Digital-Financial-Advisory-for-Mutual-Funds/app.py +++ /dev/null @@ -1,542 +0,0 @@ -import streamlit as st -from streamlit_option_menu import option_menu -import yfinance as yf -import pandas as pd -import matplotlib.pyplot as plt -import matplotlib.dates as mdates -import seaborn as sns -import datetime -from PIL import Image - -st.set_page_config(page_title = 'Your personal Digital Financial Advisory for Mutual Funds',layout='wide', initial_sidebar_state='expanded') - -dfp = pd.read_csv(r'https://github.com/H8-Assignments-Bay/p2---final-project-ftds-016-rmt-group-002/raw/main/dataset_predict/Pasar%20Uang.csv') -dfb = pd.read_csv(r'https://github.com/H8-Assignments-Bay/p2---final-project-ftds-016-rmt-group-002/raw/main/dataset_predict/Bond.csv') -dfss = pd.read_csv(r'https://github.com/H8-Assignments-Bay/p2---final-project-ftds-016-rmt-group-002/raw/main/dataset_predict/Saham.csv') -dfl1 = pd.read_csv(r'dfl1.csv') -dfl2 = pd.read_csv(r'dfl2.csv') -dfl3 = pd.read_csv(r'dfl3.csv') -dfl4 = pd.read_csv(r'dfl4.csv') -dfl5 = pd.read_csv(r'dfl5.csv') -dfl6 = pd.read_csv(r'dfl6.csv') -dfl7 = pd.read_csv(r'dfl7.csv') -dfl8 = pd.read_csv(r'dfl8.csv') -dfl9 = pd.read_csv(r'dfl9.csv') -dfl10 = pd.read_csv(r'dfl10.csv') -dfb1 = pd.read_csv(r'dfb1.csv') -dfb2 = pd.read_csv(r'dfb2.csv') -dfb3 = pd.read_csv(r'dfb3.csv') -dfb4 = pd.read_csv(r'dfb4.csv') -dfb5 = pd.read_csv(r'dfb5.csv') -dfb6 = pd.read_csv(r'dfb6.csv') -dfb7 = pd.read_csv(r'dfb7.csv') -dfb8 = pd.read_csv(r'dfb8.csv') -dfb9 = pd.read_csv(r'dfb9.csv') -dfb10 = pd.read_csv(r'dfb10.csv') -dfs1 = pd.read_csv(r'dfs1.csv') -dfs2 = pd.read_csv(r'dfs2.csv') -dfs3 = 
pd.read_csv(r'dfs3.csv') -dfs4 = pd.read_csv(r'dfs4.csv') -dfs5 = pd.read_csv(r'dfs5.csv') -dfs6 = pd.read_csv(r'dfs6.csv') -dfs7 = pd.read_csv(r'dfs7.csv') -dfs8 = pd.read_csv(r'dfs8.csv') -dfs9 = pd.read_csv(r'dfs9.csv') -dfs10 = pd.read_csv(r'dfs10.csv') -dfl = pd.read_csv(r'https://github.com/H8-Assignments-Bay/p2---final-project-ftds-016-rmt-group-002/raw/main/dataset_predict/final_lowrisk.csv') -dfm = pd.read_csv(r'https://github.com/H8-Assignments-Bay/p2---final-project-ftds-016-rmt-group-002/raw/main/dataset_predict/final_medrisk.csv') -dfh = pd.read_csv(r'https://github.com/H8-Assignments-Bay/p2---final-project-ftds-016-rmt-group-002/raw/main/dataset_predict/Final_highrisk.csv') -dfs = pd.read_csv(r'https://github.com/H8-Assignments-Bay/p2---final-project-ftds-016-rmt-group-002/raw/main/dataset_predict/final_saham.csv') -dfl=dfl.drop(['Unnamed: 0'], axis = 1) -dfm=dfm.drop(['Unnamed: 0'], axis = 1) -dfh=dfh.drop(['Unnamed: 0'], axis = 1) -dfs=dfs.drop(['Unnamed: 0'], axis = 1) -aum_low = [11.38, 2.01, 2.29, 2.07, 2.96, 4.28, 2.62, 4.9, 9.49, 3.73] -dfl['AUM'] = aum_low -aum_med = [1.51, 1.51, 1.58, 4.76, 5.57, 0.85, 2.12, 14.56, 1.29, 15.92] -dfm['AUM'] = aum_med -aum_high = [4.83, 03.02, 1.72, 3.17, 1.38, 0.62, 1.13, 0.36, 0.98, 6.18] -dfh['AUM'] = aum_high - -dfm = dfm.round(decimals=2) - -ls1 = {'Manulife Dana Saham Kelas A':['ADRO','ASII','BBCA','BMRI','BBNI','BBRI','GOTO','MCAS','TLKM','UNTR'], - 'Batavia Dana Saham': ['BBRI','TLKM','BMRI','BBCA','BBNI','MDKA','ADRO','KLBF','MYOR','ASII'], - 'Sucroinvest Equity Fund':['ADRO','ASII','BBRI','BBTN','BUMI','MFIN','PGAS','MYOH','TLKM','EXCL'], - 'Manulife Saham Andalan':['ADRO','BBCA','BMRI','PNBN','BBRI','GOTO','MCAS','MDKA','PNLF','TLKM'], - 'BNI-AM Indeks IDX30':['ADRO','ASII','BBCA','BBNI','BBRI','BMRI','GOTO','MDKA','TLKM','UNTR'], - 'Sucroinvest Sharia Equity Fund':['HOKI','ENAK','CSMI','ECII','ENRG','KBLI','MNCN','PGAS','MYOH','SCCO'], - 'BNI-AM Inspiring Equity Fund 
':['ADRO','ASII','BBCA','BBNI','BBRI','BMRI','EXCL','SMGR','TLKM','TOWR'], - 'Simas Saham Unggulan':['ASII','BBCA','BMRI','BBNI','BBRI','ICBP','KLBF','MYOR','AMRT','TLKM'], - 'Schroder 90 Plus Equity Fund':['BBCA','BMRI','BBNI','BBRI','KLBF','MYOR','MDKA','MAPI','MLBI','TLKM'], - 'DanaReksa Mawah Konsumer 10 Kelas A':['ADRO','ASII','BBCA','BBNI','BBRI','BMRI','INCO','MEDC','MYOR','TLKM']} - -dfx = pd.DataFrame(ls1) - -pageicon = Image.open('logodifa-removebg-preview.png') -#st.set_page_config(page_title = 'Your personal Digital Financial Advisory for Mutual Funds',page_icon=pageicon, layout='wide', initial_sidebar_state='expanded') - -def add_bg_from_url(): - st.markdown( - f""" - - """, - unsafe_allow_html=True - ) - -add_bg_from_url() - -#header = Image.open('logodifa-removebg-preview.png') -#header2 = Image.open('images-icone.png') -# header2 = header2.resize((1300,300)) - -st.subheader('Your personal Digital Financial Advisory for Mutual Funds') -st.write('-----') - -with st.sidebar: - selected = option_menu( - menu_title="Main Menu", - options=['Home', 'Profile Risk'], - icons=['house','cash'], - menu_icon='cast', - default_index=0, - ) - -if selected == 'Home': - #home.run() - st.write(""" - # Trend - Show are Corresponding Mutual Fund Trend Based On **NAV (Net Asset Value)** - """) - def user_input(): - stock_symbol = st.sidebar.selectbox('Category', ('Money Market', 'Bond', 'stock mutual funds')) - - tickerData = yf.Ticker(stock_symbol+'.JK') - return stock_symbol - - stock_symbol = user_input() - if stock_symbol == "Money Market" : - #low.run() - def user_input_low(): - low_symbol = st.sidebar.selectbox('Mutual Funds', ('Batavia Dana Kas Maxima','Sucorinvest Money Market Fund','Bahana Dana Likuid','Manulife Dana Kas II Kelas A','TRIM Kas 2','Danareksa Seruni Pasar Uang III','Sucorinvest Sharia Money Market Fund','Danamas Rupiah Plus','Danareksa Seruni Pasar Uang II','BNI-AM Dana Likuid')) - tickerData = yf.Ticker(low_symbol+'.JK') - return low_symbol - 
low_symbol = user_input_low() - if low_symbol == 'Batavia Dana Kas Maxima' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl2, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - if low_symbol == 'Sucorinvest Money Market Fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl7, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - plt.ylim(reversed(plt.ylim())) - line.xaxis.set_major_locator(locator) - line.yaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'Bahana Dana Likuid' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl1, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - plt.ylim(reversed(plt.ylim())) - line.xaxis.set_major_locator(locator) - line.yaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'Manulife Dana Kas II Kelas A' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl6, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'TRIM Kas 2' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl10, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - plt.ylim(reversed(plt.ylim())) - line.xaxis.set_major_locator(locator) - line.yaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'Danareksa Seruni Pasar Uang III' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl5, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'Sucorinvest Sharia Money Market Fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl9, y="Present", x="Date") - locator = 
mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'Danamas Rupiah Plus' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl4, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'Danareksa Seruni Pasar Uang II' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl5, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif low_symbol == 'BNI-AM Dana Likuid' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfl3, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - st.write(""" - # Top 10 Money Market Mutual Funds - Show are Top 10 Money Market Mutual Fund Based On **AUM (Asset Under Management)** - """) - fig = plt.figure(figsize=(15,5)) - sns.barplot(data=dfp, y="Mutual Funds", x="AUM") - st.pyplot(fig) - - elif stock_symbol == "Bond" : - #med.run() - def user_input_med(): - med_symbol = st.sidebar.selectbox('Mutual Funds', ('ABF Indonesia Bond Index Fund', - 'Batavia Dana Obligasi Ultima', - 'Danamas_Stabil', - 'Eastspring IDR Fixed Income Fund Kelas A', - 'Eastspring Syariah Fixed Income Amanah Kelas A', - 'Manulife Obligasi Negara Indonesia II Kelas A', - 'Manulife Obligasi Unggulan Kelas A', - 'Schroder Dana Mantap Plus II', - 'Sucorinvest Sharia Sukuk Funds', - 'Sucorinvest Stable Fund')) - tickerData = yf.Ticker(med_symbol+'.JK') - return med_symbol - med_symbol = user_input_med() - if med_symbol == 'ABF Indonesia Bond Index Fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb1, y="Present", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - 
st.markdown('---') - - if med_symbol == 'Batavia Dana Obligasi Ultima' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb2, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Danamas_Stabil' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb3, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Eastspring IDR Fixed Income Fund Kelas A' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb4, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Eastspring Syariah Fixed Income Amanah Kelas A' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb5, y="Present", x="Date") - locator = mdates.DayLocator(interval=10) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Manulife Obligasi Negara Indonesia II Kelas A' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb6, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Manulife Obligasi Unggulan Kelas A' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb7, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Schroder Dana Mantap Plus II' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb8, y="Present", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Sucorinvest Sharia Sukuk Funds' : - fig = plt.figure(figsize=(15,5)) - line = 
sns.lineplot(data=dfb9, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif med_symbol == 'Sucorinvest Stable Fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfb10, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - st.write(""" - # Top 10 Money Market Mutual Funds - Show are Top 10 Money Market Mutual Fund Based On **AUM (Asset Under Management)** - """) - fig = plt.figure(figsize=(15,5)) - sns.barplot(data=dfb, y="Mutual Funds", x="AUM") - st.pyplot(fig) - - else : - #high.run() - def user_input_high(): - saham_symbol = st.sidebar.selectbox('Mutual Funds', ('Manulife dana saham kelas A', - 'Batavia Dana Saham', - 'Sucorinvest equity fund', - 'Manulife Saham Andalan', - 'BNI-AM Indeks IDX30', - 'BNI-AM Dana Saham Inspiring Equity Fund', - 'Simas Saham Unggulan', - 'Schroder 90 Plus Equity Fund', - 'Sucorinvest Sharia Equity Fund', - 'Danareksa Mawar Konsumer 10 Kelas A')) - tickerData = yf.Ticker(saham_symbol+'.JK') - return saham_symbol - saham_symbol = user_input_high() - if saham_symbol == 'Manulife dana saham kelas A' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs1, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - if saham_symbol == 'Batavia Dana Saham' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs2, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif saham_symbol == 'Sucorinvest equity fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs3, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif 
saham_symbol == 'Manulife Saham Andalan' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs4, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif saham_symbol == 'BNI-AM Indeks IDX30' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs5, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif saham_symbol == 'BNI-AM Dana Saham Inspiring Equity Fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs6, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif saham_symbol == 'Simas Saham Unggulan' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs7, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif saham_symbol == 'Schroder 90 Plus Equity Fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs8, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif saham_symbol == 'Sucorinvest Sharia Equity Fund' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs9, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - st.markdown('---') - - elif saham_symbol == 'Danareksa Mawar Konsumer 10 Kelas A' : - fig = plt.figure(figsize=(15,5)) - line = sns.lineplot(data=dfs10, y="value", x="date") - locator = mdates.DayLocator(interval=10) - line.xaxis.set_major_locator(locator) - st.pyplot(fig) - - st.markdown('---') - st.write(""" - # Top 10 Money Market Mutual Funds - Show are Top 10 Money Market Mutual Fund Based On **AUM (Asset Under 
Management)** - """) - fig = plt.figure(figsize=(15,5)) - sns.barplot(data=dfss, y="Mutual Funds", x="AUM") - st.pyplot(fig) - - -elif selected == 'Profile Risk': - #else: - #prediction.run() - st.title('Profiling Questionnaries') - st.markdown('---') - - # Create Form - - with st.form(key='form_parameters'): - st.subheader('A. Investment Time Horizon') - A1 = st.selectbox('As I withdraw money from these investments, I plan to spend it over a period of...', ('10 years or more','7-9 years','4-6 years','1-3 years','Less than 1 year')) - A2 = st.selectbox('I plan to begin taking money from my investments in...',('10 years or more', '6-10 years','3-5 years','1-2 years','1 or less than a year')) - - st.markdown('---') - st.subheader('B. Investment Knowlegde') - B1 = st.selectbox('When it comes to investing in stock or bond mutual funds or ETFs - or individual stocks or bonds - I would describe myself as...',('Very experienced (≥ 10 years)','Experienced (8 - 10 years)','Somewhat experienced (4 - 7 years)','Somewhat inexperienced (< 4 years)','Very inexperienced (0)')) - - st.markdown('---') - st.subheader('C. Risk Capacity') - C1 = st.selectbox('My purpose of investing money is...',('For the long term wealth growth','For the long term revenue and growth','Periodic income','Income and security of investment funds','Security of investment funds')) - C2 = st.selectbox('My current and future income sources (for example, salary, social security, pensions) are...',('Very stable','Stable','Somewhat stable','Unstable','Very unstable')) - C3 = st.selectbox('How many percent of income that will be invested?', ('> 50%','> 25%-50%', '> 10%-25%','> 0% - 10%','0%')) - C4 = st.selectbox('How many percent of loss investment that can be beared?',('> 50%','> 25%-50%', '> 10%-25%','> 0% - 10%','0%')) - - st.markdown('---') - st.subheader('D. Risk Attitude') - D1 = st.selectbox('From September 2022 through October 2022, bonds lost 4%. 
If I owned a bond investment that lost 4% in two months, I would...', ('Sell all the remaining investment','Sell a portion of the remaining investment','Hold onto the investment and sell nothing','Buy more of the remaining investment')) - D2 = st.selectbox('The chart shows the greatest 1-year loss and the highest 1-year gain on 3 different hypothetical investments of $10,000.* Given the potential gain or loss in any 1 year, I would invest my money in...',('EITHER a loss of $0 OR a gain of $200','EITHER a loss of $200 OR a gain of $500','EITHER a loss of $800 OR a gain of $1,200','EITHER a loss of $2,000 OR a gain of $2,500')) - image = Image.open('Range-Outcomes.png') - st.image(image) - D3 = st.selectbox('Investments with higher returns typically involve greater risk. The charts below show hypothetical annual returns (annual gains and losses) for four different investment portfolios over a 10 year period. Keeping in mind how the returns fluctuate, which investment portfolio would you be most comfortable holding?', ('Portfolio A', 'Portfolio B','Portfolio C','Portfolio D')) - image = Image.open('image.png') - st.image(image) - - submitted = st.form_submit_button('Submit') - - data_inf = { - 'A1' : A1, - 'A2' : A2, - 'B1' : B1, - 'C1' : C1, - 'C2' : C2, - 'C3' : C3, - 'C4' : C4, - 'D1' : D1, - 'D2' : D2, - 'D3' : D3 - } - - data_inf = pd.DataFrame([data_inf]) - st.dataframe(data_inf) - - if submitted: - data_inf['A1']=data_inf.A1.map({'10 years or more': 5, '7-9 years': 4,'4-6 years':3, '1-3 years':2, 'Less than 1 year':1}) - data_inf['A2']=data_inf.A2.map({'More than 8 years': 5, '7-8 years': 4,'5-6 years':3, '3-4 years':2, '1-2 years':1}) - data_inf['B1']=data_inf.B1.map({'Very experienced (≥ 10 years)': 5, 'Experienced (8 - 10 years)': 4,'Somewhat experienced (4 - 7 years)':3, 'Somewhat inexperienced (< 4 years)':2, 'Very inexperienced (0)':1}) - data_inf['C1']=data_inf.C1.map({'For the long term wealth growth': 5, 'For the long term revenue and growth': 
4,'Periodic income':3, 'Income and security of investment funds':2, 'Security of investment funds':1}) - data_inf['C2']=data_inf.C2.map({'Very stable': 5, 'Stable': 4,'Somewhat stable':3, 'Unstable':2, 'Very unstable':1}) - data_inf['C3']=data_inf.C3.map({'> 50%': 5, '> 25% - 50%': 4,'> 10% - 25%':3, '> 0% - 10%':2, '0%':1}) - data_inf['C4']=data_inf.C4.map({'> 50%': 5, '> 25% - 50%': 4,'> 10% - 25%':3, '> 0% - 10%':2, '0%':1}) - data_inf['D1']=data_inf.D1.map({'Sell all the remaining investment': 0, 'Sell a portion of the remaining investment': 2,'Hold onto the investment and sell nothing':4, 'Buy more of the remaining investment':6}) - data_inf['D2']=data_inf.D2.map({'EITHER a loss of $0 OR a gain of $200': 0, 'EITHER a loss of $200 OR a gain of $500': 2,'EITHER a loss of $800 OR a gain of $1,200':4, 'EITHER a loss of $2,000 OR a gain of $2,500':6}) - data_inf['D3']=data_inf.D3.map({'Portfolio A': 0, 'Portfolio B': 2,'Portfolio C':4, 'Portfolio D':6}) - - data_inflist=['A1','A2','B1','C1','C2','C3','C4','D1','D2','D3'] - data_inf['score'] = data_inf[data_inflist].sum(axis=1) - - # st.write('# Score: ', str(int(data_inf['score']))) - print(type(data_inf['score'][0]),data_inf['score']) - saham = [] - stock = [] - if data_inf['score'][0] <= 21: - profile= 'Low Risk' - text = 'You have a low tolerance for risk and potential loss of capital or a short investment time horizon. Investors are willing to accept some short term fluctuations and small losses in your investment portfolio in exchange for modest returns. The primary objective of your investment portfolio will be to provide income by investing primarily in funds that invest in fixed-income securities. While capital appreciation is not a priority, a small portion of the portfolio may be invested in equity funds to provide the potential for some growth to offset the impact of inflation.' 
- rec1 = dfl[dfl['Label'] != 0] - rec = rec1.sort_values(by=['AUM', 'Percentage'],ascending=False).head(3) - elif 22 <= data_inf['score'][0] <= 36: - profile = 'Medium Risk' - text= ' You have a moderate tolerance for risk and loss of capital. Investors are willing to tolerate some fluctuations in your investment returns and moderate losses of capital. Investors have at least a medium term investment time horizon. The objective of investors portfolio will be to provide a combination of income and long term capital growth and therefore the portfolio will include at least 40% in fixed income investments.' - rec1 = dfm[dfm['Label'] != 0] - rec = rec1.sort_values(by=['AUM', 'Percentage'],ascending=False).head(3) - rec = rec.round(decimals=2) - elif data_inf['score'][0] >= 37: - profile = 'High Risk' - text = 'You tolerance for risk, portfolio volatility and investment losses is high or very high. Investors are willing to tolerate potentially significant and sustained price fluctuations and large losses of capital. Investors have extensive investment knowledge. Investors have no income requirements from your investments and have a long investment time horizon.' 
- sh = dfs[dfs['label'] != 0] - saham = sh.sort_values(by=['percentage'],ascending=False).head(20) - rec1 = dfh[dfh['label'] != 0] - rec = rec1.sort_values(by=['AUM', 'percentage'],ascending=False).head(3) - stock = rec['saham'] - - - #Prediction - st.write('# Profile Risk: {}'.format(profile)) - st.write(' {}'.format(text)) - st.write('# Our Recommendation: ') - st.dataframe(rec.round(decimals=2)) - - if len(saham) > 1: - st.write('# Top Holding: ') - st.dataframe(dfx[stock]) - st.write('# Top Stock: ') - top3 = dfx[stock] - top1 = stock.iloc[0] - saham['saham'].isin(top3[top1]).astype(int) - xx = saham.assign(Top=saham['saham'].isin(top3[top1]).astype(int)) - st.dataframe(xx) - diff --git a/spaces/teralomaniac/clewd/start.bat b/spaces/teralomaniac/clewd/start.bat deleted file mode 100644 index 2442e4bebad6370d76b606c0b6d8365cce59fcda..0000000000000000000000000000000000000000 --- a/spaces/teralomaniac/clewd/start.bat +++ /dev/null @@ -1,5 +0,0 @@ -pushd %~dp0 -call npm install --no-audit --fund false -node clewd.js -pause -popd \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Class 5 Maths Book Pdf Bangladesh !NEW!.md b/spaces/terfces0erbo/CollegeProjectV2/Class 5 Maths Book Pdf Bangladesh !NEW!.md deleted file mode 100644 index 5a9ce506316a4dce6a556ad72742ff9eb0cc0319..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Class 5 Maths Book Pdf Bangladesh !NEW!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Class 5 maths book pdf bangladesh


    DOWNLOADhttps://bytlly.com/2uGm2k



    -
    -PDF)Wikipedia Hsc Math Book Solution In BanglaMSBSHSE Solutions For Class 8 Maths Part 1 Chapter 5 S U Ahmed. Math Hsc 1st Paper Book Solution Pdf ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Veer Zaara 2004 Hindi 720p BRRip Cha).md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Veer Zaara 2004 Hindi 720p BRRip Cha).md deleted file mode 100644 index ff518ff445adf13fe56d4bc69c1afcdd1a703686..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Veer Zaara 2004 Hindi 720p BRRip Cha).md +++ /dev/null @@ -1,6 +0,0 @@ -

    HD Online Player (Veer Zaara 2004 Hindi 720p BRRip Cha)


    DOWNLOAD ✦✦✦ https://bytlly.com/2uGkhU



    -
    -HD Online Player (Veer Zaara 2004 Hindi 720p BRRip Cha) - closes on 2021-01-30. Gta 5 Psp Iso Download 19 - closes on 2021-01-30. Sombra Vol17 Meu ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/terfces0erbo/CollegeProjectV2/IFastime Video Converter Ultimate Full Indir.md b/spaces/terfces0erbo/CollegeProjectV2/IFastime Video Converter Ultimate Full Indir.md deleted file mode 100644 index 9499559db9106e6f2fc460612e1116e06144facc..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/IFastime Video Converter Ultimate Full Indir.md +++ /dev/null @@ -1,54 +0,0 @@ -

    iFastime Video Converter Ultimate Full indir


    Download File ⚹⚹⚹ https://bytlly.com/2uGk6S



    - -mp4, .avi, .wmv, .mov, .flv, .m4v, .mkv formats and save on the disk with any quality. - -Main Features: - -- Convert any videos to several different formats in one click. - -- Convert video files for online upload on YouTube, Facebook, Instagram, etc. - -- Change video/audio file names and titles. - -- Convert video/audio files to higher quality (320, 500, 640, 1280, 1920 and 2800 Kbps) - -- Supports to convert videos/audio files from nearly all popular video sites: YouTube, Vimeo, Facebook, Instagram, Dailymotion, Metacafe, Facebook, Amazon and others. - -- Use this awesome conversion software to convert videos from FLV to MP4, MP3, AVI, WMV, MOV, 3GP, MKV, MP4, and more. - -- Get unlimited conversion capacity. - -- Convert video/audio files by hand, using its interface and convert to the most popular audio formats. - -- Install the converter to your computer as a freeware. - -- No the watermark of the output video/audio files. - -- Powerful batch conversion tool. - -- Optimized for Windows 10 64-bit. - -Simple instructions for download and use: - -1. Install the program. After installing, you will be able to use the program. - -2. For next step, You must paste URL of any video or audio files. - -3. Choose one of the formats you want to convert your files: H.264, AAC, AC3, MP3, WAV, etc. and select a quality. - -4. Press the Start button to convert. The conversion will be completed in few seconds. - -5. Wait a minute for the conversion to complete. - -Video converter for beginners - -Are you a beginner in video editing? Do you find complicated to convert your video to some new format? Then this ultimate video converter is for you! - -1. Convert video and audio files to any popular format. - -2. No the watermark of the output video/audio files. - -3. Convert video and audio files by hand, using its interface and convert to the most popular audio formats: MP3, WAV, AAC, AC3, FLAC, ALAC, WMA, OGG, AIFF, etc. 4fefd39f24
    -
    -
    -

    diff --git a/spaces/terrierteam/retrieve/sdm.md b/spaces/terrierteam/retrieve/sdm.md deleted file mode 100644 index 6e956fe39506aa23edf647e584ab940792fd0a10..0000000000000000000000000000000000000000 --- a/spaces/terrierteam/retrieve/sdm.md +++ /dev/null @@ -1,10 +0,0 @@ -### Sequential Dependence Model - -The Sequential Dependence Model (SDM) re-writes queries to boost terms that appear in sequence or in proximity with one another. -It functions as a `Q→Q` (query re-writing, query-to-query) transformer, and can be used in pipelines as such. - -
    -
    Q
    -
    SDM
    -
    Q
    -
    diff --git a/spaces/themanas021/seamless_m4t/lang_list.py b/spaces/themanas021/seamless_m4t/lang_list.py deleted file mode 100644 index 3360eb292170aa8a92733c4f07b9b541e3cbeead..0000000000000000000000000000000000000000 --- a/spaces/themanas021/seamless_m4t/lang_list.py +++ /dev/null @@ -1,254 +0,0 @@ -# Language dict -language_code_to_name = { - "afr": "Afrikaans", - "amh": "Amharic", - "arb": "Modern Standard Arabic", - "ary": "Moroccan Arabic", - "arz": "Egyptian Arabic", - "asm": "Assamese", - "ast": "Asturian", - "azj": "North Azerbaijani", - "bel": "Belarusian", - "ben": "Bengali", - "bos": "Bosnian", - "bul": "Bulgarian", - "cat": "Catalan", - "ceb": "Cebuano", - "ces": "Czech", - "ckb": "Central Kurdish", - "cmn": "Mandarin Chinese", - "cym": "Welsh", - "dan": "Danish", - "deu": "German", - "ell": "Greek", - "eng": "English", - "est": "Estonian", - "eus": "Basque", - "fin": "Finnish", - "fra": "French", - "gaz": "West Central Oromo", - "gle": "Irish", - "glg": "Galician", - "guj": "Gujarati", - "heb": "Hebrew", - "hin": "Hindi", - "hrv": "Croatian", - "hun": "Hungarian", - "hye": "Armenian", - "ibo": "Igbo", - "ind": "Indonesian", - "isl": "Icelandic", - "ita": "Italian", - "jav": "Javanese", - "jpn": "Japanese", - "kam": "Kamba", - "kan": "Kannada", - "kat": "Georgian", - "kaz": "Kazakh", - "kea": "Kabuverdianu", - "khk": "Halh Mongolian", - "khm": "Khmer", - "kir": "Kyrgyz", - "kor": "Korean", - "lao": "Lao", - "lit": "Lithuanian", - "ltz": "Luxembourgish", - "lug": "Ganda", - "luo": "Luo", - "lvs": "Standard Latvian", - "mai": "Maithili", - "mal": "Malayalam", - "mar": "Marathi", - "mkd": "Macedonian", - "mlt": "Maltese", - "mni": "Meitei", - "mya": "Burmese", - "nld": "Dutch", - "nno": "Norwegian Nynorsk", - "nob": "Norwegian Bokm\u00e5l", - "npi": "Nepali", - "nya": "Nyanja", - "oci": "Occitan", - "ory": "Odia", - "pan": "Punjabi", - "pbt": "Southern Pashto", - "pes": "Western Persian", - "pol": "Polish", - "por": "Portuguese", - "ron": "Romanian", 
- "rus": "Russian", - "slk": "Slovak", - "slv": "Slovenian", - "sna": "Shona", - "snd": "Sindhi", - "som": "Somali", - "spa": "Spanish", - "srp": "Serbian", - "swe": "Swedish", - "swh": "Swahili", - "tam": "Tamil", - "tel": "Telugu", - "tgk": "Tajik", - "tgl": "Tagalog", - "tha": "Thai", - "tur": "Turkish", - "ukr": "Ukrainian", - "urd": "Urdu", - "uzn": "Northern Uzbek", - "vie": "Vietnamese", - "xho": "Xhosa", - "yor": "Yoruba", - "yue": "Cantonese", - "zlm": "Colloquial Malay", - "zsm": "Standard Malay", - "zul": "Zulu", -} -LANGUAGE_NAME_TO_CODE = {v: k for k, v in language_code_to_name.items()} - -# Source langs: S2ST / S2TT / ASR don't need source lang -# T2TT / T2ST use this -text_source_language_codes = [ - "afr", - "amh", - "arb", - "ary", - "arz", - "asm", - "azj", - "bel", - "ben", - "bos", - "bul", - "cat", - "ceb", - "ces", - "ckb", - "cmn", - "cym", - "dan", - "deu", - "ell", - "eng", - "est", - "eus", - "fin", - "fra", - "gaz", - "gle", - "glg", - "guj", - "heb", - "hin", - "hrv", - "hun", - "hye", - "ibo", - "ind", - "isl", - "ita", - "jav", - "jpn", - "kan", - "kat", - "kaz", - "khk", - "khm", - "kir", - "kor", - "lao", - "lit", - "lug", - "luo", - "lvs", - "mai", - "mal", - "mar", - "mkd", - "mlt", - "mni", - "mya", - "nld", - "nno", - "nob", - "npi", - "nya", - "ory", - "pan", - "pbt", - "pes", - "pol", - "por", - "ron", - "rus", - "slk", - "slv", - "sna", - "snd", - "som", - "spa", - "srp", - "swe", - "swh", - "tam", - "tel", - "tgk", - "tgl", - "tha", - "tur", - "ukr", - "urd", - "uzn", - "vie", - "yor", - "yue", - "zsm", - "zul", -] -TEXT_SOURCE_LANGUAGE_NAMES = sorted([language_code_to_name[code] for code in text_source_language_codes]) - -# Target langs: -# S2ST / T2ST -s2st_target_language_codes = [ - "eng", - "arb", - "ben", - "cat", - "ces", - "cmn", - "cym", - "dan", - "deu", - "est", - "fin", - "fra", - "hin", - "ind", - "ita", - "jpn", - "kor", - "mlt", - "nld", - "pes", - "pol", - "por", - "ron", - "rus", - "slk", - "spa", - "swe", - 
"swh", - "tel", - "tgl", - "tha", - "tur", - "ukr", - "urd", - "uzn", - "vie", -] -S2ST_TARGET_LANGUAGE_NAMES = sorted([language_code_to_name[code] for code in s2st_target_language_codes]) - -# S2TT / ASR -S2TT_TARGET_LANGUAGE_NAMES = TEXT_SOURCE_LANGUAGE_NAMES -# T2TT -T2TT_TARGET_LANGUAGE_NAMES = TEXT_SOURCE_LANGUAGE_NAMES diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Barbie As Rapunzel 2002 Full __HOT__ Movie Dubbed In Hindi.md b/spaces/tialenAdioni/chat-gpt-api/logs/Barbie As Rapunzel 2002 Full __HOT__ Movie Dubbed In Hindi.md deleted file mode 100644 index 6de873d68d73e16067b8afb3d4dc985d436eefbb..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Barbie As Rapunzel 2002 Full __HOT__ Movie Dubbed In Hindi.md +++ /dev/null @@ -1,15 +0,0 @@ - -

    Barbie As Rapunzel: A Magical Tale of Love and Imagination

    -

    Barbie As Rapunzel is a 2002 animated film that features Barbie as the fairy tale heroine Rapunzel, who has the most beautiful hair in the world. The film was produced by Mattel, Rainmaker Entertainment, Artisan Home Entertainment, Lionsgate, and Family Home Entertainment. It was dubbed in Hindi and released in India in 2003.

    -

    Barbie As Rapunzel 2002 Full Movie Dubbed In Hindi


    DOWNLOADhttps://urlcod.com/2uK8iL



    -

    The film follows Rapunzel's journey from being a servant of the evil witch Gothel, who keeps her locked in a tower guarded by a dragon, to finding her true love Prince Stefan and breaking a curse that has separated two kingdoms. Along the way, Rapunzel discovers a magic paintbrush that can create anything she imagines, and befriends a friendly dragon named Penelope.

    -

    Barbie As Rapunzel is a story of love, courage, and imagination that shows that anything is possible if you believe in yourself. The film features beautiful animation, enchanting music, and memorable characters. It is one of the most popular Barbie films among fans and critics alike.

    -

    You can watch Barbie As Rapunzel online on various platforms, such as Archive.org, Facebook Watch, and Bilibili.tv. You can also buy or rent the DVD from Amazon or other online stores. Enjoy this magical adventure with Barbie and Rapunzel!

    - -

    Barbie As Rapunzel was directed by Owen Hurley, who also directed other Barbie films such as Barbie in the Nutcracker and Barbie of Swan Lake. The film was written by Cliff Ruby, Elana Lesser, Sarah Maizes, and Rob Hudnut, based on the classic fairy tale by the Brothers Grimm. The film also features a story consultant by Robert McKee, a renowned screenwriting instructor and author.

    -

    The film has an impressive voice cast that includes Kelly Sheridan as Barbie/Rapunzel, Anjelica Huston as Gothel, Cree Summer as Penelope, Ian James Corlett as Hobie, Mark Hildreth as Stefan, David Kaye as Hugo, Peter Kelamis as Otto, Russell Roberts as King Frederick, Christopher Gaze as King Wilhelm, Terry Klassen as Fat Swordsman, Chantal Strand as Kelly/Katrina, Danny McKinnon as Tommy, Britt McKillip as Melody, Jocelyne Loewen as Lorena, and Dale Wilson as Silversmith.

    -

    -

    The film also boasts a beautiful musical score by Arnie Roth, performed by the London Symphony Orchestra. The film features two original songs: "Constant As The Stars Above", sung by Jessica Simpson as Rapunzel's lullaby to Penelope; and "Wish Upon A Star", sung by Samantha Mumba during the end credits.

    -

    Barbie As Rapunzel was well received by critics and audiences alike. It won two awards: Best Animated Video Premiere at the 2003 DVD Exclusive Awards; and Best Animation at the 2003 Leo Awards. It was also nominated for six other awards: Best Animated Character Performance at the 2003 DVD Exclusive Awards; Best Overall Sound in an Animation Program or Series at the 2003 Leo Awards; Best Musical Score in an Animation Program or Series at the 2003 Leo Awards; Best Screenwriting in an Animation Program or Series at the 2003 Leo Awards; Best Direction in an Animation Program or Series at the 2003 Leo Awards; and Best Animation Program or Series at the 2003 Leo Awards.

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Bazadedateaccessgatafacutemanual.md b/spaces/tialenAdioni/chat-gpt-api/logs/Bazadedateaccessgatafacutemanual.md deleted file mode 100644 index 1859e3b6a26b412088e2eb03524476f49b622251..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Bazadedateaccessgatafacutemanual.md +++ /dev/null @@ -1,43 +0,0 @@ -
    -

    Baza de date Access gata făcută manual: cum să creați și să utilizați o bază de date simplă

    -

    Dacă sunteți în căutarea unei modalități ușoare și rapide de a crea și de a gestiona o bază de date pentru diferite scopuri, cum ar fi evidența clienților, a produselor sau a comenzilor, puteți folosi Microsoft Access, un program de gestionare a bazelor de date care face parte din pachetul Microsoft Office.

    -

    Microsoft Access vă permite să creați o bază de date gata făcută manual, folosind un șablon predefinit sau pornind de la zero. În acest articol, vă vom arăta pașii necesari pentru ambele opțiuni și vă vom oferi câteva sfaturi pentru a vă ajuta să utilizați baza de date creată în mod eficient.

    -

    Bazadedateaccessgatafacutemanual


    Download File ››››› https://urlcod.com/2uK64l



    -

    Cum să creați o bază de date Access gata făcută manual folosind un șablon

    -

    Unul dintre avantajele folosirii unui șablon este că acesta vă oferă o structură și un design gata făcute pentru baza de date, precum și câteva date de exemplu pe care le puteți modifica sau șterge după nevoie. Există multe șabloane disponibile în Access sau pe site-ul Office.com, pentru diferite tipuri de baze de date, cum ar fi evidența contactelor, a activelor, a proiectelor sau a evenimentelor.

    -

    Pentru a crea o bază de date Access gata făcută manual folosind un șablon, urmați acești pași:

    -
      -
    1. Deschideți Access și selectați fila Nou din vizualizarea Backstage.
    2. -
    3. Alegeți unul dintre șabloanele instalate cu Access sau căutați un șablon pe Office.com în caseta de căutare.
    4. -
    5. Introduceți un nume pentru baza de date în caseta Nume fișier și selectați o locație pentru a o salva.
    6. -
    7. Faceți clic pe Creați pentru a deschide baza de date.
    8. -
    -

    Odată ce ați creat baza de date, puteți să explorați și să modificați obiectele pe care le conține, cum ar fi tabelele, formularele, rapoartele sau interogările. De exemplu, puteți adăuga sau șterge câmpuri din tabele, puteți schimba aspectul sau funcționalitatea formularelor și rapoartelor sau puteți crea interogări noi pentru a extrage informații specifice din baza de date.

    -

    Cum să creați o bază de date Access gata făcută manual pornind de la zero

    -

    Dacă nu găsiți un șablon care să se potrivească nevoilor dvs. sau dacă aveți cerințe specifice pentru baza de date, puteți să creați o bază de date Access gata făcută manual pornind de la zero. Acest lucru vă oferă mai mult control asupra structurii și designului bazei de date, dar necesită mai mult timp și efort.

    -

    Pentru a crea o bază de date Access gata făcută manual pornind de la zero, urmați acești pași:

    -
      -
    1. Deschideți Access și selectați fila Nou din vizualizarea Backstage.
    2. -
    3. Selectați opțiunea B - -Bază de date necompletată și introduceți un nume pentru baza de date în caseta Nume fișier.
    4. -
    5. Faceți clic pe Creați pentru a deschide baza de date goală.
    6. -
    -

    Acum trebuie să creați obiectele bazei de date, cum ar fi tabelele, formularele, rapoartele sau interogările. Pentru a crea un tabel, puteți folosi una dintre următoarele metode:

    -
      -
    • Faceți clic pe butonul Tabel din grupul Tabele de pe fila Creați. Acest lucru vă va deschide o foaie de date goală în care puteți introduce datele și puteți defini câmpurile.
    • -
    • Faceți clic pe butonul Vizualizare proiectare din grupul Tabele de pe fila Creați. Acest lucru vă va deschide o fereastră în care puteți defini numele, tipul de date și proprietățile fiecărui câmp al tabelului.
    • -
    • Faceți clic pe butonul Părți de aplicație din grupul Tabele de pe fila Creați. Acest lucru vă va permite să adăugați la baza de date o parte de aplicație care conține un tabel și alte obiecte asociate, cum ar fi un formular sau o interogare.
    • -
    • Importați sau legați datele dintr-o altă sursă, cum ar fi o foaie de calcul Excel, un fișier text sau o altă bază de date. Pentru a face acest lucru, faceți clic pe butonul corespunzător din grupul Date externe de pe fila Date externe.
    • -
    -

    Pentru a crea alte obiecte ale bazei de date, cum ar fi formularele, rapoartele sau interogările, puteți folosi butoanele din grupurile corespunzătoare de pe fila Creați. Puteți alege să creați obiectele folosind un instrument simplu, cum ar fi Asistentul pentru formulare sau Asistentul pentru rapoarte, sau să le creați în mod personalizat folosind Vizualizarea proiectare sau Vizualizarea aspect.

    -

    Cum să utilizați o bază de date Access gata făcută manual

    -

    După ce ați creat o bază de date Access gata făcută manual, fie folosind un șablon, fie pornind de la zero, puteți să o utilizați pentru a introduce, modifica, șterge sau vizualiza datele. De asemenea, puteți să partajați baza de date cu alte persoane sau să o publicați pe web.

    -

    -

    Pentru a utiliza o bază de date Access gata făcută manual, urmați aceste sfaturi generale:

    -
      -
    • Pentru a introduce sau modifica datele într-un tabel, puteți folosi foaia de date asociată tabelului. Pentru a deschide foaia de date a unui tabel, faceți dublu clic pe numele tabelului din panoul de navigare. Apoi puteți tasta sau edita valorile în celulele foii de date.
    • -
    • Pentru a introduce sau modifica datele folosind un formular, puteți folosi formularul asociat tabelului. Pentru a deschide formularul unui tabel, faceți dublu clic pe numele formularului din panoul de navigare. Apoi puteți tasta sau edita valorile în controalele formularului.
    • -
    • Pentru a șterge datele dintr-un tabel sau dintr-un formular, puteți selecta înregistrările dorite și apoi apăsa tasta Delete de pe tastatură sau butonul Ștergere din grupul Înregistrări de pe fila Pornire.

      e93f5a0c3f
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Command And Conquer Red Alert 3 Uprising [English]-RELOADED BEST Crack.md b/spaces/tialenAdioni/chat-gpt-api/logs/Command And Conquer Red Alert 3 Uprising [English]-RELOADED BEST Crack.md deleted file mode 100644 index 06d73a094cb036aed16e6303939621e85d55d006..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Command And Conquer Red Alert 3 Uprising [English]-RELOADED BEST Crack.md +++ /dev/null @@ -1,40 +0,0 @@ - -Here is a possible title and article for your keyword: - -

      How to Download and Install Command And Conquer Red Alert 3 Uprising [English]-RELOADED Crack

      - -

      If you are a fan of real-time strategy games, you might be interested in Command And Conquer Red Alert 3 Uprising, a standalone expansion pack for the 2008 game Command And Conquer Red Alert 3. This expansion pack features four new mini-campaigns, new units, new maps, and a new mode called Commander's Challenge. In this article, we will show you how to download and install Command And Conquer Red Alert 3 Uprising [English]-RELOADED crack, which will allow you to play the game without any restrictions.

      - -

      What is Command And Conquer Red Alert 3 Uprising [English]-RELOADED Crack?

      - -

      Command And Conquer Red Alert 3 Uprising [English]-RELOADED crack is a modified version of the game's executable file that bypasses the online authentication and activation process required by the original game. This means that you can play the game without having to register online, enter a serial key, or use a disc. The crack also includes an English language pack that lets you play the game in English, regardless of your region.

      -

      Command And Conquer Red Alert 3 Uprising [English]-RELOADED Crack


      Download File > https://urlcod.com/2uK5vR



      - -

      Where to Download Command And Conquer Red Alert 3 Uprising [English]-RELOADED Crack?

      - -

      There are many websites that offer Command And Conquer Red Alert 3 Uprising [English]-RELOADED crack for download, but not all of them are safe or reliable. Some of them may contain viruses, malware, or unwanted programs that can harm your computer or compromise your privacy. Therefore, you should be careful when choosing a source for downloading the crack.

      - -

      One of the websites that we recommend for downloading Command And Conquer Red Alert 3 Uprising [English]-RELOADED crack is Mod DB[^1^], a popular platform for modding games. Mod DB hosts a variety of mods, addons, patches, and cracks for various games, including Command And Conquer Red Alert 3 Uprising. You can find the crack file under the addons section of the game's page on Mod DB. The file name is RA3Uprising_English_LangPack1.00.exe and it has a size of 819.38 MB.

      - -

      How to Install Command And Conquer Red Alert 3 Uprising [English]-RELOADED Crack?

      - -

      Before you install Command And Conquer Red Alert 3 Uprising [English]-RELOADED crack, you need to have the base game installed on your computer. You can either buy the game from Steam[^3^] or download it from another source. If you choose the latter option, make sure that the game version is v1.12/v1.0 and that it supports MULTi12 languages.

      - -

      Once you have the base game installed, follow these steps to install Command And Conquer Red Alert 3 Uprising [English]-RELOADED crack:

      - -
        -
      1. Download RA3Uprising_English_LangPack1.00.exe from Mod DB[^1^] or another trusted website.
      2. -
      3. Run the executable file and follow the instructions on the screen.
      4. -
      5. Select your language via the installer or with the game's -ui parameter.
      6. -
      7. Copy the contents of the Crack folder from the RELOADED ISO image to your game installation folder.
      8. -
      9. Play the game!
      10. -
      - -

      Note: If you encounter any problems while installing or playing the game, you can check out this repack guide[^2^] for more information and troubleshooting tips.

      -

      - -

      Conclusion

      - -

      Command And Conquer Red Alert 3 Uprising is a fun and challenging expansion pack for one of the best real-time strategy games ever made. With Command And Conquer Red Alert 3 Uprising [English]-RELOADED crack, you can enjoy this game without any limitations or hassles. Just download and install the crack following our guide and you are ready to command and conquer!

      7196e7f11a
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Graphon Go Global 4 Crack Learn How to Get the Most Out of It.md b/spaces/tialenAdioni/chat-gpt-api/logs/Graphon Go Global 4 Crack Learn How to Get the Most Out of It.md deleted file mode 100644 index 57f7216e5e90f0ac1b295fe9455f2dd89010eed6..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Graphon Go Global 4 Crack Learn How to Get the Most Out of It.md +++ /dev/null @@ -1,76 +0,0 @@ - -

      How GraphOn GO-Global 4 Can Help You Deliver Windows Applications Anywhere

      -

      If you are an ISV or an IT professional who needs to provide remote access to Windows applications, you may have considered solutions like Citrix, RDS, or VMware Horizon. However, these solutions are complex, expensive, and resource-intensive. They require you to install and maintain multiple servers, licenses, and components, and they often deliver a poor user experience with slow logons, printing issues, and compatibility problems.

      -

      Fortunately, there is a better alternative: GraphOn GO-Global 4. GO-Global 4 is a simple and cost-effective solution that enables you to securely deliver Windows applications anywhere, on demand, from any cloud or on-premises server. You can publish your applications to any device that supports a browser, without rewriting your code or installing any client software. GO-Global 4 uses a proprietary protocol that minimizes bandwidth consumption and maximizes performance. It also offers features like integrated load balancing, session shadowing, single sign-on, and universal printing.

      -

      graphon go global 4 crack


      Downloadhttps://urlcod.com/2uK1fP



      -

      With GO-Global 4, you can:

      -
        -
      • Reduce the cost and complexity of delivering your Windows applications by up to 70% compared to other solutions.
      • -
      • Provide a web-native experience to your end users, who can access your applications from any device with a browser.
      • -
      • Leverage any cloud service provider of your choice, or use your own on-premises server.
      • -
      • Scale economically as your business grows, without adding more servers or licenses.
      • -
      • Cut down the time and effort required to manage remote access, with easy installation, configuration, and monitoring tools.
      • -
      • Enhance security and compliance with TLS encryption, two-factor authentication, and OpenID Connect support.
      • -
      -

      If you want to learn more about how GO-Global 4 can help you deliver Windows applications anywhere, you can request a demo[^1^], read the technical specifications[^2^], or check out the security overview[^3^]. You can also sign up for a free 30-day trial[^1^] and see for yourself how GO-Global 4 can transform your remote access experience.

      - -

      GO-Global 4 is trusted by thousands of customers worldwide, who use it to deliver Windows applications to various industries and sectors. Here are some examples of how GO-Global 4 can benefit different types of businesses:

      -

      graphon go global 4 license key generator
      -graphon go global 4 full version download
      -graphon go global 4 activation code free
      -graphon go global 4 serial number crack
      -graphon go global 4 patch file download
      -graphon go global 4 keygen torrent
      -graphon go global 4 registration code crack
      -graphon go global 4 cracked software download
      -graphon go global 4 product key free
      -graphon go global 4 crack for windows 10
      -graphon go global 4 crack for mac os
      -graphon go global 4 crack for linux
      -graphon go global 4 crack for android
      -graphon go global 4 crack for ios
      -graphon go global 4 crack for ipad
      -graphon go global 4 crack for iphone
      -graphon go global 4 crack for chromebook
      -graphon go global 4 crack for raspberry pi
      -graphon go global 4 crack for windows server
      -graphon go global 4 crack for ubuntu
      -graphon go global 4 crack for centos
      -graphon go global 4 crack for red hat linux
      -graphon go global 4 crack for debian linux
      -graphon go global 4 crack for fedora linux
      -graphon go global 4 crack for kali linux
      -graphon go global 4 crack for mint linux
      -graphon go global 4 crack for elementary os
      -graphon go global 4 crack for manjaro linux
      -graphon go global 4 crack for arch linux
      -graphon go global 4 crack for pop os
      -graphon go global 4 crack for zorin os
      -graphon go global 4 crack for solus linux
      -graphon go global 4 crack for gentoo linux
      -graphon go global 4 crack for slackware linux
      -graphon go global 4 crack for suse linux
      -graphon go global 4 crack for alpine linux
      -graphon go global 4 crack for void linux
      -graphon go global 4 crack for nixos linux
      -graphon go global 4 crack for clear linux
      -graphon go global 4 crack for garuda linux
      -how to install graphon go global 4 crack
      -how to use graphon go global 4 crack
      -how to uninstall graphon go global 4 crack
      -how to update graphon go global 4 crack
      -how to fix graphon go global 4 crack errors
      -how to get support for graphon go global 4 crack
      -how to buy graphon go global 4 license key
      -how to renew graphon go global 4 license key
      -how to transfer graphon go global 4 license key
      -how to activate graphon go global 4 license key

      -
        -
      • Healthcare: GO-Global 4 can help healthcare providers access medical records, billing systems, and diagnostic tools from any device and location, while ensuring compliance with HIPAA and other regulations.
      • -
      • Education: GO-Global 4 can help educators and students access learning management systems, educational software, and online courses from any device and location, while reducing IT costs and complexity.
      • -
      • Manufacturing: GO-Global 4 can help manufacturers access inventory management systems, production planning software, and quality control tools from any device and location, while improving efficiency and productivity.
      • -
      • Finance: GO-Global 4 can help financial institutions access accounting software, banking systems, and trading platforms from any device and location, while ensuring security and compliance with PCI DSS and other regulations.
      • -
      -

      As you can see, GO-Global 4 is a versatile and powerful solution that can help you deliver Windows applications anywhere, on demand, from any cloud or on-premises server. Whether you are an ISV or an IT professional, you can benefit from the simplicity, cost-effectiveness, and performance of GO-Global 4. Don't wait any longer. Request a demo or sign up for a free 30-day trial today and see for yourself how GO-Global 4 can transform your remote access experience.

      e753bf7129
      -
      -
      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bus Simulator Indonesia Sticker Download Where to Find and How to Use Them.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bus Simulator Indonesia Sticker Download Where to Find and How to Use Them.md deleted file mode 100644 index 4f876099ae323df53b73a14261eb130f8e02ea67..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bus Simulator Indonesia Sticker Download Where to Find and How to Use Them.md +++ /dev/null @@ -1,145 +0,0 @@ -
      -

      How to Download and Use Stickers for Bus Simulator Indonesia

      -

      Bus Simulator Indonesia is a popular game that lets you experience what it is like to be a bus driver in Indonesia. You can drive various types of buses, explore authentic Indonesian cities and places, and customize your bus with different liveries and accessories. One of the most fun aspects of the game is using stickers to design your own livery. In this article, we will show you how to download and use stickers for Bus Simulator Indonesia.

      -

      bus simulator indonesia sticker download


      Download File > https://bltlly.com/2uOnFg



      -

      What is Bus Simulator Indonesia?

      -

      A brief introduction to the game and its features

      -

      Bus Simulator Indonesia (also known as BUSSID) is a simulation game developed by Maleo. It was released in 2017 and has been downloaded by over 100 million players. The game allows you to drive various Indonesian buses, such as pariwisata, truk, and angkot, on realistic roads and traffic. You can also choose different routes, weather conditions, time of day, and passengers. The game has high-quality and detailed 3D graphics, realistic sound effects, and cool and fun honks, including the iconic "Om Telolet Om!" honk.

      -

      Why stickers are important for customizing your bus

      -

      Stickers are one of the ways you can customize your bus in Bus Simulator Indonesia. You can use stickers to add logos, names, slogans, images, patterns, colors, and other elements to your bus. You can also use stickers to create your own unique livery or mimic the livery of real bus operators. Stickers can make your bus look more attractive, distinctive, and personal. They can also help you express your creativity and style.

      -

      How to Download Stickers for Bus Simulator Indonesia

      -

      The sources of stickers online

      -

      There are many sources of stickers online that you can use for Bus Simulator Indonesia. Some of them are:

      -
        -
      • JalanTikus: This website offers more than 20 stickers for Bus Simulator Indonesia that are based on real bus liveries, game posters, 3D graphics, and anime characters. You can download them for free in PNG format.
      • -
      • Masdefi: This website provides more than 100 stickers for Bus Simulator Indonesia that are divided into five files. You can download them for free in RAR format and extract them to get the PNG files. The stickers include polosan, boneka, kaca, dashboard, and other types.
      • -
      • YouTube: This video shows you how to download more than 60 accessories for livery skin in Bus Simulator Indonesia. You can find the download link in the description of the video. The accessories include stickers, lights, horns, mirrors, and other items.
      • -
      -

      The steps to download and save stickers in PNG format

      -

      The steps to download and save stickers in PNG format are:

      -
        -
      1. Go to the source website or video that you want to get the stickers from.
      2. -
      3. Click on the download link or button that is provided.
      4. -
      5. Choose the location where you want to save the file on your device.
      6. -
      7. If the file is in RAR format, you need to extract it using an app like WinRAR or ZArchiver.
      8. -
      9. Open the folder where you saved or extracted the file and look for the PNG files that have the sticker images.
      10. -
      11. Copy or move the PNG files to the folder where you store your stickers for Bus Simulator Indonesia. The default folder is BUSSID > Stiker.
      12. -
      -

      How to Use Stickers for Bus Simulator Indonesia

      -

      The steps to access the livery editor in the game

      -

      The steps to access the livery editor in the game are:

      -

      download stiker bussid terbaru dan terlengkap format png
      -stiker bus simulator indonesia terkeren dan terbaru
      -how to download bus livery sticker for bus simulator indonesia
      -kumpulan stiker bus simulator indonesia keren untuk game bussid
      -stiker bus simulator dari livery bus asli
      -download stiker, lampu strobo, dan aksesoris dashboard bussid png
      -stiker bus simulator 3d hd
      -stiker bus simulator anime
      -cara memasang stiker bus simulator
      -download mod bussid disini
      -stiker bus simulator terlengkap 2023
      -stiker bus simulator dari poster game
      -stiker bus simulator untuk gamers cewek
      -download stiker boneka, stiker kaca, dashboard bussid
      -stiker bus simulator dari skin bus asli
      -download mod truck fuso, hino, canter, ud quester bussid
      -stiker bus simulator dari foto bus asli
      -download mentahan sticker bussid polosan
      -stiker bus simulator dari logo game
      -stiker bus simulator dari wallpaper game
      -download livery bussid shd, hd, xhd, sdd, jetbus, jb3+
      -stiker bus simulator dari meme game
      -stiker bus simulator dari karakter game
      -download template bussid untuk membuat livery sendiri
      -stiker bus simulator dari nama game
      -download sound bussid klakson telolet, suara mesin, suara knalpot
      -stiker bus simulator dari slogan game
      -stiker bus simulator dari warna game
      -download traffic bussid mobil sport, angkot, truk, motor, polisi
      -stiker bus simulator dari genre game
      -download map bussid sumatera, jawa, bali, sulawesi, kalimantan
      -stiker bus simulator dari rating game
      -stiker bus simulator dari developer game maleo
      -download cheat bussid unlimited money, unlock all items, no ads
      -stiker bus simulator dari review game
      -download update bussid terbaru versi 3.6.1 apk mod obb data
      -stiker bus simulator dari fitur game multiplayer online offline mode
      -download tutorial bussid cara membuat livery, modifikasi bus, mengganti sound
      -stiker bus simulator dari tips dan trik game cara mendapatkan uang banyak
      -download video bussid gameplay, showcase mod, live streaming
      -stiker bus simulator dari komunitas game grup facebook, whatsapp, telegram
      -download aplikasi bussid editor untuk edit livery dan sticker
      -stiker bus simulator dari event game kontes livery dan mod
      -download tema bussid untuk launcher android
      -stiker bus simulator dari inspirasi game simulasi lainnya
      -download emulator bussid untuk pc windows 10 8 7 mac os
      -stiker bus simulator dari rekomendasi game serupa truck simulator indonesia
      -download link alternatif bussid google play store app store apkpure uptodown

      -
        -
      1. Open the Bus Simulator Indonesia game on your device.
      2. -
      3. Tap on the Garage icon on the main menu.
      4. -
      5. Tap on the Livery icon on the bottom left corner of the screen.
      6. -
      7. Choose the bus that you want to customize and tap on the Edit icon on the bottom right corner of the screen.
      8. -
      9. You will see the livery editor interface where you can apply stickers and other accessories to your bus.
      10. -
      -

      The steps to apply stickers to your bus

      -

      The steps to apply stickers to your bus are:

      -
        -
      1. In the livery editor interface, tap on the Sticker icon on the top left corner of the screen.
      2. -
      3. You will see a list of stickers that you have downloaded or created. Tap on the sticker that you want to use.
      4. -
      5. You can adjust the size, position, rotation, and opacity of the sticker by using the sliders and buttons on the bottom of the screen.
      6. -
      7. You can also duplicate, delete, or flip the sticker by tapping on the icons on the top right corner of the screen.
      8. -
      9. You can apply multiple stickers to different parts of your bus by repeating these steps.
      10. -
      11. When you are done, tap on the Save icon on the top left corner of the screen to save your livery.
      12. -
      -

      Some Examples of Stickers for Bus Simulator Indonesia

      -

      Stickers based on real bus liveries

      -

      Some stickers are based on real bus liveries that are used by actual bus operators in Indonesia. These stickers can make your bus look more realistic and authentic. Some examples of these stickers are:

      - - - - - - - -
      Sticker NameBus OperatorDescription
      Sinar JayaSinar Jaya GroupA sticker that has a red and white color scheme with a star logo and a Sinar Jaya name.
      Rosalia IndahRosalia Indah GroupA sticker that has a blue and white color scheme with a flower logo and a Rosalia Indah name.
      Pahala KencanaPahala Kencana GroupA sticker that has a yellow and green color scheme with a lion logo and a Pahala Kencana name.
      Lorena KarinaLorena Karina GroupA sticker that has a purple and white color scheme with a butterfly logo and a Lorena Karina name.
      Haryanto 001Haryanto GroupA sticker that has a black and gold color scheme with a dragon logo and a Haryanto 001 name.
      -

      Stickers based on game posters and 3D graphics

      -

      Some stickers are based on game posters and 3D graphics that are related to Bus Simulator Indonesia or other games. These stickers can make your bus look more cool and fun. Some examples of these stickers are:

      - - - - - - - - -
      Sticker NameGame Poster or GraphicDescription
      Bussid Poster 1The official poster of Bus Simulator Indonesia.A sticker that has an image of a bus driving on a road with mountains, trees, and clouds in the background. It also has a Bus Simulator Indonesia logo and name.
      Bussid Poster 2The official poster of Bus Simulator Indonesia.A sticker that has an image of a bus driving on a bridge with skyscrapers, cars, and boats in the background. It also has a Bus Simulator Indonesia logo and name.
      Bussid 3D Graphic 1A 3D graphic created by JalanTikus.A sticker that has a 3D image of a bus with a Bus Simulator Indonesia logo and name on it. It also has a JalanTikus logo and name on the bottom.
      Bussid 3D Graphic 2A 3D graphic created by JalanTikus.A sticker that has a 3D image of a bus with a Bus Simulator Indonesia logo and name on it. It also has a JalanTikus logo and name on the top.
      GTA V PosterThe official poster of Grand Theft Auto V.A sticker that has an image of three characters from the game holding guns and money. It also has a Grand Theft Auto V logo and name.
      Need for Speed PosterThe official poster of Need for Speed: Heat.A sticker that has an image of a car racing on a street with flames and neon lights. It also has a Need for Speed: Heat logo and name.
      -

      Stickers based on anime characters and themes

      -

      Some stickers are based on anime characters and themes that are popular among fans. These stickers can make your bus look more cute and colorful. Some examples of these stickers are:

      - - - - - - - - -
      Sticker NameAnime Character or ThemeDescription
      Naruto StickerNaruto Uzumaki from Naruto.A sticker that has an image of Naruto Uzumaki, the main protagonist of the anime series Naruto. He is wearing his orange jumpsuit and holding a kunai. He also has his signature whisker marks and headband.
      Sakura StickerSakura Haruno from Naruto.A sticker that has an image of Sakura Haruno, one of the main supporting characters of the anime series Naruto. She is wearing her pink dress and gloves and holding a shuriken. She also has her signature pink hair and forehead protector.
      Luffy StickerMonkey D. Luffy from One Piece.A sticker that has an image of Monkey D. Luffy, the main protagonist of the anime series One Piece. He is wearing his red vest and straw hat and holding a meat bone. He also has his signature scar under his eye and smile.
      Zoro StickerRoronoa Zoro from One Piece.A sticker that has an image of Roronoa Zoro, one of the main supporting characters of the anime series One Piece. He is wearing his green coat and bandana and holding three swords. He also has his signature earrings and scar on his chest.
      Tanjiro StickerTanjiro Kamado from Demon Slayer: Kimetsu no Yaiba.A sticker that has an image of Tanjiro Kamado, the main protagonist of the anime series Demon Slayer: Kimetsu no Yaiba. He is wearing his black and green checkered haori and holding his nichirin blade. He also has his signature scar on his forehead and earrings.
      Nezuko StickerNezuko Kamado from Demon Slayer: Kimetsu no Yaiba.A sticker that has an image of Nezuko Kamado, one of the main supporting characters of the anime series Demon Slayer: Kimetsu no Yaiba. She is wearing her pink kimono and bamboo muzzle and holding her brother's hand. She also has her signature pink eyes and hair.
      -

      Conclusion and FAQs

      -

      In conclusion, stickers are a great way to customize your bus in Bus Simulator Indonesia. You can download stickers from various sources online and use them to create your own livery or copy the livery of real bus operators. You can also use stickers based on game posters, 3D graphics, anime characters, and themes to make your bus look more cool and fun. Stickers can help you express your creativity and style in the game. We hope this article has helped you learn how to download and use stickers for Bus Simulator Indonesia.

      -

      Here are some frequently asked questions about stickers for Bus Simulator Indonesia:

      -
        -
      • Q: How can I create my own stickers for Bus Simulator Indonesia?
      • -
      • A: You can create your own stickers for Bus Simulator Indonesia by using an app like PicsArt, Photoshop, or GIMP. You need to create a PNG file with a transparent background and a size of 512 x 512 pixels. You can draw or paste any image or text that you want on the file. Then, you need to save the file and copy or move it to the folder where you store your stickers for Bus Simulator Indonesia.
      • -
      • Q: How can I share my stickers with other players?
      • -
      • A: You can share your stickers with other players by uploading them to a website or a social media platform that allows file sharing. You can also use an app like Google Drive, Dropbox, or MediaFire to upload your files and generate a download link. Then, you can share the link with other players who want to download your stickers.
      • -
      • Q: How can I remove stickers from my bus?
      • -
      • A: You can remove stickers from your bus by accessing the livery editor in the game and tapping on the sticker that you want to remove. Then, you can tap on the Delete icon on the top right corner of the screen. You can also remove all stickers from your bus by tapping on the Clear icon on the top left corner of the screen.
      • -
      • Q: How can I get more buses for Bus Simulator Indonesia?
      • -
      • A: You can get more buses for Bus Simulator Indonesia by downloading mods from various sources online. Mods are modifications that add new buses, maps, features, and other elements to the game. You need to download the mod files and copy or move them to the folder where you store your mods for Bus Simulator Indonesia. The default folder is BUSSID > Mods.
      • -
      • Q: How can I update Bus Simulator Indonesia?
      • -
      • A: You can update Bus Simulator Indonesia by going to the Google Play Store or the App Store and checking if there is a new version available. If there is, you can tap on the Update button and wait for the download and installation to finish. You can also enable automatic updates for the game in your device settings.
      • -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Import Csv To Sql Server C Net Download __EXCLUSIVE__.md b/spaces/tioseFevbu/cartoon-converter/scripts/Import Csv To Sql Server C Net Download __EXCLUSIVE__.md deleted file mode 100644 index 1098cda3ab0e64c18f0271860f5ce0a2f2717b52..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Import Csv To Sql Server C Net Download __EXCLUSIVE__.md +++ /dev/null @@ -1,94 +0,0 @@ - -

      How to Import CSV Files to SQL Server using C#.NET

      -

      CSV files are a common format for storing and exchanging data, especially when dealing with large datasets. However, sometimes you may need to import CSV files to SQL Server for further analysis or processing. In this article, we will show you how to import CSV files to SQL Server using C#.NET, a popular programming language for web development.

      -

      To import CSV files to SQL Server using C#.NET, you will need the following:

      -

      import csv to sql server c net download


      Download === https://urlcod.com/2uHvHp



      -
        -
      • A CSV file that contains the data you want to import. For this example, we will use a file named "wine.csv" that contains information about different types of wine.
      • -
      • A SQL Server database that has a table with the same structure as the CSV file. For this example, we will use a database named "WineDB" and a table named "Wine" with the following columns: Id, Name, Country, Region, Alcohol, Price.
      • -
      • A C#.NET project that can connect to the SQL Server database and execute SQL commands. For this example, we will use Visual Studio 2019 and the System.Data.SqlClient namespace.
      • -
      -

      The steps to import CSV files to SQL Server using C#.NET are as follows:

      -
        -
      1. Read the CSV file and store the data in a DataTable object. You can use the StreamReader class to read the CSV file line by line, and use the String.Split method to split each line by the comma delimiter. Then, you can use the DataTable class to create a DataTable object and add columns and rows based on the CSV file data.
      2. -
      3. Use the SqlBulkCopy class to bulk insert the DataTable data into the SQL Server table. You can use the SqlBulkCopy class to efficiently transfer data from a DataTable object to a SQL Server table. You need to specify the connection string, the destination table name, and the column mappings between the DataTable and the table.
      4. -
      5. Close the StreamReader and SqlBulkCopy objects and dispose of any resources they use.
      6. -
      -

      The following code snippet shows how to import CSV files to SQL Server using C#.NET:

      - -```csharp -using System; -using System.Data; -using System.Data.SqlClient; -using System.IO; - -namespace ImportCSV - - class Program - - static void Main(string[] args) - - // The path of the CSV file - string csvFilePath = @"C:\wine.csv"; - - // The connection string of the SQL Server database - string connectionString = @"Server=localhost;Database=WineDB;Integrated Security=True;"; - - // The name of the destination table in the database - string tableName = "Wine"; - - // Create a DataTable object to store the CSV data - DataTable dt = new DataTable(); - - // Create a StreamReader object to read the CSV file - using (StreamReader sr = new StreamReader(csvFilePath)) - - // Read the first line of the CSV file as the header - string[] headers = sr.ReadLine().Split(','); - - // Add columns to the DataTable based on the header - foreach (string header in headers) - - dt.Columns.Add(header); - - - // Read the rest of the lines of the CSV file as the data - while (!sr.EndOfStream) - - // Split each line by comma delimiter - string[] rows = sr.ReadLine().Split(','); - - // Add rows to the DataTable based on the data - DataRow dr = dt.NewRow(); - for (int i = 0; i < headers.Length; i++) - - dr[i] = rows[i]; - - dt.Rows.Add(dr); - - - - // Create a SqlBulkCopy object to bulk insert the data into the database - using (SqlBulkCopy sbc = new SqlBulkCopy(connectionString)) - - // Specify the destination table name - sbc.DestinationTableName = tableName; - - // Specify the column mappings between the DataTable and the table - foreach (DataColumn dc in dt.Columns) - - sbc.ColumnMappings.Add(dc.ColumnName, dc.ColumnName); - - - // Write the data from the DataTable to the table - sbc.WriteToServer(dt); - - - Console.WriteLine("CSV file imported successfully."); - - - -```

      -

      cec2833e83
      -
      -
      \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch- __HOT__.md b/spaces/tioseFevbu/cartoon-converter/scripts/Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch- __HOT__.md deleted file mode 100644 index 9a95cbece72db82b0c73ea4015f9a1b1feee2963..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch- __HOT__.md +++ /dev/null @@ -1,36 +0,0 @@ -
      -I can try to help you with that. Here is a possible title and article with SEO optimization and HTML formatting for the keyword "Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch-": - -```html -

      How to Update QuickBooks Enterprise Accountant 2016 to the Latest Version

      -

      If you are using QuickBooks Enterprise Accountant 2016, you might be wondering how to update it to the latest version. Updating your software can help you enjoy new features, improved performance, and enhanced security. In this article, we will show you how to update QuickBooks Enterprise Accountant 2016 to the latest version using a patch file.

      -

      Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch-


      Download File ✫✫✫ https://urlcod.com/2uHxai



      -

      What is QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch-

      -

      QuickBooks Enterprise Accountant 2016 is a powerful accounting software designed for small and medium-sized businesses. It offers advanced features such as inventory management, custom reporting, user permissions, and more. However, Intuit has a 3-year lifecycle policy for its products, which means that QuickBooks Enterprise Accountant 2016 will no longer be supported after May 31, 2019.

      -

      To continue using QuickBooks Enterprise Accountant 2016, you need to update it to the latest version using a patch file. A patch file is a small program that fixes bugs and improves functionality in your software. The patch file for QuickBooks Enterprise Accountant 2016 is called "Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch-". It is available for download from various online sources.

      -

      How to Update QuickBooks Enterprise Accountant 2016 to the Latest Version Using a Patch File

      -

      Before you update your software, make sure that you have a backup of your company file and that all users are logged out of QuickBooks. Also, make sure that your computer meets the system requirements for the latest version of QuickBooks Enterprise Accountant.

      -

      Here are the steps to update QuickBooks Enterprise Accountant 2016 to the latest version using a patch file:

      -

      -
        -
      1. Download the patch file from a reliable source. You can search for "Intuit QuickBooks Enterprise Accountant 2016 16.0 R3 Incl Patch-" on Google or other search engines and choose a reputable website to download it from.
      2. -
      3. Save the patch file to your desktop or another convenient location.
      4. -
      5. Run the patch file by double-clicking on it. Follow the on-screen instructions to install it.
      6. -
      7. Open your new version of QuickBooks Enterprise Accountant by clicking on the desktop icon or from the Start menu.
      8. -
      9. Go to the File menu and select Open or Restore Company.
      10. -
      11. Click Restore a backup copy and then click Local backup.
      12. -
      13. Browse your computer for your company file backup. You should have created a backup before updating your software.
      14. -
      15. Select your company file backup and then click Open.
      16. -
      17. Sign in to your company file as an admin user.
      18. -
      19. QuickBooks will automatically create a backup of your existing company file before upgrading it. You can change the default location of the backup if you want.
      20. -
      21. Click Update Now to start the upgrade process.
      22. -
      23. Wait for the upgrade process to complete. It may take some time depending on the size of your company file and your computer speed.
      24. -
      25. Once the upgrade is done, click Done.
      26. -
      27. You can now use your updated version of QuickBooks Enterprise Accountant with your company file.
      28. -
      -

      Conclusion

      -

      Updating your software is important to keep it running smoothly and securely. By following the steps above, you can update QuickBooks Enterprise Accountant 2016 to the latest version using a patch file. This will allow you to enjoy new features and enhancements in your accounting software. If you need any help with updating your software or using QuickBooks Enterprise Accountant, you can contact Intuit support or visit their website for more information.

      - -```

      7196e7f11a
      -
      -
      \ No newline at end of file diff --git a/spaces/tom-doerr/logo_generator/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py b/spaces/tom-doerr/logo_generator/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py deleted file mode 100644 index f6103a890bbefcc6a01ef875463d2a24c6b20de7..0000000000000000000000000000000000000000 --- a/spaces/tom-doerr/logo_generator/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py +++ /dev/null @@ -1,442 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""JAX Ops for symmetric matrices used by the Shampoo optimizer.""" - -import functools -from typing import Any, List, Optional, Sequence, Union - -import jax -import jax.numpy as jnp -import numpy as np -from flax import struct -from jax import lax - - -@struct.dataclass -class SlicedSymmetricMatrix: - """A symmetric matrix represented by lower-triangular block row slices. - - For example, the symmetric matrix M = [[a, b^T], [b, c]] would be represented - by the block rows a and [b, c]. - - The matrix may be batched, in which case each entry of block_rows may have - dimension greater than 2. The last two dimensions represent the rows and cols. - """ - - block_rows: List[jnp.ndarray] - - -def product_with_transpose( - mat1, - mat2, - axes, - precision=lax.Precision.DEFAULT, -): - """Returns mat1 * mat2^T for two matrices (possibly batched). 
- - The rows and columns are the last two dimensions for each matrix. - - Args: - mat1: First matrix. - mat2: Second matrix. - axes: The axes over which to apply the product. - precision: JAX precision to use for the multiplication. - """ - return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision) - - -@functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision")) -def sliced_transposed_product( - mat, - block_size, - axes=(-1,), - precision=lax.Precision.DEFAULT, -): - """Returns the blocked slices representing a symmetric contraction. - - Specifically, the output is a contraction of the input mat with itself, in the - specified axes. - - Args: - mat: The matrix for which we will compute a contraction with itself. - block_size: The size of row blocks to compute. - axes: Axes to use for the contraction. - precision: The precision to use in each computation. - - Raises: - ValueError: Raised when the specified block size does not evenly divide - the number of rows of the input mat. - """ - rank = len(mat.shape) - - def _make_axis_positive(ax): - assert -rank <= ax < rank - return ax + rank if ax < 0 else ax - - positive_axes = [_make_axis_positive(ax) for ax in axes] - assert len(positive_axes) == len(axes) - remaining_axes = set(range(rank)) - set(positive_axes) - assert len(remaining_axes) == 1 - remaining_ax = remaining_axes.pop() - - num_rows = mat.shape[remaining_ax] - if num_rows % block_size != 0: - raise ValueError( - "The row dimension must be divisible by block_size. " - f"Instead got row dimension={num_rows} and block_size={block_size}." 
- ) - - block_rows = [] - for i in range(num_rows // block_size): - start_indices = [0] * rank - start_indices[remaining_ax] = i * block_size - - slice_sizes = list(mat.shape) - slice_sizes[remaining_ax] = block_size - - slice_sizes_full = list(mat.shape) - slice_sizes_full[remaining_ax] = (i + 1) * block_size - - block_rows.append( - product_with_transpose( - lax.dynamic_slice( - mat, start_indices=start_indices, slice_sizes=slice_sizes - ), - lax.dynamic_slice( - mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full - ), - axes=(axes, axes), - precision=precision, - ) - ) - - return SlicedSymmetricMatrix(block_rows=block_rows) - - -@functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision")) -def sliced_transposed_product_concat( - mat, - block_size, - axes=(-1,), - precision=lax.Precision.DEFAULT, -): - """Returns the concatenated slices representing mat*mat^T. - - Args: - mat: The matrix for which we will compute mat*mat^T. It does not need to be - square, and may be batched. - block_size: The size of row blocks to compute. - axes: Axes to use for the contraction. - precision: The precision to use in each computation. - - Raises: - ValueError: Raised when the specified block size does not evenly divide - the number of rows of the input mat. - """ - sliced_symmetric_matrix = sliced_transposed_product( - mat=mat, block_size=block_size, axes=axes, precision=precision - ) - return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1) - - -@jax.jit -def materialize_matrix(symmetric_matrix): - """Returns a materialized symmetric matrix. - - Args: - symmetric_matrix: the matrix represented by lower-triangular block slices. - """ - block_rows = symmetric_matrix.block_rows - block_size = block_rows[0].shape[-2] - num_blocks = len(block_rows) - - # Slice the lower-triangular and diagonal blocks into blocks. 
- blocks = [ - [ - block_row[Ellipsis, i * block_size : (i + 1) * block_size] - for i in range(k + 1) - ] - for k, block_row in enumerate(block_rows) - ] - - # Generate the (off-diagonal) upper-triangular blocks. - off_diags = [[] for _ in range(num_blocks - 1)] - for k, block_row in enumerate(block_rows[1:]): - for i in range(k + 1): - off_diags[i].append( - jnp.swapaxes( - a=block_row[Ellipsis, i * block_size : (i + 1) * block_size], - axis1=-1, - axis2=-2, - ) - ) - - return jnp.block( - [row + row_t for row, row_t in zip(blocks[:-1], off_diags)] + [blocks[-1]] - ) - - -@functools.partial(jax.jit, static_argnames=("num_blocks")) -def materialize_matrix_from_concat( - block_rows_concat, - num_blocks=None, -): - """Returns a materialized symmetric matrix from concatenated slices. - - Args: - block_rows_concat: The matrix represented as the concatenated - lower-triangular blocks. - num_blocks: The number of block-rows used to represent the symmetric matrix. - If not specified, it is inferred from the shape of block_rows_concat. - """ - if num_blocks is None: - num_blocks = find_num_blocks(block_rows_concat) - - block_size = block_rows_concat.shape[-2] - - block_rows = [ - block_rows_concat[ - Ellipsis, - (k * (k + 1)) - // 2 - * block_size : (((k + 1) * (k + 2)) // 2 + 1) - * block_size, - ] - for k in range(num_blocks) - ] - - return materialize_matrix(SlicedSymmetricMatrix(block_rows=block_rows)) - - -@functools.partial(jax.jit, static_argnames=("alpha", "beta", "axes")) -def update_sliced_rows( - symmetric_matrix, - mat, - alpha, - beta, - axes=(-1,), -): - """Implements the blocked equivalent of SYRK. - - Specifically, the symmetric matrix (represented using lower-triangular block - rows) is updated using the sliced product of mat. - - Args: - symmetric_matrix: The symmetric matrix to update. - mat: The matrix to use for the update = mat * mat^T. The number of rows - should match that of symmetric_matrix. - alpha: The weight for the update. 
- beta: The weight for the original symmetric matrix. - axes: Axes to use for the contraction of the update. - - Returns: - The updated rows of alpha * mat * mat^T + beta * symmetric_matrix. - """ - block_size = symmetric_matrix.block_rows[0].shape[-2] - sym_prod = sliced_transposed_product(mat=mat, block_size=block_size, axes=axes) - return SlicedSymmetricMatrix( - block_rows=[ - update * alpha + row * beta - for update, row in zip(sym_prod.block_rows, symmetric_matrix.block_rows) - ] - ) - - -def num_blocks_from_total_blocks(total_blocks): - """Returns the number of blocks (i.e. - - block rows) from the total blocks. - - This is the inverse of the function x -> x*(x+1)/2. - - For example, the matrix M = [[A, B^T], [B, C]] may be represented using a - total of 3 blocks ([A, B, C]). The number of corresponding block rows is 2. - - Args: - total_blocks: The total blocks used to represent the matrix. - """ - num_blocks = np.round((np.sqrt(8 * total_blocks + 1) - 1) / 2).astype(np.int32) - if (num_blocks * (num_blocks + 1)) / 2 != total_blocks: - raise ValueError( - f"total_blocks={total_blocks} does not correspond to " - "a symmetric matrix. It must have the form total_blocks = x*(x+1)/2." - ) - return num_blocks - - -def find_num_blocks(block_rows_concat): - """Returns the number of (row) blocks representing the concatenated matrix. - - For example, an input with dimensions [256, 2560] represents 10 square blocks, - which matches 4 lower-triangular block rows (1+2+3+4). So this function will - return 4. - - Use ordinary numpy functions here so that the returned value is static. - - Args: - block_rows_concat: The concatenated block array. - - Raises: - ValueError: When the dimensions of the matrix do not correspond to a lower - triangular block representation. - """ - # Compute the number of square blocks used to represent the matrix. 
- total_blocks = block_rows_concat.shape[-1] / block_rows_concat.shape[-2] - # Determine the number of block rows by inverting y = x*(x+1)/2. - return num_blocks_from_total_blocks(total_blocks) - - -@functools.partial(jax.jit, static_argnames=("block_size")) -def slice_symmetric_matrix( - mat, - block_size, -): - """Returns sliced row blocks. - - Args: - mat: A symmetric matrix. - block_size: The size of the row slices. - """ - num_rows = mat.shape[-2] - num_cols = mat.shape[-1] - if num_rows != num_cols: - raise ValueError("mat is not square.") - if num_rows % block_size != 0: - raise ValueError( - "block size does not evenly divide rows. " - f"num_rows={num_rows}, block_size={block_size}" - ) - return SlicedSymmetricMatrix( - block_rows=[ - mat[ - Ellipsis, - i * block_size : (i + 1) * block_size, - 0 : (i + 1) * block_size, - ] - for i in range(num_rows // block_size) - ] - ) - - -@functools.partial(jax.jit, static_argnames=("block_size")) -def slice_symmetric_matrix_concat( - mat, - block_size, -): - """Returns the concatenated sliced row blocks. - - Args: - mat: A symmetric matrix. - block_size: The size of the row slices. - """ - sliced_symmetric_matrix = slice_symmetric_matrix(mat=mat, block_size=block_size) - return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1) - - -def sliced_matrix_diag(mat): - """Returns the diagonal of the symmetric matrix. - - Args: - mat: The symmetric matrix represented in concatenated block form. - """ - rows, cols = mat.shape - total_blocks = cols // rows - num_blocks = num_blocks_from_total_blocks(total_blocks) - diags = [] - for i in range(num_blocks): - last_index = rows * ((i + 2) * (i + 1)) // 2 - first_index = last_index - rows - diags.append(jnp.diag(mat[Ellipsis, first_index:last_index])) - return jnp.concatenate(diags, axis=-1) - - -def diag_as_concat(diag, block_size): - """Returns the representation of a diagonal matrix in symmetric block form. - - Args: - diag: The 1D array for the diagonals. 
- block_size: The size of blocks to use. Must divide the length of diag. - """ - assert len(diag.shape) == 1 # diag must be 1D. - assert len(diag) % block_size == 0 - num_diag_blocks = len(diag) // block_size - blocks = [] - for i in range(num_diag_blocks): - blocks.append(jnp.zeros(shape=(block_size, block_size * i), dtype=diag.dtype)) - blocks.append(jnp.diag(diag[i * block_size : (i + 1) * block_size])) - return jnp.concatenate(blocks, axis=-1) - - -def row_abs_maxes(mat): - """Returns the max of the absolute values of the rows of the full matrix. - - For example the symmetric matrix M = [[1, 6], [6, 2]] is represented using - mat = [1, 6, 2] with block_size = 1. In this case the function returns the - aboslute row maxes of the original symmetric matrix, [6, 6]. - - Args: - mat: The symmetric matrix represented as the concatenated blocks. - """ - rows, cols = mat.shape - - # Find col and row max for each block. - col_maxes = [] - row_maxes = [] - for i in range(cols // rows): - block = jnp.abs(mat[Ellipsis, i * rows : (i + 1) * rows]) - col_maxes.append(jnp.max(block, axis=1)) - row_maxes.append(jnp.max(block, axis=0)) - - # global row max from block maxes. - num_blocks = num_blocks_from_total_blocks(cols // rows) - maxes = [] - for i in range(num_blocks): - maxes.append( - jnp.concatenate( - row_maxes[(i * (i + 1) // 2) : ((i + 2) * (i + 1) // 2)] - + [ - col_maxes[((j + 1) * (j + 2)) // 2 - (j - i + 1)] - for j in range(i + 1, num_blocks) - ], - axis=-1, - ) - ) - - return jnp.max(jnp.stack(maxes), axis=0) - - -def times_vector(mat, vec): - """Returns the symmetric block-concatenated matrix multiplied by a vector. - - Specifically, each value in the vector is multiplied by a row of the full - matrix. That is, the vector is broadcast and multiplied element-wise. Note - this would be the transpose of full_mat * vec if full_mat represented the full - symmetric matrix. - - Args: - mat: The symmetric matrix represented as the concatenated blocks. 
- vec: The vector, having the same dimension as the materialized matrix. - """ - rows, cols = mat.shape - num_blocks = num_blocks_from_total_blocks(cols // rows) - multiplied = [] - for i in range(num_blocks): - mat_block = mat[ - Ellipsis, rows * ((i + 1) * i) // 2 : rows * ((i + 1) * (i + 2)) // 2 - ] - vec_block = vec[Ellipsis, rows * i : rows * (i + 1)] - multiplied.append(jnp.einsum("...ij,...i->ij", mat_block, vec_block)) - return jnp.concatenate(multiplied, axis=-1) diff --git a/spaces/tomandandy/MusicGen3/audiocraft/modules/conditioners.py b/spaces/tomandandy/MusicGen3/audiocraft/modules/conditioners.py deleted file mode 100644 index 82792316024b88d4c5c38b0a28f443627771d509..0000000000000000000000000000000000000000 --- a/spaces/tomandandy/MusicGen3/audiocraft/modules/conditioners.py +++ /dev/null @@ -1,990 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from itertools import chain -import logging -import math -import random -import re -import typing as tp -import warnings - -from einops import rearrange -from num2words import num2words -import spacy -from transformers import T5EncoderModel, T5Tokenizer # type: ignore -import torchaudio -import torch -from torch import nn -from torch import Tensor -import torch.nn.functional as F -from torch.nn.utils.rnn import pad_sequence - -from .streaming import StreamingModule -from .transformer import create_sin_embedding -from ..data.audio_dataset import SegmentInfo -from ..utils.autocast import TorchAutocast -from ..utils.utils import hash_trick, length_to_mask, collate - - -logger = logging.getLogger(__name__) -TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) -ConditionType = tp.Tuple[Tensor, Tensor] # condition, mask - - -class WavCondition(tp.NamedTuple): - wav: Tensor - length: Tensor - path: tp.List[tp.Optional[str]] = [] - - -def nullify_condition(condition: ConditionType, dim: int = 1): - """This function transforms an input condition to a null condition. - The way it is done by converting it to a single zero vector similarly - to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. - - Args: - condition (ConditionType): a tuple of condition and mask (tp.Tuple[Tensor, Tensor]) - dim (int): the dimension that will be truncated (should be the time dimension) - WARNING!: dim should not be the batch dimension! - Returns: - ConditionType: a tuple of null condition and mask - """ - assert dim != 0, "dim cannot be the batch dimension!" - assert type(condition) == tuple and \ - type(condition[0]) == Tensor and \ - type(condition[1]) == Tensor, "'nullify_condition' got an unexpected input type!" - cond, mask = condition - B = cond.shape[0] - last_dim = cond.dim() - 1 - out = cond.transpose(dim, last_dim) - out = 0. 
* out[..., :1] - out = out.transpose(dim, last_dim) - mask = torch.zeros((B, 1), device=out.device).int() - assert cond.dim() == out.dim() - return out, mask - - -def nullify_wav(wav: Tensor) -> WavCondition: - """Create a nullified WavCondition from a wav tensor with appropriate shape. - - Args: - wav (Tensor): tensor of shape [B, T] - Returns: - WavCondition: wav condition with nullified wav. - """ - null_wav, _ = nullify_condition((wav, torch.zeros_like(wav)), dim=wav.dim() - 1) - return WavCondition( - wav=null_wav, - length=torch.tensor([0] * wav.shape[0], device=wav.device), - path=['null_wav'] * wav.shape[0] - ) - - -@dataclass -class ConditioningAttributes: - text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) - wav: tp.Dict[str, WavCondition] = field(default_factory=dict) - - def __getitem__(self, item): - return getattr(self, item) - - @property - def text_attributes(self): - return self.text.keys() - - @property - def wav_attributes(self): - return self.wav.keys() - - @property - def attributes(self): - return {"text": self.text_attributes, "wav": self.wav_attributes} - - def to_flat_dict(self): - return { - **{f"text.{k}": v for k, v in self.text.items()}, - **{f"wav.{k}": v for k, v in self.wav.items()}, - } - - @classmethod - def from_flat_dict(cls, x): - out = cls() - for k, v in x.items(): - kind, att = k.split(".") - out[kind][att] = v - return out - - -class SegmentWithAttributes(SegmentInfo): - """Base class for all dataclasses that are used for conditioning. - All child classes should implement `to_condition_attributes` that converts - the existing attributes to a dataclass of type ConditioningAttributes. - """ - def to_condition_attributes(self) -> ConditioningAttributes: - raise NotImplementedError() - - -class Tokenizer: - """Base class for all tokenizers - (in case we want to introduce more advances tokenizers in the future). 
- """ - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - raise NotImplementedError() - - -class WhiteSpaceTokenizer(Tokenizer): - """This tokenizer should be used for natural language descriptions. - For example: - ["he didn't, know he's going home.", 'shorter sentence'] => - [[78, 62, 31, 4, 78, 25, 19, 34], - [59, 77, 0, 0, 0, 0, 0, 0]] - """ - PUNCTUATIONS = "?:!.,;" - - def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", - lemma: bool = True, stopwords: bool = True) -> None: - self.n_bins = n_bins - self.pad_idx = pad_idx - self.lemma = lemma - self.stopwords = stopwords - try: - self.nlp = spacy.load(language) - except IOError: - spacy.cli.download(language) # type: ignore - self.nlp = spacy.load(language) - - @tp.no_type_check - def __call__( - self, - texts: tp.List[tp.Optional[str]], - return_text: bool = False - ) -> tp.Tuple[Tensor, Tensor]: - """Take a list of strings and convert them to a tensor of indices. - - Args: - texts (tp.List[str]): List of strings. - return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. - Returns: - tp.Tuple[Tensor, Tensor]: - - Indices of words in the LUT. 
- - And a mask indicating where the padding tokens are - """ - output, lengths = [], [] - texts = deepcopy(texts) - for i, text in enumerate(texts): - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(Tensor([self.pad_idx])) - lengths.append(0) - continue - - # convert numbers to words - text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore - # normalize text - text = self.nlp(text) # type: ignore - # remove stopwords - if self.stopwords: - text = [w for w in text if not w.is_stop] # type: ignore - # remove punctuations - text = [w for w in text if w.text not in self.PUNCTUATIONS] # type: ignore - # lemmatize if needed - text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore - - texts[i] = " ".join(text) - lengths.append(len(text)) - # convert to tensor - tokens = Tensor([hash_trick(w, self.n_bins) for w in text]) - output.append(tokens) - - mask = length_to_mask(torch.IntTensor(lengths)).int() - padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() - if return_text: - return padded_output, mask, texts # type: ignore - return padded_output, mask - - -class NoopTokenizer(Tokenizer): - """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. - The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split - strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will - split it to ["Jeff", "Buckley"] and return an index per word. 
- - For example: - ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] - ["Metal", "Rock", "Classical"] => [0, 223, 51] - """ - def __init__(self, n_bins: int, pad_idx: int = 0): - self.n_bins = n_bins - self.pad_idx = pad_idx - - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]: - output, lengths = [], [] - for text in texts: - # if current sample doesn't have a certain attribute, replace with pad token - if text is None: - output.append(self.pad_idx) - lengths.append(0) - else: - output.append(hash_trick(text, self.n_bins)) - lengths.append(1) - - tokens = torch.LongTensor(output).unsqueeze(1) - mask = length_to_mask(torch.IntTensor(lengths)).int() - return tokens, mask - - -class BaseConditioner(nn.Module): - """Base model for all conditioner modules. We allow the output dim to be different - than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; - 2) make all condition dims consistent. - - Args: - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - """ - def __init__(self, dim, output_dim): - super().__init__() - self.dim = dim - self.output_dim = output_dim - self.output_proj = nn.Linear(dim, output_dim) - - def tokenize(self, *args, **kwargs) -> tp.Any: - """Should be any part of the processing that will lead to a synchronization - point, e.g. BPE tokenization with transfer to the GPU. - - The returned value will be saved and return later when calling forward(). - """ - raise NotImplementedError() - - def forward(self, inputs: tp.Any) -> ConditionType: - """Gets input that should be used as conditioning (e.g, genre, description or a waveform). - Outputs a ConditionType, after the input data was embedded as a dense vector. - - Returns: - ConditionType: - - A tensor of size [B, T, D] where B is the batch size, T is the length of the - output embedding and D is the dimension of the embedding. - - And a mask indicating where the padding tokens. 
- """ - raise NotImplementedError() - - -class TextConditioner(BaseConditioner): - ... - - -class LUTConditioner(TextConditioner): - """Lookup table TextConditioner. - - Args: - n_bins (int): Number of bins. - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - tokenizer (str): Name of the tokenizer. - pad_idx (int, optional): Index for padding token. Defaults to 0. - """ - def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): - super().__init__(dim, output_dim) - self.embed = nn.Embedding(n_bins, dim) - self.tokenizer: Tokenizer - if tokenizer == "whitespace": - self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) - elif tokenizer == "noop": - self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) - else: - raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - device = self.embed.weight.device - tokens, mask = self.tokenizer(x) - tokens, mask = tokens.to(device), mask.to(device) - return tokens, mask - - def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: - tokens, mask = inputs - embeds = self.embed(tokens) - embeds = self.output_proj(embeds) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class T5Conditioner(TextConditioner): - """T5-based TextConditioner. - - Args: - name (str): Name of the T5 model. - output_dim (int): Output dim of the conditioner. - finetune (bool): Whether to fine-tune T5 at train time. - device (str): Device for T5 Conditioner. - autocast_dtype (tp.Optional[str], optional): Autocast dtype. - word_dropout (float, optional): Word dropout probability. - normalize_text (bool, optional): Whether to apply text normalization. 
- """ - MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", - "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", - "google/flan-t5-xl", "google/flan-t5-xxl"] - MODELS_DIMS = { - "t5-small": 512, - "t5-base": 768, - "t5-large": 1024, - "t5-3b": 1024, - "t5-11b": 1024, - "google/flan-t5-small": 512, - "google/flan-t5-base": 768, - "google/flan-t5-large": 1024, - "google/flan-t5-3b": 1024, - "google/flan-t5-11b": 1024, - } - - def __init__(self, name: str, output_dim: int, finetune: bool, device: str, - autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., - normalize_text: bool = False): - assert name in self.MODELS, f"unrecognized t5 model name (should in {self.MODELS})" - super().__init__(self.MODELS_DIMS[name], output_dim) - self.device = device - self.name = name - self.finetune = finetune - self.word_dropout = word_dropout - - if autocast_dtype is None or self.device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - if self.device != 'cpu': - logger.warning("T5 has no autocast, this might lead to NaN") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
- # thanks https://gist.github.com/simon-weber/7853144 - previous_level = logging.root.manager.disable - logging.disable(logging.ERROR) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - self.t5_tokenizer = T5Tokenizer.from_pretrained(name) - t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) - finally: - logging.disable(previous_level) - if finetune: - self.t5 = t5 - else: - # this makes sure that the t5 models is not part - # of the saved checkpoint - self.__dict__["t5"] = t5.to(device) - - self.normalize_text = normalize_text - if normalize_text: - self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: - # if current sample doesn't have a certain attribute, replace with empty string - entries: tp.List[str] = [xi if xi is not None else "" for xi in x] - if self.normalize_text: - _, _, entries = self.text_normalizer(entries, return_text=True) - if self.word_dropout > 0. and self.training: - new_entries = [] - for entry in entries: - words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] - new_entries.append(" ".join(words)) - entries = new_entries - - empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) - - inputs = self.t5_tokenizer(entries, return_tensors="pt", padding=True).to(self.device) - mask = inputs["attention_mask"] - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - return inputs - - def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: - mask = inputs["attention_mask"] - with torch.set_grad_enabled(self.finetune), self.autocast: - embeds = self.t5(**inputs).last_hidden_state - embeds = self.output_proj(embeds.to(self.output_proj.weight)) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class WaveformConditioner(BaseConditioner): - """Base class for all conditioners that take a waveform as input. 
- Classes that inherit must implement `_get_wav_embedding` that outputs - a continuous tensor, and `_downsampling_factor` that returns the down-sampling - factor of the embedding model. - - Args: - dim (int): The internal representation dimension. - output_dim (int): Output dimension. - device (tp.Union[torch.device, str]): Device. - """ - def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): - super().__init__(dim, output_dim) - self.device = device - - def tokenize(self, wav_length: WavCondition) -> WavCondition: - wav, length, path = wav_length - assert length is not None - return WavCondition(wav.to(self.device), length.to(self.device), path) - - def _get_wav_embedding(self, wav: Tensor) -> Tensor: - """Gets as input a wav and returns a dense vector of conditions.""" - raise NotImplementedError() - - def _downsampling_factor(self): - """Returns the downsampling factor of the embedding model.""" - raise NotImplementedError() - - def forward(self, inputs: WavCondition) -> ConditionType: - """ - Args: - input (WavCondition): Tuple of (waveform, lengths). - Returns: - ConditionType: Dense vector representing the conditioning along with its' mask. - """ - wav, lengths, path = inputs - with torch.no_grad(): - embeds = self._get_wav_embedding(wav) - embeds = embeds.to(self.output_proj.weight) - embeds = self.output_proj(embeds) - - if lengths is not None: - lengths = lengths / self._downsampling_factor() - mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore - else: - mask = torch.ones_like(embeds) - embeds = (embeds * mask.unsqueeze(2).to(self.device)) - - return embeds, mask - - -class ChromaStemConditioner(WaveformConditioner): - """Chroma conditioner that uses DEMUCS to first filter out drums and bass. The is followed by - the insight the drums and bass often dominate the chroma, leading to the chroma not containing the - information about melody. 
- - Args: - output_dim (int): Output dimension for the conditioner. - sample_rate (int): Sample rate for the chroma extractor. - n_chroma (int): Number of chroma for the chroma extractor. - radix2_exp (int): Radix2 exponent for the chroma extractor. - duration (float): Duration used during training. This is later used for correct padding - in case we are using chroma as prefix. - match_len_on_eval (bool, optional): If True then all chromas are padded to the training - duration. Defaults to False. - eval_wavs (str, optional): Path to a json egg with waveform, this waveforms are used as - conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). - Defaults to None. - n_eval_wavs (int, optional): Limits the number of waveforms used for conditioning. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for the conditioner. - **kwargs: Additional parameters for the chroma extractor. - """ - def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, - duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, - n_eval_wavs: int = 0, device: tp.Union[torch.device, str] = "cpu", **kwargs): - from demucs import pretrained - super().__init__(dim=n_chroma, output_dim=output_dim, device=device) - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.sample_rate = sample_rate - self.match_len_on_eval = match_len_on_eval - self.duration = duration - self.__dict__["demucs"] = pretrained.get_model('htdemucs').to(device) - self.stem2idx = {'drums': 0, 'bass': 1, 'other': 2, 'vocal': 3} - self.stem_idx = torch.LongTensor([self.stem2idx['vocal'], self.stem2idx['other']]).to(device) - self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, - device=device, **kwargs) - self.chroma_len = self._get_chroma_len() - - def _downsampling_factor(self): - return self.chroma.winhop - - def 
_get_chroma_len(self): - """Get length of chroma during training""" - dummy_wav = torch.zeros((1, self.sample_rate * self.duration), device=self.device) - dummy_chr = self.chroma(dummy_wav) - return dummy_chr.shape[1] - - @torch.no_grad() - def _get_filtered_wav(self, wav): - from demucs.apply import apply_model - from demucs.audio import convert_audio - with self.autocast: - wav = convert_audio(wav, self.sample_rate, self.demucs.samplerate, self.demucs.audio_channels) - stems = apply_model(self.demucs, wav, device=self.device) - stems = stems[:, self.stem_idx] # extract stem - stems = stems.sum(1) # merge extracted stems - stems = stems.mean(1, keepdim=True) # mono - stems = convert_audio(stems, self.demucs.samplerate, self.sample_rate, 1) - return stems - - @torch.no_grad() - def _get_wav_embedding(self, wav): - # avoid 0-size tensors when we are working with null conds - if wav.shape[-1] == 1: - return self.chroma(wav) - stems = self._get_filtered_wav(wav) - chroma = self.chroma(stems) - - if self.match_len_on_eval: - b, t, c = chroma.shape - if t > self.chroma_len: - chroma = chroma[:, :self.chroma_len] - logger.debug(f'chroma was truncated! ({t} -> {chroma.shape[1]})') - elif t < self.chroma_len: - # chroma = F.pad(chroma, (0, 0, 0, self.chroma_len - t)) - n_repeat = int(math.ceil(self.chroma_len / t)) - chroma = chroma.repeat(1, n_repeat, 1) - chroma = chroma[:, :self.chroma_len] - logger.debug(f'chroma was zero-padded! ({t} -> {chroma.shape[1]})') - return chroma - - -class ChromaExtractor(nn.Module): - """Chroma extraction class, handles chroma extraction and quantization. - - Args: - sample_rate (int): Sample rate. - n_chroma (int): Number of chroma to consider. - radix2_exp (int): Radix2 exponent. - nfft (tp.Optional[int], optional): Number of FFT. - winlen (tp.Optional[int], optional): Window length. - winhop (tp.Optional[int], optional): Window hop size. - argmax (bool, optional): Whether to use argmax. Defaults to False. 
- norm (float, optional): Norm for chroma normalization. Defaults to inf. - device (tp.Union[torch.device, str], optional): Device to use. Defaults to cpu. - """ - def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12, - nfft: tp.Optional[int] = None, winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None, - argmax: bool = False, norm: float = torch.inf, device: tp.Union[torch.device, str] = "cpu"): - super().__init__() - from librosa import filters - self.device = device - self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32) - self.winlen = winlen or 2 ** radix2_exp - self.nfft = nfft or self.winlen - self.winhop = winhop or (self.winlen // 4) - self.sr = sample_rate - self.n_chroma = n_chroma - self.norm = norm - self.argmax = argmax - self.window = torch.hann_window(self.winlen).to(device) - self.fbanks = torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0, - n_chroma=self.n_chroma)).to(device) - self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen, - hop_length=self.winhop, power=2, center=True, - pad=0, normalized=True).to(device) - - def forward(self, wav): - with self.autocast: - T = wav.shape[-1] - # in case we are getting a wav that was dropped out (nullified) - # make sure wav length is no less that nfft - if T < self.nfft: - pad = self.nfft - T - r = 0 if pad % 2 == 0 else 1 - wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0) - assert wav.shape[-1] == self.nfft, f'expected len {self.nfft} but got {wav.shape[-1]}' - spec = self.spec(wav).squeeze(1) - raw_chroma = torch.einsum("cf,...ft->...ct", self.fbanks, spec) - norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6) - norm_chroma = rearrange(norm_chroma, "b d t -> b t d") - - if self.argmax: - idx = norm_chroma.argmax(-1, keepdims=True) - norm_chroma[:] = 0 - norm_chroma.scatter_(dim=-1, index=idx, value=1) - - return 
norm_chroma - - -def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str): - """Utility function for nullifying an attribute inside an ConditioningAttributes object. - If the condition is of type "wav", then nullify it using "nullify_condition". - If the condition is of any other type, set its' value to None. - Works in-place. - """ - if condition_type not in ["text", "wav"]: - raise ValueError( - "dropout_condition got an unexpected condition type!" - f" expected 'wav' or 'text' but got '{condition_type}'" - ) - - if condition not in getattr(sample, condition_type): - raise ValueError( - "dropout_condition received an unexpected condition!" - f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" - f"but got '{condition}' of type '{condition_type}'!" - ) - - if condition_type == "wav": - wav, length, path = sample.wav[condition] - sample.wav[condition] = nullify_wav(wav) - else: - sample.text[condition] = None - - return sample - - -class DropoutModule(nn.Module): - """Base class for all dropout modules.""" - def __init__(self, seed: int = 1234): - super().__init__() - self.rng = torch.Generator() - self.rng.manual_seed(seed) - - -class AttributeDropout(DropoutModule): - """Applies dropout with a given probability per attribute. This is different from the behavior of - ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example, - "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout - where if "artist" is dropped "genre" must also be dropped. - - Args: - p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: - ... - "genre": 0.1, - "artist": 0.5, - "wav": 0.25, - ... - active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. - seed (int, optional): Random seed. 
- """ - def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): - super().__init__(seed=seed) - self.active_on_eval = active_on_eval - # construct dict that return the values from p otherwise 0 - self.p = {} - for condition_type, probs in p.items(): - self.p[condition_type] = defaultdict(lambda: 0, probs) - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after certain attributes were set to None. - """ - if not self.training and not self.active_on_eval: - return samples - - samples = deepcopy(samples) - - for condition_type, ps in self.p.items(): # for condition types [text, wav] - for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) - if torch.rand(1, generator=self.rng).item() < p: - for sample in samples: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"AttributeDropout({dict(self.p)})" - - -class ClassifierFreeGuidanceDropout(DropoutModule): - """Applies Classifier Free Guidance dropout, meaning all attributes - are dropped with the same probability. - - Args: - p (float): Probability to apply condition dropout during training. - seed (int): Random seed. - """ - def __init__(self, p: float, seed: int = 1234): - super().__init__(seed=seed) - self.p = p - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (tp.List[ConditioningAttributes]): List of conditions. - Returns: - tp.List[ConditioningAttributes]: List of conditions after all attributes were set to None. 
- """ - if not self.training: - return samples - - # decide on which attributes to drop in a batched fashion - drop = torch.rand(1, generator=self.rng).item() < self.p - if not drop: - return samples - - # nullify conditions of all attributes - samples = deepcopy(samples) - - for condition_type in ["wav", "text"]: - for sample in samples: - for condition in sample.attributes[condition_type]: - dropout_condition(sample, condition_type, condition) - - return samples - - def __repr__(self): - return f"ClassifierFreeGuidanceDropout(p={self.p})" - - -class ConditioningProvider(nn.Module): - """Main class to provide conditions given all the supported conditioners. - - Args: - conditioners (dict): Dictionary of conditioners. - merge_text_conditions_p (float, optional): Probability to merge all text sources - into a single text condition. Defaults to 0. - drop_desc_p (float, optional): Probability to drop the original description - when merging all text sources into a single text condition. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for conditioners and output condition types. 
- """ - def __init__( - self, - conditioners: tp.Dict[str, BaseConditioner], - merge_text_conditions_p: float = 0, - drop_desc_p: float = 0, - device: tp.Union[torch.device, str] = "cpu", - ): - super().__init__() - self.device = device - self.merge_text_conditions_p = merge_text_conditions_p - self.drop_desc_p = drop_desc_p - self.conditioners = nn.ModuleDict(conditioners) - - @property - def text_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] - - @property - def wav_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] - - @property - def has_wav_condition(self): - return len(self.wav_conditions) > 0 - - def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: - """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. - This should be called before starting any real GPU work to avoid synchronization points. - This will return a dict matching conditioner names to their arbitrary tokenized representations. - - Args: - inputs (list[ConditioningAttribres]): List of ConditioningAttributes objects containing - text and wav conditions. - """ - assert all([type(x) == ConditioningAttributes for x in inputs]), \ - "got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]" \ - f" but types were {set([type(x) for x in inputs])}" - - output = {} - text = self._collate_text(inputs) - wavs = self._collate_wavs(inputs) - - assert set(text.keys() | wavs.keys()).issubset(set(self.conditioners.keys())), \ - f"got an unexpected attribute! 
Expected {self.conditioners.keys()}, got {text.keys(), wavs.keys()}" - - for attribute, batch in chain(text.items(), wavs.items()): - output[attribute] = self.conditioners[attribute].tokenize(batch) - return output - - def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: - """Compute pairs of `(embedding, mask)` using the configured conditioners - and the tokenized representations. The output is for example: - - { - "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), - "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), - ... - } - - Args: - tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. - """ - output = {} - for attribute, inputs in tokenized.items(): - condition, mask = self.conditioners[attribute](inputs) - output[attribute] = (condition, mask) - return output - - def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: - """Given a list of ConditioningAttributes objects, compile a dictionary where the keys - are the attributes and the values are the aggregated input per attribute. 
- For example: - Input: - [ - ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), - ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), - ] - Output: - { - "genre": ["Rock", "Hip-hop"], - "description": ["A rock song with a guitar solo", "A hip-hop verse"] - } - """ - batch_per_attribute: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) - - def _merge_conds(cond, merge_text_conditions_p=0, drop_desc_p=0): - def is_valid(k, v): - k_valid = k in ['key', 'bpm', 'genre', 'moods', 'instrument'] - v_valid = v is not None and isinstance(v, (int, float, str, list)) - return k_valid and v_valid - - def process_value(v): - if isinstance(v, (int, float, str)): - return v - if isinstance(v, list): - return ", ".join(v) - else: - RuntimeError(f"unknown type for text value! ({type(v), v})") - - desc = cond.text['description'] - meta_data = "" - if random.uniform(0, 1) < merge_text_conditions_p: - meta_pairs = [f'{k}: {process_value(v)}' for k, v in cond.text.items() if is_valid(k, v)] - random.shuffle(meta_pairs) - meta_data = ". ".join(meta_pairs) - desc = desc if not random.uniform(0, 1) < drop_desc_p else None - - if desc is None: - desc = meta_data if len(meta_data) > 1 else None - else: - desc = desc.rstrip('.') + ". " + meta_data - cond.text['description'] = desc.strip() if desc else None - - if self.training and self.merge_text_conditions_p: - for sample in samples: - _merge_conds(sample, self.merge_text_conditions_p, self.drop_desc_p) - - texts = [x.text for x in samples] - for text in texts: - for condition in self.text_conditions: - batch_per_attribute[condition].append(text[condition]) - - return batch_per_attribute - - def _collate_wavs(self, samples: tp.List[ConditioningAttributes]): - """Generate a dict where the keys are attributes by which we fetch similar wavs, - and the values are Tensors of wavs according to said attribtues. 
- - *Note*: by the time the samples reach this function, each sample should have some waveform - inside the "wav" attribute. It should be either: - 1. A real waveform - 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) - 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) - - Args: - samples (tp.List[ConditioningAttributes]): List of ConditioningAttributes samples. - Returns: - dict: A dicionary mapping an attribute name to wavs. - """ - wavs = defaultdict(list) - lens = defaultdict(list) - paths = defaultdict(list) - out = {} - - for sample in samples: - for attribute in self.wav_conditions: - wav, length, path = sample.wav[attribute] - wavs[attribute].append(wav.flatten()) - lens[attribute].append(length) - paths[attribute].append(path) - - # stack all wavs to a single tensor - for attribute in self.wav_conditions: - stacked_wav, _ = collate(wavs[attribute], dim=0) - out[attribute] = WavCondition(stacked_wav.unsqueeze(1), - torch.cat(lens['self_wav']), paths[attribute]) # type: ignore - - return out - - -class ConditionFuser(StreamingModule): - """Condition fuser handles the logic to combine the different conditions - to the actual model input. - - Args: - fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse - each condition. For example: - { - "prepend": ["description"], - "sum": ["genre", "bpm"], - "cross": ["description"], - } - cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention. - cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used. 
- """ - FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"] - - def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False, - cross_attention_pos_emb_scale: float = 1.0): - super().__init__() - assert all( - [k in self.FUSING_METHODS for k in fuse2cond.keys()] - ), f"got invalid fuse method, allowed methods: {self.FUSING_MEHTODS}" - self.cross_attention_pos_emb = cross_attention_pos_emb - self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale - self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond - self.cond2fuse: tp.Dict[str, str] = {} - for fuse_method, conditions in fuse2cond.items(): - for condition in conditions: - self.cond2fuse[condition] = fuse_method - - def forward( - self, - input: Tensor, - conditions: tp.Dict[str, ConditionType] - ) -> tp.Tuple[Tensor, tp.Optional[Tensor]]: - """Fuse the conditions to the provided model input. - - Args: - input (Tensor): Transformer input. - conditions (tp.Dict[str, ConditionType]): Dict of conditions. - Returns: - tp.Tuple[Tensor, Tensor]: The first tensor is the transformer input - after the conditions have been fused. The second output tensor is the tensor - used for cross-attention or None if no cross attention inputs exist. 
- """ - B, T, _ = input.shape - - if 'offsets' in self._streaming_state: - first_step = False - offsets = self._streaming_state['offsets'] - else: - first_step = True - offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) - - assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ - f"given conditions contain unknown attributes for fuser, " \ - f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" - cross_attention_output = None - for cond_type, (cond, cond_mask) in conditions.items(): - op = self.cond2fuse[cond_type] - if op == "sum": - input += cond - elif op == "input_interpolate": - cond = rearrange(cond, "b t d -> b d t") - cond = F.interpolate(cond, size=input.shape[1]) - input += rearrange(cond, "b d t -> b t d") - elif op == "prepend": - if first_step: - input = torch.cat([cond, input], dim=1) - elif op == "cross": - if cross_attention_output is not None: - cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) - else: - cross_attention_output = cond - else: - raise ValueError(f"unknown op ({op})") - - if self.cross_attention_pos_emb and cross_attention_output is not None: - positions = torch.arange( - cross_attention_output.shape[1], - device=cross_attention_output.device - ).view(1, -1, 1) - pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) - cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return input, cross_attention_output diff --git a/spaces/tomofi/MMOCR/docs/en/stats.py b/spaces/tomofi/MMOCR/docs/en/stats.py deleted file mode 100644 index 3dee5929448279f503e6f83cf3da10f61fe7c59f..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/docs/en/stats.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) OpenMMLab. All rights reserved. 
-import functools as func -import glob -import re -from os.path import basename, splitext - -import numpy as np -import titlecase - - -def title2anchor(name): - return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-', - name.strip().lower())).strip('-') - - -# Count algorithms - -files = sorted(glob.glob('*_models.md')) - -stats = [] - -for f in files: - with open(f, 'r') as content_file: - content = content_file.read() - - # Remove the blackquote notation from the paper link under the title - # for better layout in readthedocs - expr = r'(^## \s*?.*?\s+?)>\s*?(\[.*?\]\(.*?\))' - content = re.sub(expr, r'\1\2', content, flags=re.MULTILINE) - with open(f, 'w') as content_file: - content_file.write(content) - - # title - title = content.split('\n')[0].replace('#', '') - - # count papers - exclude_papertype = ['ABSTRACT', 'IMAGE'] - exclude_expr = ''.join(f'(?!{s})' for s in exclude_papertype) - expr = rf''\ - r'\s*\n.*?\btitle\s*=\s*{(.*?)}' - papers = set( - (papertype, titlecase.titlecase(paper.lower().strip())) - for (papertype, paper) in re.findall(expr, content, re.DOTALL)) - print(papers) - # paper links - revcontent = '\n'.join(list(reversed(content.splitlines()))) - paperlinks = {} - for _, p in papers: - q = p.replace('\\', '\\\\').replace('?', '\\?') - paper_link = title2anchor( - re.search( - rf'\btitle\s*=\s*{{\s*{q}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n', - revcontent, re.DOTALL | re.IGNORECASE).group(1)) - paperlinks[p] = f'[{p}]({splitext(basename(f))[0]}.html#{paper_link})' - paperlist = '\n'.join( - sorted(f' - [{t}] {paperlinks[x]}' for t, x in papers)) - # count configs - configs = set(x.lower().strip() - for x in re.findall(r'https.*configs/.*\.py', content)) - - # count ckpts - ckpts = set(x.lower().strip() - for x in re.findall(r'https://download.*\.pth', content) - if 'mmocr' in x) - - statsmsg = f""" -## [{title}]({f}) - -* Number of checkpoints: {len(ckpts)} -* Number of configs: {len(configs)} -* Number of papers: {len(papers)} -{paperlist} - - """ - 
- stats.append((papers, configs, ckpts, statsmsg)) - -allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) -allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) -allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) -msglist = '\n'.join(x for _, _, _, x in stats) - -papertypes, papercounts = np.unique([t for t, _ in allpapers], - return_counts=True) -countstr = '\n'.join( - [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) - -modelzoo = f""" -# Statistics - -* Number of checkpoints: {len(allckpts)} -* Number of configs: {len(allconfigs)} -* Number of papers: {len(allpapers)} -{countstr} - -{msglist} -""" - -with open('modelzoo.md', 'w') as f: - f.write(modelzoo) diff --git a/spaces/tomofi/MMOCR/mmocr/models/textdet/dense_heads/fce_head.py b/spaces/tomofi/MMOCR/mmocr/models/textdet/dense_heads/fce_head.py deleted file mode 100644 index 07855578107ef0538403a6abea7cc5f53fed1c50..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textdet/dense_heads/fce_head.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -from mmcv.runner import BaseModule -from mmdet.core import multi_apply - -from mmocr.models.builder import HEADS -from ..postprocess.utils import poly_nms -from .head_mixin import HeadMixin - - -@HEADS.register_module() -class FCEHead(HeadMixin, BaseModule): - """The class for implementing FCENet head. - - FCENet(CVPR2021): `Fourier Contour Embedding for Arbitrary-shaped Text - Detection `_ - - Args: - in_channels (int): The number of input channels. - scales (list[int]) : The scale of each layer. - fourier_degree (int) : The maximum Fourier transform degree k. - nms_thr (float) : The threshold of nms. - loss (dict): Config of loss for FCENet. - postprocessor (dict): Config of postprocessor for FCENet. 
- """ - - def __init__(self, - in_channels, - scales, - fourier_degree=5, - nms_thr=0.1, - loss=dict(type='FCELoss', num_sample=50), - postprocessor=dict( - type='FCEPostprocessor', - text_repr_type='poly', - num_reconstr_points=50, - alpha=1.0, - beta=2.0, - score_thr=0.3), - train_cfg=None, - test_cfg=None, - init_cfg=dict( - type='Normal', - mean=0, - std=0.01, - override=[ - dict(name='out_conv_cls'), - dict(name='out_conv_reg') - ]), - **kwargs): - old_keys = [ - 'text_repr_type', 'decoding_type', 'num_reconstr_points', 'alpha', - 'beta', 'score_thr' - ] - for key in old_keys: - if kwargs.get(key, None): - postprocessor[key] = kwargs.get(key) - warnings.warn( - f'{key} is deprecated, please specify ' - 'it in postprocessor config dict. See ' - 'https://github.com/open-mmlab/mmocr/pull/640' - ' for details.', UserWarning) - if kwargs.get('num_sample', None): - loss['num_sample'] = kwargs.get('num_sample') - warnings.warn( - 'num_sample is deprecated, please specify ' - 'it in loss config dict. 
See ' - 'https://github.com/open-mmlab/mmocr/pull/640' - ' for details.', UserWarning) - BaseModule.__init__(self, init_cfg=init_cfg) - loss['fourier_degree'] = fourier_degree - postprocessor['fourier_degree'] = fourier_degree - postprocessor['nms_thr'] = nms_thr - HeadMixin.__init__(self, loss, postprocessor) - - assert isinstance(in_channels, int) - - self.downsample_ratio = 1.0 - self.in_channels = in_channels - self.scales = scales - self.fourier_degree = fourier_degree - - self.nms_thr = nms_thr - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.out_channels_cls = 4 - self.out_channels_reg = (2 * self.fourier_degree + 1) * 2 - - self.out_conv_cls = nn.Conv2d( - self.in_channels, - self.out_channels_cls, - kernel_size=3, - stride=1, - padding=1) - self.out_conv_reg = nn.Conv2d( - self.in_channels, - self.out_channels_reg, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, feats): - """ - Args: - feats (list[Tensor]): Each tensor has the shape of :math:`(N, C_i, - H_i, W_i)`. - - Returns: - list[[Tensor, Tensor]]: Each pair of tensors corresponds to the - classification result and regression result computed from the input - tensor with the same index. They have the shapes of :math:`(N, - C_{cls,i}, H_i, W_i)` and :math:`(N, C_{out,i}, H_i, W_i)`. 
- """ - cls_res, reg_res = multi_apply(self.forward_single, feats) - level_num = len(cls_res) - preds = [[cls_res[i], reg_res[i]] for i in range(level_num)] - return preds - - def forward_single(self, x): - cls_predict = self.out_conv_cls(x) - reg_predict = self.out_conv_reg(x) - return cls_predict, reg_predict - - def get_boundary(self, score_maps, img_metas, rescale): - assert len(score_maps) == len(self.scales) - - boundaries = [] - for idx, score_map in enumerate(score_maps): - scale = self.scales[idx] - boundaries = boundaries + self._get_boundary_single( - score_map, scale) - - # nms - boundaries = poly_nms(boundaries, self.nms_thr) - - if rescale: - boundaries = self.resize_boundary( - boundaries, 1.0 / img_metas[0]['scale_factor']) - - results = dict(boundary_result=boundaries) - return results - - def _get_boundary_single(self, score_map, scale): - assert len(score_map) == 2 - assert score_map[1].shape[1] == 4 * self.fourier_degree + 2 - - return self.postprocessor(score_map, scale) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 980f8191d4c07eb35e338bd87e3b73b06b3214ad..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/necks/hrfpn.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/necks/hrfpn.py deleted file mode 100644 index 75e6c9543cca75c91a9c831a455837b9f363f3d3..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/necks/hrfpn.py +++ /dev/null @@ -1,99 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule -from torch.utils.checkpoint import checkpoint - -from ..builder import NECKS - - -@NECKS.register_module() -class HRFPN(BaseModule): - """HRFPN (High Resolution Feature Pyrmamids) - - paper: `High-Resolution Representations for Labeling Pixels and Regions - `_. - - Args: - in_channels (list): number of channels for each branch. - out_channels (int): output channels of feature pyramids. - num_outs (int): number of output stages. - pooling_type (str): pooling for generating feature pyramids - from {MAX, AVG}. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - stride (int): stride of 3x3 convolutional layers - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - in_channels, - out_channels, - num_outs=5, - pooling_type='AVG', - conv_cfg=None, - norm_cfg=None, - with_cp=False, - stride=1, - init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): - super(HRFPN, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.reduction_conv = ConvModule( - sum(in_channels), - out_channels, - kernel_size=1, - conv_cfg=self.conv_cfg, - act_cfg=None) - - self.fpn_convs = nn.ModuleList() - for i in range(self.num_outs): - self.fpn_convs.append( - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - stride=stride, - conv_cfg=self.conv_cfg, - act_cfg=None)) - - if pooling_type == 'MAX': - self.pooling = F.max_pool2d - else: - self.pooling = F.avg_pool2d - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == self.num_ins - outs = [inputs[0]] - for i in range(1, self.num_ins): - outs.append( - F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear')) - out = torch.cat(outs, dim=1) - if out.requires_grad and self.with_cp: - out = checkpoint(self.reduction_conv, out) - else: - out = self.reduction_conv(out) - outs = [out] - for i in range(1, self.num_outs): - outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) - outputs = [] - - for i in range(self.num_outs): - if outs[i].requires_grad and self.with_cp: - tmp_out = checkpoint(self.fpn_convs[i], outs[i]) - else: - tmp_out = self.fpn_convs[i](outs[i]) - outputs.append(tmp_out) - return tuple(outputs) diff --git a/spaces/torileatherman/news_headline_sentiment/app.py b/spaces/torileatherman/news_headline_sentiment/app.py deleted file mode 100644 index 78598e0696a2447913ea43e463daf4bc4bb0d5c2..0000000000000000000000000000000000000000 --- 
a/spaces/torileatherman/news_headline_sentiment/app.py +++ /dev/null @@ -1,104 +0,0 @@ -import gradio as gr -from datasets import load_dataset, Dataset -import pandas as pd -from huggingface_hub import create_repo -from huggingface_hub import login -login(token='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') - -# Load batch predictions data set -dataset = load_dataset("torileatherman/sentiment_analysis_batch_predictions", split='train') -predictions_df = pd.DataFrame(dataset) -grouped_predictions = predictions_df.groupby(predictions_df.Prediction) -positive_preds = grouped_predictions.get_group(2) -neutral_preds = grouped_predictions.get_group(1) -negative_preds = grouped_predictions.get_group(0) - -predictions_df['Prediction'] = predictions_df['Prediction'].map({0: 'Negative', 1: 'Neutral', 2: 'Positive'}) - -# Load training data set -dataset = load_dataset("torileatherman/sentiment_analysis_training", split='train') -training_df = pd.DataFrame(dataset) -random_sample = {} - -# Number of articles shown -n = 5 - -def article_selection(sentiment): - if sentiment == "Positive": - predictions = positive_preds - predictions_shuffled = predictions.sample(frac=1,weights=predictions['Confidence']) - top3 = predictions_shuffled[0:n] - top3_result = top3[['Headline_string','Url']] - top3_result.rename(columns = {'Headline_string':'Headlines', 'Url':'URL'}) - return top3_result - - elif sentiment == "Negative": - predictions = negative_preds - predictions_shuffled = predictions.sample(frac=1,weights=predictions['Confidence']) - top3 = predictions_shuffled[0:n] - top3_result = top3[['Headline_string','Url']] - top3_result.rename(columns = {'Headline_string':'Headlines', 'Url':'URL'}) - return top3_result - else: - predictions = neutral_preds - predictions_shuffled = predictions.sample(frac=1,weights=predictions['Confidence']) - top3 = predictions_shuffled[0:n] - top3_result = top3[['Headline_string','Url']] - top3_result.rename(columns = {'Headline_string':'Headlines', 'Url':'URL'}) 
- return top3_result - -def manual_label(): - # Selecting random row from batch data - global random_sample - random_sample = predictions_df.sample() - random_headline = random_sample['Headline_string'].iloc[0] - random_prediction = random_sample['Prediction'].iloc[0] - - return random_headline, random_prediction - - -def thanks(sentiment): - - # Create int label - mapping = gender = {'Negative': 0,'Neutral': 1, 'Positive':2} - sentiment = int(mapping[sentiment]) - - global training_df - # Append training data set - training_df = training_df.append({'Sentiment': sentiment, 'Headline_string': random_sample['Headline_string'].iloc[0], 'Headline': random_sample['Headline'].iloc[0] }, ignore_index=True) - training_df = training_df.drop_duplicates(subset='Headline_string').reset_index(drop=True) - - # Upload training data set - ds = Dataset.from_pandas(training_df) - try: - ds.push_to_hub("torileatherman/sentiment_analysis_training") - except StopIteration: - pass - - return f"""Thank you for making our model better! """ - -description1 = "This application recommends news articles depending on the sentiment of the headline. Enter your preference of what type of news articles you would like recommended to you today: Positive, Negative, or Neutral." - - -suggestion_demo = gr.Interface( - fn=article_selection, - title = 'Recommending News Articles', - inputs = gr.Dropdown(["Positive","Negative","Neutral"], label="What type of news articles would you like recommended?"), - outputs = "dataframe", - #outputs = [gr.Textbox(label="Recommended News Articles (1/3)"),gr.Textbox(label="Recommended News Articles (2/3)"),gr.Textbox(label="Recommended News Articles (3/3)")], - description = "This application recommends news articles depending on the sentiment of the headline. Enter your preference of what type of news articles you would like recommended to you today: Positive, Negative, or Neutral." -) - -with gr.Blocks() as manual_label_demo: - gr.Markdown("

      Label our Data

      This application will show you a recent news headline and our predicted sentiment. To help us improve our model, choose the real sentiment of this headline from our dropdown and submit!") - generate_btn = gr.Button('Show me a headline!') - generate_btn.click(fn=manual_label, outputs=[gr.Textbox(label="News Headline"),gr.Textbox(label="Our Predicted Sentiment")]) - drop_down_label = gr.Dropdown(["Positive","Negative","Neutral"], label="Select the true sentiment of the news article.") - submit_btn = gr.Button('Submit your sentiment!') - submit_btn.click(fn=thanks, inputs=drop_down_label, outputs=gr.Textbox(label = ' ')) - - -demo = gr.TabbedInterface([suggestion_demo, manual_label_demo], ["Get recommended news articles", "Help improve our model"]) - - -demo.launch() \ No newline at end of file diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/scripts/download_models.sh b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/scripts/download_models.sh deleted file mode 100644 index 84297d7b8b9a78d241edcd5adaf7d9aa273790de..0000000000000000000000000000000000000000 --- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/scripts/download_models.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -wget -O models/ldm/celeba256/celeba-256.zip https://ommer-lab.com/files/latent-diffusion/celeba.zip -wget -O models/ldm/ffhq256/ffhq-256.zip https://ommer-lab.com/files/latent-diffusion/ffhq.zip -wget -O models/ldm/lsun_churches256/lsun_churches-256.zip https://ommer-lab.com/files/latent-diffusion/lsun_churches.zip -wget -O models/ldm/lsun_beds256/lsun_beds-256.zip https://ommer-lab.com/files/latent-diffusion/lsun_bedrooms.zip -wget -O models/ldm/text2img256/model.zip https://ommer-lab.com/files/latent-diffusion/text2img.zip -wget -O models/ldm/cin256/model.zip https://ommer-lab.com/files/latent-diffusion/cin.zip -wget -O models/ldm/semantic_synthesis512/model.zip https://ommer-lab.com/files/latent-diffusion/semantic_synthesis.zip -wget -O 
models/ldm/semantic_synthesis256/model.zip https://ommer-lab.com/files/latent-diffusion/semantic_synthesis256.zip -wget -O models/ldm/bsr_sr/model.zip https://ommer-lab.com/files/latent-diffusion/sr_bsr.zip -wget -O models/ldm/layout2img-openimages256/model.zip https://ommer-lab.com/files/latent-diffusion/layout2img_model.zip -wget -O models/ldm/inpainting_big/model.zip https://ommer-lab.com/files/latent-diffusion/inpainting_big.zip - - - -cd models/ldm/celeba256 -unzip -o celeba-256.zip - -cd ../ffhq256 -unzip -o ffhq-256.zip - -cd ../lsun_churches256 -unzip -o lsun_churches-256.zip - -cd ../lsun_beds256 -unzip -o lsun_beds-256.zip - -cd ../text2img256 -unzip -o model.zip - -cd ../cin256 -unzip -o model.zip - -cd ../semantic_synthesis512 -unzip -o model.zip - -cd ../semantic_synthesis256 -unzip -o model.zip - -cd ../bsr_sr -unzip -o model.zip - -cd ../layout2img-openimages256 -unzip -o model.zip - -cd ../inpainting_big -unzip -o model.zip - -cd ../.. diff --git a/spaces/trttung1610/musicgen/audiocraft/grids/compression/debug.py b/spaces/trttung1610/musicgen/audiocraft/grids/compression/debug.py deleted file mode 100644 index 5612ff5688d85fede0e605b244919e8081cb1da9..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/grids/compression/debug.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Grid search file, simply list all the exp you want in `explorer`. -Any new exp added there will be scheduled. -You can cancel and experiment by commenting its line. - -This grid is a minimal example for debugging compression task -and how to override parameters directly in a grid. 
-Learn more about dora grids: https://github.com/facebookresearch/dora -""" - -from ._explorers import CompressionExplorer -from ...environment import AudioCraftEnvironment - - -@CompressionExplorer -def explorer(launcher): - partitions = AudioCraftEnvironment.get_slurm_partitions(['team', 'global']) - launcher.slurm_(gpus=2, partition=partitions) - launcher.bind_(solver='compression/debug') - - with launcher.job_array(): - # base debug task using config from solver=compression/debug - launcher() - # we can override parameters in the grid to launch additional xps - launcher({'rvq.bins': 2048, 'rvq.n_q': 4}) diff --git a/spaces/tyang/simcse-mpnet-fuzz-tfidf/app.py b/spaces/tyang/simcse-mpnet-fuzz-tfidf/app.py deleted file mode 100644 index d3413ac9584639ecb69356606da0442f9432fd7e..0000000000000000000000000000000000000000 --- a/spaces/tyang/simcse-mpnet-fuzz-tfidf/app.py +++ /dev/null @@ -1,98 +0,0 @@ -import torch -from scipy.spatial.distance import cosine -from transformers import AutoModel, AutoTokenizer -from thefuzz import fuzz -from sklearn.feature_extraction.text import TfidfVectorizer -from sklearn.metrics.pairwise import cosine_similarity -import gradio as gr - - -tokenizer_simcse = AutoTokenizer.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased") -model_simcse = AutoModel.from_pretrained("princeton-nlp/sup-simcse-bert-base-uncased") -tokenizer_mpnet = AutoTokenizer.from_pretrained('sentence-transformers/stsb-mpnet-base-v2') -model_mpnet = AutoModel.from_pretrained('sentence-transformers/stsb-mpnet-base-v2') -vectorizer = TfidfVectorizer() - - -def mean_pooling(model_output, attention_mask): - token_embeddings = model_output[0] - input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - - -def thefuzz(text1, text2): - score = fuzz.token_sort_ratio(text1, text2) - return {'levenshtein distance of sorted 
tokens':score/100} - - -def tfidf(text1, text2): - t1_tfidf = vectorizer.fit_transform([text1]) - t2_tfidf = vectorizer.transform([text2]) - cosine_sim = cosine_similarity(t1_tfidf, t2_tfidf).flatten()[0] - return {'cosine similarity of tf-idf vectors':str(round(cosine_sim,2))} - - -def simcse(text1, text2): - texts = [text1,text2] - inputs = tokenizer_simcse(texts, padding=True, truncation=True, return_tensors="pt") - with torch.no_grad(): - embeddings = model_simcse(**inputs, output_hidden_states=True, return_dict=True).pooler_output - cosine_sim = 1 - cosine(embeddings[0], embeddings[1]) - return {"cosine similarity of simcse embeddings":str(round(cosine_sim,2))} - - -def mpnet(text1, text2): - encoded_input = tokenizer_mpnet([text1,text2], padding=True, truncation=True, return_tensors='pt') - with torch.no_grad(): - model_output = model_mpnet(**encoded_input) - sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) - cosine_sim = 1 - cosine(sentence_embeddings[0], sentence_embeddings[1]) - return {"cosine similarity of stsb-mpnet embeddings":str(round(cosine_sim,2))} - - -def get_scores(text1, text2): - fuzz_out = thefuzz(text1, text2) - tfidf_out = tfidf(text1, text2) - simcse_out = simcse(text1, text2) - mpnet_out = mpnet(text1, text2) - - return simcse_out, mpnet_out, fuzz_out, tfidf_out - -inputs = [ - gr.Textbox(lines=5, label="Input Text One"), - gr.Textbox(lines=5, label="Input Text Two") -] -outputs = [ - gr.Label(label="Cosine similarity based on SimCSE embeddings"), - gr.Label(label="Cosine similarity based on stsb-mpnet embeddings"), - gr.Label(label="Token sort ratio using Levenshtein distance"), - gr.Label(label="Cosine similarity based on tf-idf vectors"), -] -title = "SimCSE vs MPNet vs thefuzz vs TF-IDF" -description = "Demo for comparing semantic text similarity methods. Princeton-NLP SimCSE, stsb-mpnet-base-v2 from sentence-transformers (MPnet from Microsoft as the backbone), thefuzz from SeatGeek, and TF-IDF. 
Interface by Troy Yang." -article = "

      SimCSE: Simple Contrastive Learning of Sentence Embeddings | stsb-mpnet-base-v2 HuggingFace model card | thefuzz: Fuzzy String Matching in Python

      " -examples = [ - ["There's a kid on a skateboard.","A kid is skateboarding."], - ['There is no boy standing in front of the blue building in the space reserved for handicapped people', 'A boy is standing in front of the blue building in the space reserved for handicapped people'], - ['People wearing costumes are gathering in a forest and are looking in the same direction','Masked people are looking in the same direction in a forest'], - ['Two large persons are sitting on a park bench and they have a bottle of soda between them','Two large persons are standing near a park bench and they have nothing between them'], -['A young man with brown hair and shades is sitting in front of some cans of soda', - 'A young man with brown hair and sunglasses is sitting in front of some cans of soda'] -,['A young lady with light brown hair is wearing a red necklace, a sweatshirt and earrings and is smiling', - 'There is no young lady with light brown hair wearing a red necklace, a sweatshirt and earrings and smiling'] -,['A woman wearing a blue and white uniform with a white and blue hat is keeping her mouth open and is near others dressed in the same fashion', - 'A woman wearing casual clothing is keeping her mouth closed and is near other people dressed differently'] -,['The man with brown hair is wearing sunglasses and is sitting listlessly at a table with cans of soda and other drinks', - 'The man with brown hair is wearing sunglasses and is sitting at a table with cans of soda and other drinks'] -,['There is no man wearing clothes that are covered with paint or is sitting outside in a busy area writing something', - 'A man is wearing clothes that are covered with paint and is sitting outside in a busy area writing something'] -,['The shirtless man in striped shorts and sunglasses is not standing near a man in a white shirt and sunglasses', - 'The shirtless man in striped shorts and sunglasses is standing near a man in a white shirt and sunglasses'] -,['The shirtless man 
in striped shorts and sunglasses is standing near a man in a white shirt and sunglasses', - 'The shirtless man in striped shorts and sunglasses is standing near a person in a white shirt and sunglasses'] -,['A young boy is wearing a blue patterned swim suit, a black and yellow swim cap and has blue swim goggles on her head', - 'A young girl is wearing a blue patterned swim suit, a black and yellow swim cap and has blue swimming goggles on her head'] -] - -gr.Interface(get_scores, inputs, outputs, title=title, description=description, article=article, - theme="darkdefault", examples=examples, flagging_options=["strongly related","related", "neutral", "unrelated", "stongly unrelated"]).launch()#()# \ No newline at end of file diff --git a/spaces/ulysses115/Nogizaka46-so/onnx/onnx_export.py b/spaces/ulysses115/Nogizaka46-so/onnx/onnx_export.py deleted file mode 100644 index 976bfe97a213d1390bdc044b5d86cab84d10e63b..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/Nogizaka46-so/onnx/onnx_export.py +++ /dev/null @@ -1,73 +0,0 @@ -import argparse -import time -import numpy as np -import onnx -from onnxsim import simplify -import onnxruntime as ort -import onnxoptimizer -import torch -from model_onnx import SynthesizerTrn -import utils -from hubert import hubert_model_onnx - -def main(HubertExport,NetExport): - - path = "NyaruTaffy" - - if(HubertExport): - device = torch.device("cuda") - hubert_soft = utils.get_hubert_model() - test_input = torch.rand(1, 1, 16000) - input_names = ["source"] - output_names = ["embed"] - torch.onnx.export(hubert_soft.to(device), - test_input.to(device), - "hubert3.0.onnx", - dynamic_axes={ - "source": { - 2: "sample_length" - } - }, - verbose=False, - opset_version=13, - input_names=input_names, - output_names=output_names) - if(NetExport): - device = torch.device("cuda") - hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - SVCVITS = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - 
hps.train.segment_size // hps.data.hop_length, - **hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", SVCVITS, None) - _ = SVCVITS.eval().to(device) - for i in SVCVITS.parameters(): - i.requires_grad = False - test_hidden_unit = torch.rand(1, 50, 256) - test_lengths = torch.LongTensor([50]) - test_pitch = torch.rand(1, 50) - test_sid = torch.LongTensor([0]) - input_names = ["hidden_unit", "lengths", "pitch", "sid"] - output_names = ["audio", ] - SVCVITS.eval() - torch.onnx.export(SVCVITS, - ( - test_hidden_unit.to(device), - test_lengths.to(device), - test_pitch.to(device), - test_sid.to(device) - ), - f"checkpoints/{path}/model.onnx", - dynamic_axes={ - "hidden_unit": [0, 1], - "pitch": [1] - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names) - - -if __name__ == '__main__': - main(False,True) diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/BEST Keygenxf3dsMax2012x64exe.md b/spaces/usbethFlerru/sovits-modelsV2/example/BEST Keygenxf3dsMax2012x64exe.md deleted file mode 100644 index a02cb8579248148078d46a8bdb9910751b567c39..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/BEST Keygenxf3dsMax2012x64exe.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Keygenxf3dsMax2012x64exe


      Download Filehttps://urlcod.com/2uyX7U



      - -Keygenxf3dsMax2012x64exe · fifa 16 super deluxe edition crack only · Adobe Audition CC 2018 11.0.2.2 (x64) Crack full version · Ayalum Njanum Thammil ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/user238921933/stable-diffusion-webui/modules/postprocessing.py b/spaces/user238921933/stable-diffusion-webui/modules/postprocessing.py deleted file mode 100644 index 21e32af9866abfc02288f9f04a5195f1700de1b0..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/postprocessing.py +++ /dev/null @@ -1,103 +0,0 @@ -import os - -from PIL import Image - -from modules import shared, images, devices, scripts, scripts_postprocessing, ui_common, generation_parameters_copypaste -from modules.shared import opts - - -def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): - devices.torch_gc() - - shared.state.begin() - shared.state.job = 'extras' - - image_data = [] - image_names = [] - outputs = [] - - if extras_mode == 1: - for img in image_folder: - image = Image.open(img) - image_data.append(image) - image_names.append(os.path.splitext(img.orig_name)[0]) - elif extras_mode == 2: - assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled' - assert input_dir, 'input directory not selected' - - image_list = shared.listfiles(input_dir) - for filename in image_list: - try: - image = Image.open(filename) - except Exception: - continue - image_data.append(image) - image_names.append(filename) - else: - assert image, 'image not selected' - - image_data.append(image) - image_names.append(None) - - if extras_mode == 2 and output_dir != '': - outpath = output_dir - else: - outpath = opts.outdir_samples or opts.outdir_extras_samples - - infotext = '' - - for image, name in zip(image_data, image_names): - shared.state.textinfo = name - - existing_pnginfo = image.info or {} - - pp = scripts_postprocessing.PostprocessedImage(image.convert("RGB")) - - scripts.scripts_postproc.run(pp, args) - - if opts.use_original_name_batch and name is not None: - basename = os.path.splitext(os.path.basename(name))[0] - 
else: - basename = '' - - infotext = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in pp.info.items() if v is not None]) - - if opts.enable_pnginfo: - pp.image.info = existing_pnginfo - pp.image.info["postprocessing"] = infotext - - if save_output: - images.save_image(pp.image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None) - - if extras_mode != 2 or show_extras_results: - outputs.append(pp.image) - - devices.torch_gc() - - return outputs, ui_common.plaintext_to_html(infotext), '' - - -def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True): - """old handler for API""" - - args = scripts.scripts_postproc.create_args_for_run({ - "Upscale": { - "upscale_mode": resize_mode, - "upscale_by": upscaling_resize, - "upscale_to_width": upscaling_resize_w, - "upscale_to_height": upscaling_resize_h, - "upscale_crop": upscaling_crop, - "upscaler_1_name": extras_upscaler_1, - "upscaler_2_name": extras_upscaler_2, - "upscaler_2_visibility": extras_upscaler_2_visibility, - }, - "GFPGAN": { - "gfpgan_visibility": gfpgan_visibility, - }, - "CodeFormer": { - "codeformer_visibility": codeformer_visibility, - "codeformer_weight": codeformer_weight, - }, - }) - - return run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output=save_output) diff --git a/spaces/versae/modernisa/app.py b/spaces/versae/modernisa/app.py deleted file mode 100644 index 
e26130d79a8e3b3952472401305c7e79239b1db2..0000000000000000000000000000000000000000 --- a/spaces/versae/modernisa/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import random -import gradio as gr -from transformers import AutoTokenizer, pipeline, T5ForConditionalGeneration - -model_name = "versae/byt5-base-finetuned-modernisa" # "versae/modernisa-pre" -tokenizer = AutoTokenizer.from_pretrained(model_name) -model = T5ForConditionalGeneration.from_pretrained(model_name) - -def modernisa(lines=None, file_obj=None): - is_file_valid = False - if file_obj: - is_file_valid = file_obj.name.endswith(".txt") - if is_file_valid: - with open(file_obj.name) as file: - lines = file.read() - generated_text = [] - if lines: - lines = [line for line in lines.strip().split('\n') if line.strip()] - text2text_generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer) - outputs = [] - for line in lines: - outputs += text2text_generator([line], max_length=150) - generated_text = [output["generated_text"] for output in outputs] - if is_file_valid and file_obj: - output_file = f"{file_obj.name.rsplit('.', 1)[0]}_modernized.txt" - else: - output_file = "modernized.txt" - with open(output_file, "w") as output: - output.write("\n".join(generated_text)) - return "
      ".join(generated_text), output_file - - -samples = [ -"""Otra vez, Don Iuan, me dad, -y otras mil vezes los braços. -Otra, y otras mil sean lazos -de nuestra antigua amistad. -Como venis? -Yo me siento -tan alegre, tan vfano, -tan venturoso, tan vano, -que no podrà el pensamiento -encareceros jamàs -las venturas que posseo, -porque el pensamiento creo""" -] - -gr.Interface( - fn=modernisa, - inputs=[ - gr.inputs.Textbox(lines=12, label="Enter Spanish Golden Age text", default=random.choice(samples)), - gr.inputs.File(file_count="single", label="Or upload a plain text file (.txt)", type="file", optional=True), - ], - outputs=[ - gr.outputs.HTML(label="Modern Spanish"), - gr.outputs.File(label="Download file"), - ] -).launch(inline=False) diff --git a/spaces/vumichien/Generate_human_motion/pyrender/tests/unit/test_cameras.py b/spaces/vumichien/Generate_human_motion/pyrender/tests/unit/test_cameras.py deleted file mode 100644 index 7544ad8f8e3ee55236fd2e32dbc12065153cbe5b..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/pyrender/tests/unit/test_cameras.py +++ /dev/null @@ -1,164 +0,0 @@ -import numpy as np -import pytest - -from pyrender import PerspectiveCamera, OrthographicCamera - - -def test_perspective_camera(): - - # Set up constants - znear = 0.05 - zfar = 100 - yfov = np.pi / 3.0 - width = 1000.0 - height = 500.0 - aspectRatio = 640.0 / 480.0 - - # Test basics - with pytest.raises(TypeError): - p = PerspectiveCamera() - - p = PerspectiveCamera(yfov=yfov) - assert p.yfov == yfov - assert p.znear == 0.05 - assert p.zfar is None - assert p.aspectRatio is None - p.name = 'asdf' - p.name = None - - with pytest.raises(ValueError): - p.yfov = 0.0 - - with pytest.raises(ValueError): - p.yfov = -1.0 - - with pytest.raises(ValueError): - p.znear = -1.0 - - p.znear = 0.0 - p.znear = 0.05 - p.zfar = 100.0 - assert p.zfar == 100.0 - - with pytest.raises(ValueError): - p.zfar = 0.03 - - with pytest.raises(ValueError): - p.zfar = 
0.05 - - p.aspectRatio = 10.0 - assert p.aspectRatio == 10.0 - - with pytest.raises(ValueError): - p.aspectRatio = 0.0 - - with pytest.raises(ValueError): - p.aspectRatio = -1.0 - - # Test matrix getting/setting - - # NF - p.znear = 0.05 - p.zfar = 100 - p.aspectRatio = None - - with pytest.raises(ValueError): - p.get_projection_matrix() - - assert np.allclose( - p.get_projection_matrix(width, height), - np.array([ - [1.0 / (width / height * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0], - [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0], - [0.0, 0.0, (zfar + znear) / (znear - zfar), - (2 * zfar * znear) / (znear - zfar)], - [0.0, 0.0, -1.0, 0.0] - ]) - ) - - # NFA - p.aspectRatio = aspectRatio - assert np.allclose( - p.get_projection_matrix(width, height), - np.array([ - [1.0 / (aspectRatio * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0], - [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0], - [0.0, 0.0, (zfar + znear) / (znear - zfar), - (2 * zfar * znear) / (znear - zfar)], - [0.0, 0.0, -1.0, 0.0] - ]) - ) - assert np.allclose( - p.get_projection_matrix(), p.get_projection_matrix(width, height) - ) - - # N - p.zfar = None - p.aspectRatio = None - assert np.allclose( - p.get_projection_matrix(width, height), - np.array([ - [1.0 / (width / height * np.tan(yfov / 2.0)), 0.0, 0.0, 0.0], - [0.0, 1.0 / np.tan(yfov / 2.0), 0.0, 0.0], - [0.0, 0.0, -1.0, -2.0 * znear], - [0.0, 0.0, -1.0, 0.0] - ]) - ) - - -def test_orthographic_camera(): - xm = 1.0 - ym = 2.0 - n = 0.05 - f = 100.0 - - with pytest.raises(TypeError): - c = OrthographicCamera() - - c = OrthographicCamera(xmag=xm, ymag=ym) - - assert c.xmag == xm - assert c.ymag == ym - assert c.znear == 0.05 - assert c.zfar == 100.0 - assert c.name is None - - with pytest.raises(TypeError): - c.ymag = None - - with pytest.raises(ValueError): - c.ymag = 0.0 - - with pytest.raises(ValueError): - c.ymag = -1.0 - - with pytest.raises(TypeError): - c.xmag = None - - with pytest.raises(ValueError): - c.xmag = 0.0 - - with pytest.raises(ValueError): - c.xmag = -1.0 
- - with pytest.raises(TypeError): - c.znear = None - - with pytest.raises(ValueError): - c.znear = 0.0 - - with pytest.raises(ValueError): - c.znear = -1.0 - - with pytest.raises(ValueError): - c.zfar = 0.01 - - assert np.allclose( - c.get_projection_matrix(), - np.array([ - [1.0 / xm, 0, 0, 0], - [0, 1.0 / ym, 0, 0], - [0, 0, 2.0 / (n - f), (f + n) / (n - f)], - [0, 0, 0, 1.0] - ]) - ) diff --git a/spaces/vumichien/canvas_controlnet/annotator/canny/__init__.py b/spaces/vumichien/canvas_controlnet/annotator/canny/__init__.py deleted file mode 100644 index cb0da951dc838ec9dec2131007e036113281800b..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/canny/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -import cv2 - - -class CannyDetector: - def __call__(self, img, low_threshold, high_threshold): - return cv2.Canny(img, low_threshold, high_threshold) diff --git a/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/kkms_kssw.py b/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/kkms_kssw.py deleted file mode 100644 index cf5db1c21efc1237309a473ec8c8dae4d1f599c7..0000000000000000000000000000000000000000 --- a/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/kkms_kssw.py +++ /dev/null @@ -1,77 +0,0 @@ -import os - -import src.constants as constants_utils -import src.langchain_utils as langchain_utils -import src.weather as weather_utils -import src.mandi_price as mandi_utils -import src.translator as translator_utils -import src.web_crawler as web_crawler_utils - -import logging -logging.basicConfig( - format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S" -) -logger = logging.getLogger(__name__) - -import warnings -warnings.filterwarnings('ignore') - - - -class KKMS_KSSW: - def __init__(self): - self.index_type = constants_utils.INDEX_TYPE - self.load_from_existing_index_store = constants_utils.LOAD_FROM_EXISTING_INDEX_STORE - - # Instantiate langchain_utils class object - 
self.langchain_utils_obj = langchain_utils.LANGCHAIN_UTILS( - index_type=self.index_type, - load_from_existing_index_store=self.load_from_existing_index_store - ) - # Instantiate Mandi Price utils class object - self.mandi_utils_obj = mandi_utils.MANDI_PRICE() - # Instantiate Weather class object - self.weather_utils_obj = weather_utils.WEATHER() - # Instantiate translator_utils class object - self.translator_utils_obj = translator_utils.TRANSLATOR() - - - - # Initialize index (vector store) - def load_create_index(self): - logger.info(f"Load/Create index") - self.langchain_utils_obj.load_create_index() - - - # Upload data and update the index - def upload_data( - self, - doc_type, - files_or_urls, - index_category - ): - logger.info(f"Uploading data") - self.langchain_utils_obj.upload_data( - doc_type=doc_type, - files_or_urls=files_or_urls, - index_category=index_category - ) - - - # Define query on index to retrieve the most relevant top K documents from the vector store - def query( - self, - question, - question_category - ): - ''' - Args: - mode: can be any of [default, embedding] - response_mode: can be any of [default, compact, tree_summarize] - ''' - logger.info(f"Querying from index/vector store") - - return self.langchain_utils_obj.query( - question=question, - question_category=question_category - ) diff --git a/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/models/export.py b/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/models/export.py deleted file mode 100644 index 168d24d50a5a6b9937c868497f3854ca85fd2d9b..0000000000000000000000000000000000000000 --- a/spaces/weiren119/AudiogramDigitization/src/digitizer/yolov5/models/export.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats - -Usage: - $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 -""" - -import argparse - -import torch -import torch.nn as nn - -from models.common 
import Conv -from models.experimental import attempt_load -from utils.activations import Hardswish -from utils.general import set_logging - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/ - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - opt = parser.parse_args() - opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand - print(opt) - set_logging() - - # Input - img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection - - # Load PyTorch model - model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model - - # Update model - for k, m in model.named_modules(): - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatability - if isinstance(m, Conv) and isinstance(m.act, nn.Hardswish): - m.act = Hardswish() # assign activation - # if isinstance(m, Detect): - # m.forward = m.forward_export # assign forward (optional) - model.model[-1].export = True # set Detect() layer export=True - y = model(img) # dry run - - # TorchScript export - try: - print('\nStarting TorchScript export with torch %s...' % torch.__version__) - f = opt.weights.replace('.pt', '.torchscript.pt') # filename - ts = torch.jit.trace(model, img) - ts.save(f) - print('TorchScript export success, saved as %s' % f) - except Exception as e: - print('TorchScript export failure: %s' % e) - - # ONNX export - try: - import onnx - - print('\nStarting ONNX export with onnx %s...' 
% onnx.__version__) - f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], - output_names=['classes', 'boxes'] if y is None else ['output']) - - # Checks - onnx_model = onnx.load(f) # load onnx model - onnx.checker.check_model(onnx_model) # check onnx model - # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model - print('ONNX export success, saved as %s' % f) - except Exception as e: - print('ONNX export failure: %s' % e) - - # CoreML export - try: - import coremltools as ct - - print('\nStarting CoreML export with coremltools %s...' % ct.__version__) - # convert model from torchscript and apply pixel scaling as per detect.py - model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = opt.weights.replace('.pt', '.mlmodel') # filename - model.save(f) - print('CoreML export success, saved as %s' % f) - except Exception as e: - print('CoreML export failure: %s' % e) - - # Finish - print('\nExport complete. Visualize with https://github.com/lutzroeder/netron.') diff --git a/spaces/wf-genius/Control-A-Video/model/video_diffusion/models/controlnet3d.py b/spaces/wf-genius/Control-A-Video/model/video_diffusion/models/controlnet3d.py deleted file mode 100644 index 86d7f1b2802b4b808115d62f57fb5eee75f2abc4..0000000000000000000000000000000000000000 --- a/spaces/wf-genius/Control-A-Video/model/video_diffusion/models/controlnet3d.py +++ /dev/null @@ -1,580 +0,0 @@ -# Copyright 2023 Bytedance Ltd. and/or its affiliates - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Tuple, Union - -import torch -from torch import nn -from torch.nn import functional as F - -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers.utils import BaseOutput, logging -from diffusers.models.cross_attention import AttnProcessor -from diffusers.models.embeddings import TimestepEmbedding, Timesteps -from diffusers.models.modeling_utils import ModelMixin - -from .unet_3d_blocks import ( - CrossAttnDownBlockPseudo3D, - DownBlockPseudo3D, - UNetMidBlockPseudo3DCrossAttn, - get_down_block, -) -from .resnet import PseudoConv3d -from diffusers.models.cross_attention import AttnProcessor -from typing import Dict -from .unet_3d_blocks_control import ControlNetPseudoZeroConv3dBlock, ControlNetInputHintBlock -import glob -import os -import json - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name -@dataclass -class ControlNetOutput(BaseOutput): - down_block_res_samples: Tuple[torch.Tensor] - mid_block_res_sample: torch.Tensor - - -class ControlNetConditioningEmbedding(nn.Module): - """ - Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN - [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized - training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the - convolution size. 
We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides - (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full - model) to encode image-space conditions ... into feature maps ..." - """ - - def __init__( - self, - conditioning_embedding_channels: int, - conditioning_channels: int = 3, - block_out_channels: Tuple[int] = (16, 32, 96, 256), - ): - super().__init__() - - self.conv_in = PseudoConv3d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) - - self.blocks = nn.ModuleList([]) - - for i in range(len(block_out_channels) - 1): - channel_in = block_out_channels[i] - channel_out = block_out_channels[i + 1] - self.blocks.append(PseudoConv3d(channel_in, channel_in, kernel_size=3, padding=1)) - self.blocks.append(PseudoConv3d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) - - # self.conv_out = zero_module( - # PseudoConv3d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) - # ) - self.conv_out = PseudoConv3d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) - - def forward(self, conditioning): - embedding = self.conv_in(conditioning) - embedding = F.silu(embedding) - - for block in self.blocks: - embedding = block(embedding) - embedding = F.silu(embedding) - - embedding = self.conv_out(embedding) - - return embedding - - -class ControlNet3DModel(ModelMixin, ConfigMixin): - _supports_gradient_checkpointing = True - - @register_to_config - def __init__( - self, - in_channels: int = 4, - flip_sin_to_cos: bool = True, - freq_shift: int = 0, - down_block_types: Tuple[str] = ( - "CrossAttnDownBlockPseudo3D", - "CrossAttnDownBlockPseudo3D", - "CrossAttnDownBlockPseudo3D", - "DownBlockPseudo3D", - ), - only_cross_attention: Union[bool, Tuple[bool]] = False, - block_out_channels: Tuple[int] = (320, 640, 1280, 1280), - layers_per_block: int = 2, - downsample_padding: int = 1, - 
mid_block_scale_factor: float = 1, - act_fn: str = "silu", - norm_num_groups: Optional[int] = 32, - norm_eps: float = 1e-5, - cross_attention_dim: int = 1280, - attention_head_dim: Union[int, Tuple[int]] = 8, - use_linear_projection: bool = False, - class_embed_type: Optional[str] = None, - num_class_embeds: Optional[int] = None, - upcast_attention: bool = False, - resnet_time_scale_shift: str = "default", - projection_class_embeddings_input_dim: Optional[int] = None, - controlnet_conditioning_channel_order: str = "rgb", - conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256), - ): - super().__init__() - - # Check inputs - if len(block_out_channels) != len(down_block_types): - raise ValueError( - f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." - ) - - if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): - raise ValueError( - f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." - ) - - if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): - raise ValueError( - f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." 
- ) - - # input - conv_in_kernel = 3 - conv_in_padding = (conv_in_kernel - 1) // 2 - self.conv_in = PseudoConv3d( - in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding - ) - - # time - time_embed_dim = block_out_channels[0] * 4 - - self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) - timestep_input_dim = block_out_channels[0] - - self.time_embedding = TimestepEmbedding( - timestep_input_dim, - time_embed_dim, - act_fn=act_fn, - ) - - # class embedding - if class_embed_type is None and num_class_embeds is not None: - self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) - elif class_embed_type == "timestep": - self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) - elif class_embed_type == "identity": - self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) - elif class_embed_type == "projection": - if projection_class_embeddings_input_dim is None: - raise ValueError( - "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" - ) - # The projection `class_embed_type` is the same as the timestep `class_embed_type` except - # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings - # 2. it projects from an arbitrary input dimension. - # - # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. - # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. - # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
- self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) - else: - self.class_embedding = None - - # control net conditioning embedding - self.controlnet_cond_embedding = ControlNetConditioningEmbedding( - conditioning_embedding_channels=block_out_channels[0], - block_out_channels=conditioning_embedding_out_channels, - ) - - self.down_blocks = nn.ModuleList([]) - self.controlnet_down_blocks = nn.ModuleList([]) - - if isinstance(only_cross_attention, bool): - only_cross_attention = [only_cross_attention] * len(down_block_types) - - if isinstance(attention_head_dim, int): - attention_head_dim = (attention_head_dim,) * len(down_block_types) - - # down - output_channel = block_out_channels[0] - - controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1) - # controlnet_block = zero_module(controlnet_block) - self.controlnet_down_blocks.append(controlnet_block) - - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - is_final_block = i == len(block_out_channels) - 1 - - down_block = get_down_block( - down_block_type, - num_layers=layers_per_block, - in_channels=input_channel, - out_channels=output_channel, - temb_channels=time_embed_dim, - add_downsample=not is_final_block, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attention_head_dim[i], - downsample_padding=downsample_padding, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention[i], - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - self.down_blocks.append(down_block) - - for _ in range(layers_per_block): - controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1) - # controlnet_block = zero_module(controlnet_block) - self.controlnet_down_blocks.append(controlnet_block) - - if 
not is_final_block: - controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1) - # controlnet_block = zero_module(controlnet_block) - self.controlnet_down_blocks.append(controlnet_block) - - # mid - mid_block_channel = block_out_channels[-1] - - controlnet_block = PseudoConv3d(mid_block_channel, mid_block_channel, kernel_size=1) - # controlnet_block = zero_module(controlnet_block) - self.controlnet_mid_block = controlnet_block - - self.mid_block = UNetMidBlockPseudo3DCrossAttn( - in_channels=mid_block_channel, - temb_channels=time_embed_dim, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - output_scale_factor=mid_block_scale_factor, - resnet_time_scale_shift=resnet_time_scale_shift, - cross_attention_dim=cross_attention_dim, - attn_num_head_channels=attention_head_dim[-1], - resnet_groups=norm_num_groups, - use_linear_projection=use_linear_projection, - upcast_attention=upcast_attention, - ) - - @property - # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors - def attn_processors(self) -> Dict[str, AttnProcessor]: - r""" - Returns: - `dict` of attention processors: A dictionary containing all attention processors used in the model with - indexed by its weight name. 
- """ - # set recursively - processors = {} - - def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]): - if hasattr(module, "set_processor"): - processors[f"{name}.processor"] = module.processor - - for sub_name, child in module.named_children(): - fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) - - return processors - - for name, module in self.named_children(): - fn_recursive_add_processors(name, module, processors) - - return processors - - # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor - def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]): - r""" - Parameters: - `processor (`dict` of `AttnProcessor` or `AttnProcessor`): - The instantiated processor class or a dictionary of processor classes that will be set as the processor - of **all** `CrossAttention` layers. - In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainablae attention processors.: - - """ - count = len(self.attn_processors.keys()) - - if isinstance(processor, dict) and len(processor) != count: - raise ValueError( - f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" - f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
- ) - - def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): - if hasattr(module, "set_processor"): - if not isinstance(processor, dict): - module.set_processor(processor) - else: - module.set_processor(processor.pop(f"{name}.processor")) - - for sub_name, child in module.named_children(): - fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) - - for name, module in self.named_children(): - fn_recursive_attn_processor(name, module, processor) - - # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice - def set_attention_slice(self, slice_size): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - `"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is - provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` - must be a multiple of `slice_size`. 
- """ - sliceable_head_dims = [] - - def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module): - if hasattr(module, "set_attention_slice"): - sliceable_head_dims.append(module.sliceable_head_dim) - - for child in module.children(): - fn_recursive_retrieve_slicable_dims(child) - - # retrieve number of attention layers - for module in self.children(): - fn_recursive_retrieve_slicable_dims(module) - - num_slicable_layers = len(sliceable_head_dims) - - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = [dim // 2 for dim in sliceable_head_dims] - elif slice_size == "max": - # make smallest slice possible - slice_size = num_slicable_layers * [1] - - slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size - - if len(slice_size) != len(sliceable_head_dims): - raise ValueError( - f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" - f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." - ) - - for i in range(len(slice_size)): - size = slice_size[i] - dim = sliceable_head_dims[i] - if size is not None and size > dim: - raise ValueError(f"size {size} has to be smaller or equal to {dim}.") - - # Recursively walk through all the children. 
- # Any children which exposes the set_attention_slice method - # gets the message - def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): - if hasattr(module, "set_attention_slice"): - module.set_attention_slice(slice_size.pop()) - - for child in module.children(): - fn_recursive_set_attention_slice(child, slice_size) - - reversed_slice_size = list(reversed(slice_size)) - for module in self.children(): - fn_recursive_set_attention_slice(module, reversed_slice_size) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D)): - module.gradient_checkpointing = value - - def forward( - self, - sample: torch.FloatTensor, - timestep: Union[torch.Tensor, float, int], - encoder_hidden_states: torch.Tensor, - controlnet_cond: torch.FloatTensor, - class_labels: Optional[torch.Tensor] = None, - timestep_cond: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - return_dict: bool = True, - ) -> Union[ControlNetOutput, Tuple]: - # check channel order - channel_order = self.config.controlnet_conditioning_channel_order - - if channel_order == "rgb": - # in rgb order by default - ... - elif channel_order == "bgr": - controlnet_cond = torch.flip(controlnet_cond, dims=[1]) - else: - raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") - - # prepare attention_mask - if attention_mask is not None: - attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 - attention_mask = attention_mask.unsqueeze(1) - - # 1. time - timesteps = timestep - if not torch.is_tensor(timesteps): - # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can - # This would be a good case for the `match` statement (Python 3.10+) - is_mps = sample.device.type == "mps" - if isinstance(timestep, float): - dtype = torch.float32 if is_mps else torch.float64 - else: - dtype = torch.int32 if is_mps else torch.int64 - timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) - elif len(timesteps.shape) == 0: - timesteps = timesteps[None].to(sample.device) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timesteps = timesteps.expand(sample.shape[0]) - - t_emb = self.time_proj(timesteps) - - # timesteps does not contain any weights and will always return f32 tensors - # but time_embedding might actually be running in fp16. so we need to cast here. - # there might be better ways to encapsulate this. - t_emb = t_emb.to(dtype=self.dtype) - - emb = self.time_embedding(t_emb, timestep_cond) - - if self.class_embedding is not None: - if class_labels is None: - raise ValueError("class_labels should be provided when num_class_embeds > 0") - - if self.config.class_embed_type == "timestep": - class_labels = self.time_proj(class_labels) - - class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) - emb = emb + class_emb - - # 2. pre-process - sample = self.conv_in(sample) - - controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) - # print(sample.shape, controlnet_cond.shape) - - sample += controlnet_cond - # 3. down - - down_block_res_samples = (sample,) - for downsample_block in self.down_blocks: - if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: - sample, res_samples = downsample_block( - hidden_states=sample, - temb=emb, - encoder_hidden_states=encoder_hidden_states, - attention_mask=attention_mask, - ) - else: - sample, res_samples = downsample_block(hidden_states=sample, temb=emb) - - down_block_res_samples += res_samples - - # 4. 
mid - if self.mid_block is not None: - sample = self.mid_block( - sample, - emb, - encoder_hidden_states=encoder_hidden_states, - attention_mask=attention_mask, - ) - - # 5. Control net blocks - - controlnet_down_block_res_samples = () - - for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): - down_block_res_sample = controlnet_block(down_block_res_sample) - controlnet_down_block_res_samples += (down_block_res_sample,) - - down_block_res_samples = controlnet_down_block_res_samples - - mid_block_res_sample = self.controlnet_mid_block(sample) - - if not return_dict: - return (down_block_res_samples, mid_block_res_sample) - - return ControlNetOutput( - down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample - ) - - @classmethod - def from_2d_model(cls, model_path, condition_on_fps=False, controlnet_hint_channels: Optional[int] = None,): - ''' - load a 2d model and convert it to a pseudo 3d model - ''' - config_path = os.path.join(model_path, "config.json") - if not os.path.isfile(config_path): - raise RuntimeError(f"{config_path} does not exist") - with open(config_path, "r") as f: - config = json.load(f) - - config.pop("_class_name") - config.pop("_diffusers_version") - - block_replacer = { - "CrossAttnDownBlock2D": "CrossAttnDownBlockPseudo3D", - "DownBlock2D": "DownBlockPseudo3D", - "UNetMidBlock2DCrossAttn": "UNetMidBlockPseudo3DCrossAttn", - } - - def convert_2d_to_3d_block(block): - return block_replacer[block] if block in block_replacer else block - - config["down_block_types"] = [ - convert_2d_to_3d_block(block) for block in config["down_block_types"] - ] - - if "mid_block_type" in config: - config["mid_block_type"] = convert_2d_to_3d_block(config["mid_block_type"]) - - if condition_on_fps: - config["fps_embed_type"] = "timestep" # 和timestep保持一致的type。 - - if controlnet_hint_channels: - config["controlnet_hint_channels"] = controlnet_hint_channels - - print(config) - - model = 
cls(**config) # 调用自身(init), 传入config参数全换成3d的setting - state_dict_path_condidates = glob.glob(os.path.join(model_path, "*.bin")) - if state_dict_path_condidates: - state_dict = torch.load(state_dict_path_condidates[0], map_location="cpu") - model.load_2d_state_dict(state_dict=state_dict) - - return model - - def load_2d_state_dict(self, state_dict, **kwargs): - ''' - 2D 部分的参数名完全不变。 - ''' - state_dict_3d = self.state_dict() - # print("diff params list:", list(set(state_dict_3d.keys()) - set(state_dict.keys()))) - - for k, v in state_dict.items(): - if k not in state_dict_3d: - raise KeyError(f"2d state_dict key {k} does not exist in 3d model") - - for k, v in state_dict_3d.items(): - if "_temporal" in k: - continue - if "gamma" in k: - continue - if k not in state_dict: - raise KeyError(f"3d state_dict key {k} does not exist in 2d model") - state_dict_3d.update(state_dict) - self.load_state_dict(state_dict_3d, strict=True, **kwargs) - - -def zero_module(module): - for p in module.parameters(): - nn.init.zeros_(p) - return module diff --git a/spaces/wong26/faster-whisper-webui/src/hooks/subTaskProgressListener.py b/spaces/wong26/faster-whisper-webui/src/hooks/subTaskProgressListener.py deleted file mode 100644 index 9a8eaa876fcd18032875d67535e0558494842c60..0000000000000000000000000000000000000000 --- a/spaces/wong26/faster-whisper-webui/src/hooks/subTaskProgressListener.py +++ /dev/null @@ -1,37 +0,0 @@ -from src.hooks.progressListener import ProgressListener - -from typing import Union - -class SubTaskProgressListener(ProgressListener): - """ - A sub task listener that reports the progress of a sub task to a base task listener - Parameters - ---------- - base_task_listener : ProgressListener - The base progress listener to accumulate overall progress in. - base_task_total : float - The maximum total progress that will be reported to the base progress listener. - sub_task_start : float - The starting progress of a sub task, in respect to the base progress listener. 
- sub_task_total : float - The total amount of progress a sub task will report to the base progress listener. - """ - def __init__( - self, - base_task_listener: ProgressListener, - base_task_total: float, - sub_task_start: float, - sub_task_total: float, - ): - self.base_task_listener = base_task_listener - self.base_task_total = base_task_total - self.sub_task_start = sub_task_start - self.sub_task_total = sub_task_total - - def on_progress(self, current: Union[int, float], total: Union[int, float]): - sub_task_progress_frac = current / total - sub_task_progress = self.sub_task_start + self.sub_task_total * sub_task_progress_frac - self.base_task_listener.on_progress(sub_task_progress, self.base_task_total) - - def on_finished(self): - self.base_task_listener.on_progress(self.sub_task_start + self.sub_task_total, self.base_task_total) \ No newline at end of file diff --git a/spaces/wpeebles/DiT/diffusion/diffusion_utils.py b/spaces/wpeebles/DiT/diffusion/diffusion_utils.py deleted file mode 100644 index e493a6a3ecb91e553a53cc7eadee5cc0d1753060..0000000000000000000000000000000000000000 --- a/spaces/wpeebles/DiT/diffusion/diffusion_utils.py +++ /dev/null @@ -1,88 +0,0 @@ -# Modified from OpenAI's diffusion repos -# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py -# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion -# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py - -import torch as th -import numpy as np - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - Compute the KL divergence between two gaussians. - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, th.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. 
Broadcasting helps convert scalars to - # Tensors, but it does not work for th.exp(). - logvar1, logvar2 = [ - x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + th.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * th.exp(-logvar2) - ) - - -def approx_standard_normal_cdf(x): - """ - A fast approximation of the cumulative distribution function of the - standard normal. - """ - return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3)))) - - -def continuous_gaussian_log_likelihood(x, *, means, log_scales): - """ - Compute the log-likelihood of a continuous Gaussian distribution. - :param x: the targets - :param means: the Gaussian mean Tensor. - :param log_scales: the Gaussian log stddev Tensor. - :return: a tensor like x of log probabilities (in nats). - """ - centered_x = x - means - inv_stdv = th.exp(-log_scales) - normalized_x = centered_x * inv_stdv - log_probs = th.distributions.Normal(th.zeros_like(x), th.ones_like(x)).log_prob(normalized_x) - return log_probs - - -def discretized_gaussian_log_likelihood(x, *, means, log_scales): - """ - Compute the log-likelihood of a Gaussian distribution discretizing to a - given image. - :param x: the target images. It is assumed that this was uint8 values, - rescaled to the range [-1, 1]. - :param means: the Gaussian mean Tensor. - :param log_scales: the Gaussian log stddev Tensor. - :return: a tensor like x of log probabilities (in nats). 
- """ - assert x.shape == means.shape == log_scales.shape - centered_x = x - means - inv_stdv = th.exp(-log_scales) - plus_in = inv_stdv * (centered_x + 1.0 / 255.0) - cdf_plus = approx_standard_normal_cdf(plus_in) - min_in = inv_stdv * (centered_x - 1.0 / 255.0) - cdf_min = approx_standard_normal_cdf(min_in) - log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12)) - log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12)) - cdf_delta = cdf_plus - cdf_min - log_probs = th.where( - x < -0.999, - log_cdf_plus, - th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))), - ) - assert log_probs.shape == x.shape - return log_probs diff --git a/spaces/wwwwwwww2/bingo/src/pages/api/image.ts b/spaces/wwwwwwww2/bingo/src/pages/api/image.ts deleted file mode 100644 index 26fdb31076a9c71e70d1725a630844b27f5a3221..0000000000000000000000000000000000000000 --- a/spaces/wwwwwwww2/bingo/src/pages/api/image.ts +++ /dev/null @@ -1,38 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' -import { createImage } from '@/lib/bots/bing/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const { prompt, id } = req.query - if (!prompt) { - return res.json({ - result: { - value: 'Image', - message: 'No Prompt' - } - }) - } - try { - const headers = createHeaders(req.cookies, 'image') - - debug('headers', headers) - const response = await createImage(String(prompt), String(id), { - ...headers, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - }) - res.writeHead(200, { - 'Content-Type': 'text/plain; charset=UTF-8', - }) - return res.end(response) - } catch (e) { - return res.json({ - result: { - value: 'Error', - message: `${e}` - } - }) - } -} diff --git a/spaces/wy213/213a/src/components/chat-attachments.tsx b/spaces/wy213/213a/src/components/chat-attachments.tsx 
deleted file mode 100644 index ef43d4e262935d263b6099138c56f7daade5299d..0000000000000000000000000000000000000000 --- a/spaces/wy213/213a/src/components/chat-attachments.tsx +++ /dev/null @@ -1,37 +0,0 @@ -import Image from 'next/image' -import ClearIcon from '@/assets/images/clear.svg' -import RefreshIcon from '@/assets/images/refresh.svg' -import { FileItem } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' -import { useBing } from '@/lib/hooks/use-bing' - -type ChatAttachmentsProps = Pick, 'attachmentList' | 'setAttachmentList' | 'uploadImage'> - -export function ChatAttachments({ attachmentList = [], setAttachmentList, uploadImage }: ChatAttachmentsProps) { - return attachmentList.length ? ( -
      - {attachmentList.map(file => ( -
      - {file.status === 'loading' && ( -
      -
      -
      ) - } - {file.status !== 'error' && ( -
      - -
      ) - } - {file.status === 'error' && ( -
      - refresh uploadImage(file.url)} /> -
      - )} - -
      - ))} -
      - ) : null -} diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/data/datasets/image/university1652.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/data/datasets/image/university1652.py deleted file mode 100644 index ce1e386b04b904dca17fb5c0b1373e648cc995ec..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/torchreid/data/datasets/image/university1652.py +++ /dev/null @@ -1,110 +0,0 @@ -from __future__ import division, print_function, absolute_import -import os -import glob -import os.path as osp -import gdown - -from ..dataset import ImageDataset - - -class University1652(ImageDataset): - """University-1652. - - Reference: - - Zheng et al. University-1652: A Multi-view Multi-source Benchmark for Drone-based Geo-localization. ACM MM 2020. - - URL: ``_ - OneDrive: - https://studentutsedu-my.sharepoint.com/:u:/g/personal/12639605_student_uts_edu_au/Ecrz6xK-PcdCjFdpNb0T0s8B_9J5ynaUy3q63_XumjJyrA?e=z4hpcz - [Backup] GoogleDrive: - https://drive.google.com/file/d/1iVnP4gjw-iHXa0KerZQ1IfIO0i1jADsR/view?usp=sharing - [Backup] Baidu Yun: - https://pan.baidu.com/s/1H_wBnWwikKbaBY1pMPjoqQ password: hrqp - - Dataset statistics: - - buildings: 1652 (train + query). - - The dataset split is as follows: - | Split | #imgs | #buildings | #universities| - | -------- | ----- | ----| ----| - | Training | 50,218 | 701 | 33 | - | Query_drone | 37,855 | 701 | 39 | - | Query_satellite | 701 | 701 | 39| - | Query_ground | 2,579 | 701 | 39| - | Gallery_drone | 51,355 | 951 | 39| - | Gallery_satellite | 951 | 951 | 39| - | Gallery_ground | 2,921 | 793 | 39| - - cameras: None. 
- - datamanager = torchreid.data.ImageDataManager( - root='reid-data', - sources='university1652', - targets='university1652', - height=256, - width=256, - batch_size_train=32, - batch_size_test=100, - transforms=['random_flip', 'random_crop'] - ) - """ - dataset_dir = 'university1652' - dataset_url = 'https://drive.google.com/uc?id=1iVnP4gjw-iHXa0KerZQ1IfIO0i1jADsR' - - def __init__(self, root='', **kwargs): - self.root = osp.abspath(osp.expanduser(root)) - self.dataset_dir = osp.join(self.root, self.dataset_dir) - print(self.dataset_dir) - if not os.path.isdir(self.dataset_dir): - os.mkdir(self.dataset_dir) - gdown.download( - self.dataset_url, self.dataset_dir + 'data.zip', quiet=False - ) - os.system('unzip %s' % (self.dataset_dir + 'data.zip')) - self.train_dir = osp.join( - self.dataset_dir, 'University-Release/train/' - ) - self.query_dir = osp.join( - self.dataset_dir, 'University-Release/test/query_drone' - ) - self.gallery_dir = osp.join( - self.dataset_dir, 'University-Release/test/gallery_satellite' - ) - - required_files = [ - self.dataset_dir, self.train_dir, self.query_dir, self.gallery_dir - ] - self.check_before_run(required_files) - - self.fake_camid = 0 - train = self.process_dir(self.train_dir, relabel=True, train=True) - query = self.process_dir(self.query_dir, relabel=False) - gallery = self.process_dir(self.gallery_dir, relabel=False) - - super(University1652, self).__init__(train, query, gallery, **kwargs) - - def process_dir(self, dir_path, relabel=False, train=False): - IMG_EXTENSIONS = ( - '.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', - '.webp' - ) - if train: - img_paths = glob.glob(osp.join(dir_path, '*/*/*')) - else: - img_paths = glob.glob(osp.join(dir_path, '*/*')) - pid_container = set() - for img_path in img_paths: - if not img_path.lower().endswith(IMG_EXTENSIONS): - continue - pid = int(os.path.basename(os.path.dirname(img_path))) - pid_container.add(pid) - pid2label = {pid: label for label, pid in 
enumerate(pid_container)} - data = [] - # no camera for university - for img_path in img_paths: - if not img_path.lower().endswith(IMG_EXTENSIONS): - continue - pid = int(os.path.basename(os.path.dirname(img_path))) - if relabel: - pid = pid2label[pid] - data.append((img_path, pid, self.fake_camid)) - self.fake_camid += 1 - return data diff --git a/spaces/xiang-wuu/yolov5/data/scripts/get_coco.sh b/spaces/xiang-wuu/yolov5/data/scripts/get_coco.sh deleted file mode 100644 index 0210c8ebbda4c8f2f2c03f064b73f8f51d3a7425..0000000000000000000000000000000000000000 --- a/spaces/xiang-wuu/yolov5/data/scripts/get_coco.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Download COCO 2017 dataset http://cocodataset.org -# Example usage: bash data/scripts/get_coco.sh -# parent -# ├── yolov5 -# └── datasets -# └── coco ← downloads here - -# Download/unzip labels -d='../datasets' # unzip directory -url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB -echo 'Downloading' $url$f ' ...' -curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & - -# Download/unzip images -d='../datasets/coco/images' # unzip directory -url=http://images.cocodataset.org/zips/ -f1='train2017.zip' # 19G, 118k images -f2='val2017.zip' # 1G, 5k images -f3='test2017.zip' # 7G, 41k images (optional) -for f in $f1 $f2; do - echo 'Downloading' $url$f '...' 
- curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & -done -wait # finish background tasks diff --git a/spaces/xu1998hz/sescore_english_coco/app.py b/spaces/xu1998hz/sescore_english_coco/app.py deleted file mode 100644 index 6afe99c765959abbf8b089f1afc77e88503b3cb5..0000000000000000000000000000000000000000 --- a/spaces/xu1998hz/sescore_english_coco/app.py +++ /dev/null @@ -1,73 +0,0 @@ -import evaluate -import sys -from pathlib import Path -from evaluate.utils import infer_gradio_input_types, json_to_string_type, parse_readme, parse_gradio_data, parse_test_cases - - -def launch_gradio_widget(metric): - """Launches `metric` widget with Gradio.""" - - try: - import gradio as gr - except ImportError as error: - logger.error("To create a metric widget with Gradio make sure gradio is installed.") - raise error - - local_path = Path(sys.path[0]) - # if there are several input types, use first as default. - if isinstance(metric.features, list): - (feature_names, feature_types) = zip(*metric.features[0].items()) - else: - (feature_names, feature_types) = zip(*metric.features.items()) - gradio_input_types = infer_gradio_input_types(feature_types) - - def compute(data): - return metric.compute(**parse_gradio_data(data, gradio_input_types)) - - header_html = '''
      - -
      -

      About SEScore

      - -

      SEScore is a reference-based text-generation evaluation metric that requires no pre-human-annotated error data, - described in our paper "Not All Errors are Equal: Learning Text Generation Metrics using - Stratified Error Synthesis" from EMNLP 2022.

      - -

      Its effectiveness over prior methods like BLEU, BERTScore, BARTScore, PRISM, COMET and BLEURT has been demonstrated on a diverse set of language generation tasks, including - translation, captioning, and web text generation. Readers have even described SEScore as "one unsupervised evaluation to rule them all" - and we are very excited to share it with you!

      - -

      Try it yourself!

      -

      Provide sample (gold) reference text and (model output) predicted text below and see how SEScore rates them! It is most performant - in a relative ranking setting, so in general it will rank better predictions higher than worse ones. Providing useful - absolute numbers based on SEScore is an ongoing direction of investigation.

      - '''.replace('\n',' ') - - - tail_markdown = parse_readme(local_path / "description.md") - - - iface = gr.Interface( - fn=compute, - inputs=gr.inputs.Dataframe( - headers=feature_names, - col_count=len(feature_names), - row_count=2, - datatype=json_to_string_type(gradio_input_types), - ), - outputs=gr.outputs.Textbox(label=metric.name), - description=header_html, - #title=f"SEScore Metric Usage Example", - article=tail_markdown, - # TODO: load test cases and use them to populate examples - # examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)] - ) - - print(dir(iface)) - - iface.launch() - - - -module = evaluate.load("xu1998hz/sescore") -launch_gradio_widget(module) diff --git a/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_model/op/__init__.py b/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_model/op/__init__.py deleted file mode 100644 index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000 --- a/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_model/op/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .fused_act import FusedLeakyReLU, fused_leaky_relu -from .upfirdn2d import upfirdn2d diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/image_transforms.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/image_transforms.py deleted file mode 100644 index 3cea0c2d17698b3e9261ac83395e71dc71d0a4e0..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/image_transforms.py +++ /dev/null @@ -1,796 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings -from typing import Iterable, List, Optional, Tuple, Union - -import numpy as np - -from .image_utils import ( - ChannelDimension, - ImageInput, - get_channel_dimension_axis, - get_image_size, - infer_channel_dimension_format, -) -from .utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor -from .utils.import_utils import ( - is_flax_available, - is_tf_available, - is_torch_available, - is_vision_available, - requires_backends, -) - - -if is_vision_available(): - import PIL - - from .image_utils import PILImageResampling - -if is_torch_available(): - import torch - -if is_tf_available(): - import tensorflow as tf - -if is_flax_available(): - import jax.numpy as jnp - - -def to_channel_dimension_format( - image: np.ndarray, - channel_dim: Union[ChannelDimension, str], - input_channel_dim: Optional[Union[ChannelDimension, str]] = None, -) -> np.ndarray: - """ - Converts `image` to the channel dimension format specified by `channel_dim`. - - Args: - image (`numpy.ndarray`): - The image to have its channel dimension set. - channel_dim (`ChannelDimension`): - The channel dimension format to use. - input_channel_dim (`ChannelDimension`, *optional*): - The channel dimension format of the input image. If not provided, it will be inferred from the input image. - - Returns: - `np.ndarray`: The image with the channel dimension set to `channel_dim`. 
- """ - if not isinstance(image, np.ndarray): - raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") - - if input_channel_dim is None: - input_channel_dim = infer_channel_dimension_format(image) - - target_channel_dim = ChannelDimension(channel_dim) - if input_channel_dim == target_channel_dim: - return image - - if target_channel_dim == ChannelDimension.FIRST: - image = image.transpose((2, 0, 1)) - elif target_channel_dim == ChannelDimension.LAST: - image = image.transpose((1, 2, 0)) - else: - raise ValueError("Unsupported channel dimension format: {}".format(channel_dim)) - - return image - - -def rescale( - image: np.ndarray, - scale: float, - data_format: Optional[ChannelDimension] = None, - dtype: np.dtype = np.float32, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> np.ndarray: - """ - Rescales `image` by `scale`. - - Args: - image (`np.ndarray`): - The image to rescale. - scale (`float`): - The scale to use for rescaling the image. - data_format (`ChannelDimension`, *optional*): - The channel dimension format of the image. If not provided, it will be the same as the input image. - dtype (`np.dtype`, *optional*, defaults to `np.float32`): - The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature - extractors. - input_data_format (`ChannelDimension`, *optional*): - The channel dimension format of the input image. If not provided, it will be inferred from the input image. - - Returns: - `np.ndarray`: The rescaled image. 
- """ - if not isinstance(image, np.ndarray): - raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") - - rescaled_image = image * scale - if data_format is not None: - rescaled_image = to_channel_dimension_format(rescaled_image, data_format, input_data_format) - - rescaled_image = rescaled_image.astype(dtype) - - return rescaled_image - - -def _rescale_for_pil_conversion(image): - """ - Detects whether or not the image needs to be rescaled before being converted to a PIL image. - - The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be - rescaled. - """ - if image.dtype == np.uint8: - do_rescale = False - elif np.allclose(image, image.astype(int)): - if np.all(0 <= image) and np.all(image <= 255): - do_rescale = False - else: - raise ValueError( - "The image to be converted to a PIL image contains values outside the range [0, 255], " - f"got [{image.min()}, {image.max()}] which cannot be converted to uint8." - ) - elif np.all(0 <= image) and np.all(image <= 1): - do_rescale = True - else: - raise ValueError( - "The image to be converted to a PIL image contains values outside the range [0, 1], " - f"got [{image.min()}, {image.max()}] which cannot be converted to uint8." - ) - return do_rescale - - -def to_pil_image( - image: Union[np.ndarray, "PIL.Image.Image", "torch.Tensor", "tf.Tensor", "jnp.ndarray"], - do_rescale: Optional[bool] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> "PIL.Image.Image": - """ - Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if - needed. - - Args: - image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`): - The image to convert to the `PIL.Image` format. - do_rescale (`bool`, *optional*): - Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). 
Will default - to `True` if the image type is a floating type and casting to `int` would result in a loss of precision, - and `False` otherwise. - input_data_format (`ChannelDimension`, *optional*): - The channel dimension format of the input image. If unset, will use the inferred format from the input. - - Returns: - `PIL.Image.Image`: The converted image. - """ - requires_backends(to_pil_image, ["vision"]) - - if isinstance(image, PIL.Image.Image): - return image - - # Convert all tensors to numpy arrays before converting to PIL image - if is_torch_tensor(image) or is_tf_tensor(image): - image = image.numpy() - elif is_jax_tensor(image): - image = np.array(image) - elif not isinstance(image, np.ndarray): - raise ValueError("Input image type not supported: {}".format(type(image))) - - # If the channel as been moved to first dim, we put it back at the end. - image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) - - # If there is a single channel, we squeeze it, as otherwise PIL can't handle it. - image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image - - # PIL.Image can only store uint8 values so we rescale the image to be between 0 and 255 if needed. 
- do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale - - if do_rescale: - image = rescale(image, 255) - - image = image.astype(np.uint8) - return PIL.Image.fromarray(image) - - -# Logic adapted from torchvision resizing logic: https://github.com/pytorch/vision/blob/511924c1ced4ce0461197e5caa64ce5b9e558aab/torchvision/transforms/functional.py#L366 -def get_resize_output_image_size( - input_image: np.ndarray, - size: Union[int, Tuple[int, int], List[int], Tuple[int]], - default_to_square: bool = True, - max_size: Optional[int] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> tuple: - """ - Find the target (height, width) dimension of the output image after resizing given the input image and the desired - size. - - Args: - input_image (`np.ndarray`): - The image to resize. - size (`int` or `Tuple[int, int]` or List[int] or Tuple[int]): - The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to - this. - - If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If - `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this - number. i.e, if height > width, then image will be rescaled to (size * height / width, size). - default_to_square (`bool`, *optional*, defaults to `True`): - How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square - (`size`,`size`). If set to `False`, will replicate - [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize) - with support for resizing only the smallest edge and providing an optional `max_size`. 
- max_size (`int`, *optional*): - The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater - than `max_size` after being resized according to `size`, then the image is resized again so that the longer - edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter - than `size`. Only used if `default_to_square` is `False`. - input_data_format (`ChannelDimension`, *optional*): - The channel dimension format of the input image. If unset, will use the inferred format from the input. - - Returns: - `tuple`: The target (height, width) dimension of the output image after resizing. - """ - if isinstance(size, (tuple, list)): - if len(size) == 2: - return tuple(size) - elif len(size) == 1: - # Perform same logic as if size was an int - size = size[0] - else: - raise ValueError("size must have 1 or 2 elements if it is a list or tuple") - - if default_to_square: - return (size, size) - - height, width = get_image_size(input_image, input_data_format) - short, long = (width, height) if width <= height else (height, width) - requested_new_short = size - - new_short, new_long = requested_new_short, int(requested_new_short * long / short) - - if max_size is not None: - if max_size <= requested_new_short: - raise ValueError( - f"max_size = {max_size} must be strictly greater than the requested " - f"size for the smaller edge size = {size}" - ) - if new_long > max_size: - new_short, new_long = int(max_size * new_short / new_long), max_size - - return (new_long, new_short) if width <= height else (new_short, new_long) - - -def resize( - image, - size: Tuple[int, int], - resample: "PILImageResampling" = None, - reducing_gap: Optional[int] = None, - data_format: Optional[ChannelDimension] = None, - return_numpy: bool = True, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> np.ndarray: - """ - Resizes `image` to `(height, width)` specified by `size` using the PIL library. 
- - Args: - image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): - The image to resize. - size (`Tuple[int, int]`): - The size to use for resizing the image. - resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`): - The filter to user for resampling. - reducing_gap (`int`, *optional*): - Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to - the fair resampling. See corresponding Pillow documentation for more details. - data_format (`ChannelDimension`, *optional*): - The channel dimension format of the output image. If unset, will use the inferred format from the input. - return_numpy (`bool`, *optional*, defaults to `True`): - Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is - returned. - input_data_format (`ChannelDimension`, *optional*): - The channel dimension format of the input image. If unset, will use the inferred format from the input. - - Returns: - `np.ndarray`: The resized image. - """ - requires_backends(resize, ["vision"]) - - resample = resample if resample is not None else PILImageResampling.BILINEAR - - if not len(size) == 2: - raise ValueError("size must have 2 elements") - - # For all transformations, we want to keep the same data format as the input image unless otherwise specified. - # The resized image from PIL will always have channels last, so find the input format first. 
- if input_data_format is None: - input_data_format = infer_channel_dimension_format(image) - data_format = input_data_format if data_format is None else data_format - - # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use - # the pillow library to resize the image and then convert back to numpy - do_rescale = False - if not isinstance(image, PIL.Image.Image): - do_rescale = _rescale_for_pil_conversion(image) - image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format) - height, width = size - # PIL images are in the format (width, height) - resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap) - - if return_numpy: - resized_image = np.array(resized_image) - # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image - # so we need to add it back if necessary. - resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image - # The image is always in channels last format after converting from a PIL image - resized_image = to_channel_dimension_format( - resized_image, data_format, input_channel_dim=ChannelDimension.LAST - ) - # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to - # rescale it back to the original range. - resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image - return resized_image - - -def normalize( - image: np.ndarray, - mean: Union[float, Iterable[float]], - std: Union[float, Iterable[float]], - data_format: Optional[ChannelDimension] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> np.ndarray: - """ - Normalizes `image` using the mean and standard deviation specified by `mean` and `std`. - - image = (image - mean) / std - - Args: - image (`np.ndarray`): - The image to normalize. 
- mean (`float` or `Iterable[float]`): - The mean to use for normalization. - std (`float` or `Iterable[float]`): - The standard deviation to use for normalization. - data_format (`ChannelDimension`, *optional*): - The channel dimension format of the output image. If unset, will use the inferred format from the input. - input_data_format (`ChannelDimension`, *optional*): - The channel dimension format of the input image. If unset, will use the inferred format from the input. - """ - if not isinstance(image, np.ndarray): - raise ValueError("image must be a numpy array") - - if input_data_format is None: - input_data_format = infer_channel_dimension_format(image) - channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format) - num_channels = image.shape[channel_axis] - - if isinstance(mean, Iterable): - if len(mean) != num_channels: - raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}") - else: - mean = [mean] * num_channels - mean = np.array(mean, dtype=image.dtype) - - if isinstance(std, Iterable): - if len(std) != num_channels: - raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(std)}") - else: - std = [std] * num_channels - std = np.array(std, dtype=image.dtype) - - if input_data_format == ChannelDimension.LAST: - image = (image - mean) / std - else: - image = ((image.T - mean) / std).T - - image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image - return image - - -def center_crop( - image: np.ndarray, - size: Tuple[int, int], - data_format: Optional[Union[str, ChannelDimension]] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, - return_numpy: Optional[bool] = None, -) -> np.ndarray: - """ - Crops the `image` to the specified `size` using a center crop. 
Note that if the image is too small to be cropped to - the size given, it will be padded (so the returned result will always be of size `size`). - - Args: - image (`np.ndarray`): - The image to crop. - size (`Tuple[int, int]`): - The target size for the cropped image. - data_format (`str` or `ChannelDimension`, *optional*): - The channel dimension format for the output image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - If unset, will use the inferred format of the input image. - input_data_format (`str` or `ChannelDimension`, *optional*): - The channel dimension format for the input image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - If unset, will use the inferred format of the input image. - return_numpy (`bool`, *optional*): - Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the - previous ImageFeatureExtractionMixin method. - - Unset: will return the same type as the input image. - - `True`: will return a numpy array. - - `False`: will return a `PIL.Image.Image` object. - Returns: - `np.ndarray`: The cropped image. 
- """ - requires_backends(center_crop, ["vision"]) - - if return_numpy is not None: - warnings.warn("return_numpy is deprecated and will be removed in v.4.33", FutureWarning) - - return_numpy = True if return_numpy is None else return_numpy - - if not isinstance(image, np.ndarray): - raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") - - if not isinstance(size, Iterable) or len(size) != 2: - raise ValueError("size must have 2 elements representing the height and width of the output image") - - if input_data_format is None: - input_data_format = infer_channel_dimension_format(image) - output_data_format = data_format if data_format is not None else input_data_format - - # We perform the crop in (C, H, W) format and then convert to the output format - image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) - - orig_height, orig_width = get_image_size(image, ChannelDimension.FIRST) - crop_height, crop_width = size - crop_height, crop_width = int(crop_height), int(crop_width) - - # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result. - top = (orig_height - crop_height) // 2 - bottom = top + crop_height - # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result. - left = (orig_width - crop_width) // 2 - right = left + crop_width - - # Check if cropped area is within image boundaries - if top >= 0 and bottom <= orig_height and left >= 0 and right <= orig_width: - image = image[..., top:bottom, left:right] - image = to_channel_dimension_format(image, output_data_format, ChannelDimension.FIRST) - return image - - # Otherwise, we may need to pad if the image is too small. Oh joy... 
- new_height = max(crop_height, orig_height) - new_width = max(crop_width, orig_width) - new_shape = image.shape[:-2] + (new_height, new_width) - new_image = np.zeros_like(image, shape=new_shape) - - # If the image is too small, pad it with zeros - top_pad = (new_height - orig_height) // 2 - bottom_pad = top_pad + orig_height - left_pad = (new_width - orig_width) // 2 - right_pad = left_pad + orig_width - new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image - - top += top_pad - bottom += top_pad - left += left_pad - right += left_pad - - new_image = new_image[..., max(0, top) : min(new_height, bottom), max(0, left) : min(new_width, right)] - new_image = to_channel_dimension_format(new_image, output_data_format, ChannelDimension.FIRST) - - if not return_numpy: - new_image = to_pil_image(new_image) - - return new_image - - -def _center_to_corners_format_torch(bboxes_center: "torch.Tensor") -> "torch.Tensor": - center_x, center_y, width, height = bboxes_center.unbind(-1) - bbox_corners = torch.stack( - # top left x, top left y, bottom right x, bottom right y - [(center_x - 0.5 * width), (center_y - 0.5 * height), (center_x + 0.5 * width), (center_y + 0.5 * height)], - dim=-1, - ) - return bbox_corners - - -def _center_to_corners_format_numpy(bboxes_center: np.ndarray) -> np.ndarray: - center_x, center_y, width, height = bboxes_center.T - bboxes_corners = np.stack( - # top left x, top left y, bottom right x, bottom right y - [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height], - axis=-1, - ) - return bboxes_corners - - -def _center_to_corners_format_tf(bboxes_center: "tf.Tensor") -> "tf.Tensor": - center_x, center_y, width, height = tf.unstack(bboxes_center, axis=-1) - bboxes_corners = tf.stack( - # top left x, top left y, bottom right x, bottom right y - [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height], - axis=-1, - ) - return bboxes_corners - - -# 2 functions 
below inspired by https://github.com/facebookresearch/detr/blob/master/util/box_ops.py -def center_to_corners_format(bboxes_center: TensorType) -> TensorType: - """ - Converts bounding boxes from center format to corners format. - - center format: contains the coordinate for the center of the box and its width, height dimensions - (center_x, center_y, width, height) - corners format: contains the coodinates for the top-left and bottom-right corners of the box - (top_left_x, top_left_y, bottom_right_x, bottom_right_y) - """ - # Function is used during model forward pass, so we use the input framework if possible, without - # converting to numpy - if is_torch_tensor(bboxes_center): - return _center_to_corners_format_torch(bboxes_center) - elif isinstance(bboxes_center, np.ndarray): - return _center_to_corners_format_numpy(bboxes_center) - elif is_tf_tensor(bboxes_center): - return _center_to_corners_format_tf(bboxes_center) - - raise ValueError(f"Unsupported input type {type(bboxes_center)}") - - -def _corners_to_center_format_torch(bboxes_corners: "torch.Tensor") -> "torch.Tensor": - top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.unbind(-1) - b = [ - (top_left_x + bottom_right_x) / 2, # center x - (top_left_y + bottom_right_y) / 2, # center y - (bottom_right_x - top_left_x), # width - (bottom_right_y - top_left_y), # height - ] - return torch.stack(b, dim=-1) - - -def _corners_to_center_format_numpy(bboxes_corners: np.ndarray) -> np.ndarray: - top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.T - bboxes_center = np.stack( - [ - (top_left_x + bottom_right_x) / 2, # center x - (top_left_y + bottom_right_y) / 2, # center y - (bottom_right_x - top_left_x), # width - (bottom_right_y - top_left_y), # height - ], - axis=-1, - ) - return bboxes_center - - -def _corners_to_center_format_tf(bboxes_corners: "tf.Tensor") -> "tf.Tensor": - top_left_x, top_left_y, bottom_right_x, bottom_right_y = tf.unstack(bboxes_corners, axis=-1) - 
bboxes_center = tf.stack( - [ - (top_left_x + bottom_right_x) / 2, # center x - (top_left_y + bottom_right_y) / 2, # center y - (bottom_right_x - top_left_x), # width - (bottom_right_y - top_left_y), # height - ], - axis=-1, - ) - return bboxes_center - - -def corners_to_center_format(bboxes_corners: TensorType) -> TensorType: - """ - Converts bounding boxes from corners format to center format. - - corners format: contains the coodinates for the top-left and bottom-right corners of the box - (top_left_x, top_left_y, bottom_right_x, bottom_right_y) - center format: contains the coordinate for the center of the box and its the width, height dimensions - (center_x, center_y, width, height) - """ - # Inverse function accepts different input types so implemented here too - if is_torch_tensor(bboxes_corners): - return _corners_to_center_format_torch(bboxes_corners) - elif isinstance(bboxes_corners, np.ndarray): - return _corners_to_center_format_numpy(bboxes_corners) - elif is_tf_tensor(bboxes_corners): - return _corners_to_center_format_tf(bboxes_corners) - - raise ValueError(f"Unsupported input type {type(bboxes_corners)}") - - -# 2 functions below copied from https://github.com/cocodataset/panopticapi/blob/master/panopticapi/utils.py -# Copyright (c) 2018, Alexander Kirillov -# All rights reserved. -def rgb_to_id(color): - """ - Converts RGB color to unique ID. - """ - if isinstance(color, np.ndarray) and len(color.shape) == 3: - if color.dtype == np.uint8: - color = color.astype(np.int32) - return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] - return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) - - -def id_to_rgb(id_map): - """ - Converts unique ID to RGB color. 
- """ - if isinstance(id_map, np.ndarray): - id_map_copy = id_map.copy() - rgb_shape = tuple(list(id_map.shape) + [3]) - rgb_map = np.zeros(rgb_shape, dtype=np.uint8) - for i in range(3): - rgb_map[..., i] = id_map_copy % 256 - id_map_copy //= 256 - return rgb_map - color = [] - for _ in range(3): - color.append(id_map % 256) - id_map //= 256 - return color - - -class PaddingMode(ExplicitEnum): - """ - Enum class for the different padding modes to use when padding images. - """ - - CONSTANT = "constant" - REFLECT = "reflect" - REPLICATE = "replicate" - SYMMETRIC = "symmetric" - - -def pad( - image: np.ndarray, - padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]], - mode: PaddingMode = PaddingMode.CONSTANT, - constant_values: Union[float, Iterable[float]] = 0.0, - data_format: Optional[Union[str, ChannelDimension]] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> np.ndarray: - """ - Pads the `image` with the specified (height, width) `padding` and `mode`. - - Args: - image (`np.ndarray`): - The image to pad. - padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`): - Padding to apply to the edges of the height, width axes. Can be one of three formats: - - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - - `((before, after),)` yields same before and after pad for height and width. - - `(pad,)` or int is a shortcut for before = after = pad width for all axes. - mode (`PaddingMode`): - The padding mode to use. Can be one of: - - `"constant"`: pads with a constant value. - - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the - vector along each axis. - - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. 
- constant_values (`float` or `Iterable[float]`, *optional*): - The value to use for the padding if `mode` is `"constant"`. - data_format (`str` or `ChannelDimension`, *optional*): - The channel dimension format for the output image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - If unset, will use same as the input image. - input_data_format (`str` or `ChannelDimension`, *optional*): - The channel dimension format for the input image. Can be one of: - - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - If unset, will use the inferred format of the input image. - - Returns: - `np.ndarray`: The padded image. - - """ - if input_data_format is None: - input_data_format = infer_channel_dimension_format(image) - - def _expand_for_data_format(values): - """ - Convert values to be in the format expected by np.pad based on the data format. 
- """ - if isinstance(values, (int, float)): - values = ((values, values), (values, values)) - elif isinstance(values, tuple) and len(values) == 1: - values = ((values[0], values[0]), (values[0], values[0])) - elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int): - values = (values, values) - elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple): - values = values - else: - raise ValueError(f"Unsupported format: {values}") - - # add 0 for channel dimension - values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0)) - - # Add additional padding if there's a batch dimension - values = (0, *values) if image.ndim == 4 else values - return values - - padding = _expand_for_data_format(padding) - - if mode == PaddingMode.CONSTANT: - constant_values = _expand_for_data_format(constant_values) - image = np.pad(image, padding, mode="constant", constant_values=constant_values) - elif mode == PaddingMode.REFLECT: - image = np.pad(image, padding, mode="reflect") - elif mode == PaddingMode.REPLICATE: - image = np.pad(image, padding, mode="edge") - elif mode == PaddingMode.SYMMETRIC: - image = np.pad(image, padding, mode="symmetric") - else: - raise ValueError(f"Invalid padding mode: {mode}") - - image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image - return image - - -# TODO (Amy): Accept 1/3/4 channel numpy array as input and return np.array as default -def convert_to_rgb(image: ImageInput) -> ImageInput: - """ - Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image - as is. - - Args: - image (Image): - The image to convert. 
- """ - requires_backends(convert_to_rgb, ["vision"]) - - if not isinstance(image, PIL.Image.Image): - return image - - image = image.convert("RGB") - return image - - -def flip_channel_order( - image: np.ndarray, - data_format: Optional[ChannelDimension] = None, - input_data_format: Optional[Union[str, ChannelDimension]] = None, -) -> np.ndarray: - """ - Flips the channel order of the image. - - If the image is in RGB format, it will be converted to BGR and vice versa. - - Args: - image (`np.ndarray`): - The image to flip. - data_format (`ChannelDimension`, *optional*): - The channel dimension format for the output image. Can be one of: - - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - If unset, will use same as the input image. - input_data_format (`ChannelDimension`, *optional*): - The channel dimension format for the input image. Can be one of: - - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - If unset, will use the inferred format of the input image. - """ - input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format - - if input_data_format == ChannelDimension.LAST: - image = image[..., ::-1] - elif input_data_format == ChannelDimension.FIRST: - image = image[::-1, ...] 
- else: - raise ValueError(f"Unsupported channel dimension: {input_data_format}") - - if data_format is not None: - image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) - return image diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/kernels/yoso/common.h b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/kernels/yoso/common.h deleted file mode 100644 index e5085c88dd3ea9a12eec264a8c48946bf2b80b23..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/kernels/yoso/common.h +++ /dev/null @@ -1,10 +0,0 @@ - -#define min(a, b) ((a)<(b)?(a):(b)) -#define max(a, b) ((a)>(b)?(a):(b)) -#define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0)) -#define select(cond, a, b) ((cond)?(a):(b)) -#define PI 3.141592 -#define EPSILON 1e-8 -#define MAX_VAL 1e12 -#define MIN_VAL -1e12 -#define EMPTY_VALUE -1 diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/marian/tokenization_marian.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/marian/tokenization_marian.py deleted file mode 100644 index f064b49a8397b96b9ba9f8da47b400048d762635..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/marian/tokenization_marian.py +++ /dev/null @@ -1,413 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import json -import os -import re -import warnings -from pathlib import Path -from shutil import copyfile -from typing import Any, Dict, List, Optional, Tuple, Union - -import sentencepiece - -from ...tokenization_utils import PreTrainedTokenizer -from ...utils import logging - - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = { - "source_spm": "source.spm", - "target_spm": "target.spm", - "vocab": "vocab.json", - "target_vocab_file": "target_vocab.json", - "tokenizer_config_file": "tokenizer_config.json", -} - -PRETRAINED_VOCAB_FILES_MAP = { - "source_spm": { - "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/source.spm" - }, - "target_spm": { - "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/target.spm" - }, - "vocab": { - "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json" - }, - "tokenizer_config_file": { - "Helsinki-NLP/opus-mt-en-de": ( - "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/tokenizer_config.json" - ) - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"Helsinki-NLP/opus-mt-en-de": 512} -PRETRAINED_INIT_CONFIGURATION = {} - -SPIECE_UNDERLINE = "▁" - -# Example URL https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json - - -class MarianTokenizer(PreTrainedTokenizer): - r""" - Construct a Marian tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). - - This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to - this superclass for more information regarding those methods. - - Args: - source_spm (`str`): - [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that - contains the vocabulary for the source language. 
- target_spm (`str`): - [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that - contains the vocabulary for the target language. - source_lang (`str`, *optional*): - A string representing the source language. - target_lang (`str`, *optional*): - A string representing the target language. - unk_token (`str`, *optional*, defaults to `""`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - eos_token (`str`, *optional*, defaults to `""`): - The end of sequence token. - pad_token (`str`, *optional*, defaults to `""`): - The token used for padding, for example when batching sequences of different lengths. - model_max_length (`int`, *optional*, defaults to 512): - The maximum sentence length the model accepts. - additional_special_tokens (`List[str]`, *optional*, defaults to `["", ""]`): - Additional special tokens used by the tokenizer. - sp_model_kwargs (`dict`, *optional*): - Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for - SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, - to set: - - - `enable_sampling`: Enable subword regularization. - - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - - - `nbest_size = {0,1}`: No sampling is performed. - - `nbest_size > 1`: samples from the nbest_size results. - - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) - using forward-filtering-and-backward-sampling algorithm. - - - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for - BPE-dropout. 
- - Examples: - - ```python - >>> from transformers import MarianForCausalLM, MarianTokenizer - - >>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-en-de") - >>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") - >>> src_texts = ["I am a small frog.", "Tom asked his teacher for advice."] - >>> tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional - >>> inputs = tokenizer(src_texts, text_target=tgt_texts, return_tensors="pt", padding=True) - - >>> outputs = model(**inputs) # should work - ```""" - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - language_code_re = re.compile(">>.+<<") # type: re.Pattern - - def __init__( - self, - source_spm, - target_spm, - vocab, - target_vocab_file=None, - source_lang=None, - target_lang=None, - unk_token="", - eos_token="
      ", - pad_token="", - model_max_length=512, - sp_model_kwargs: Optional[Dict[str, Any]] = None, - separate_vocabs=False, - **kwargs, - ) -> None: - self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs - - assert Path(source_spm).exists(), f"cannot find spm source {source_spm}" - - self.separate_vocabs = separate_vocabs - self.encoder = load_json(vocab) - if unk_token not in self.encoder: - raise KeyError(" token must be in the vocab") - assert pad_token in self.encoder - - if separate_vocabs: - self.target_encoder = load_json(target_vocab_file) - self.decoder = {v: k for k, v in self.target_encoder.items()} - self.supported_language_codes = [] - else: - self.decoder = {v: k for k, v in self.encoder.items()} - self.supported_language_codes: list = [k for k in self.encoder if k.startswith(">>") and k.endswith("<<")] - - self.source_lang = source_lang - self.target_lang = target_lang - self.spm_files = [source_spm, target_spm] - - # load SentencePiece model for pre-processing - self.spm_source = load_spm(source_spm, self.sp_model_kwargs) - self.spm_target = load_spm(target_spm, self.sp_model_kwargs) - self.current_spm = self.spm_source - self.current_encoder = self.encoder - - # Multilingual target side: default to using first supported language code. - - self._setup_normalizer() - - super().__init__( - # bos_token=bos_token, unused. 
Start decoding with config.decoder_start_token_id - source_lang=source_lang, - target_lang=target_lang, - unk_token=unk_token, - eos_token=eos_token, - pad_token=pad_token, - model_max_length=model_max_length, - sp_model_kwargs=self.sp_model_kwargs, - target_vocab_file=target_vocab_file, - separate_vocabs=separate_vocabs, - **kwargs, - ) - - def _setup_normalizer(self): - try: - from sacremoses import MosesPunctNormalizer - - self.punc_normalizer = MosesPunctNormalizer(self.source_lang).normalize - except (ImportError, FileNotFoundError): - warnings.warn("Recommended: pip install sacremoses.") - self.punc_normalizer = lambda x: x - - def normalize(self, x: str) -> str: - """Cover moses empty string edge case. They return empty list for '' input!""" - return self.punc_normalizer(x) if x else "" - - def _convert_token_to_id(self, token): - return self.current_encoder.get(token, self.current_encoder[self.unk_token]) - - def remove_language_code(self, text: str): - """Remove language codes like >>fr<< before sentencepiece""" - match = self.language_code_re.match(text) - code: list = [match.group(0)] if match else [] - return code, self.language_code_re.sub("", text) - - def _tokenize(self, text: str) -> List[str]: - code, text = self.remove_language_code(text) - pieces = self.current_spm.encode(text, out_type=str) - return code + pieces - - def _convert_id_to_token(self, index: int) -> str: - """Converts an index (integer) in a token (str) using the decoder.""" - return self.decoder.get(index, self.unk_token) - - def batch_decode(self, sequences, **kwargs): - """ - Convert a list of lists of token ids into a list of strings by calling decode. - - Args: - sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`): - List of tokenized input ids. Can be obtained using the `__call__` method. - skip_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not to remove special tokens in the decoding. 
- clean_up_tokenization_spaces (`bool`, *optional*): - Whether or not to clean up the tokenization spaces. If `None`, will default to - `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). - use_source_tokenizer (`bool`, *optional*, defaults to `False`): - Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence - problems). - kwargs (additional keyword arguments, *optional*): - Will be passed to the underlying model specific decode method. - - Returns: - `List[str]`: The list of decoded sentences. - """ - return super().batch_decode(sequences, **kwargs) - - def decode(self, token_ids, **kwargs): - """ - Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special - tokens and clean up tokenization spaces. - - Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. - - Args: - token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): - List of tokenized input ids. Can be obtained using the `__call__` method. - skip_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not to remove special tokens in the decoding. - clean_up_tokenization_spaces (`bool`, *optional*): - Whether or not to clean up the tokenization spaces. If `None`, will default to - `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). - use_source_tokenizer (`bool`, *optional*, defaults to `False`): - Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence - problems). - kwargs (additional keyword arguments, *optional*): - Will be passed to the underlying model specific decode method. - - Returns: - `str`: The decoded sentence. 
- """ - return super().decode(token_ids, **kwargs) - - def convert_tokens_to_string(self, tokens: List[str]) -> str: - """Uses source spm if _decode_use_source_tokenizer is True, and target spm otherwise""" - sp_model = self.spm_source if self._decode_use_source_tokenizer else self.spm_target - current_sub_tokens = [] - out_string = "" - for token in tokens: - # make sure that special tokens are not decoded using sentencepiece model - if token in self.all_special_tokens: - out_string += sp_model.decode_pieces(current_sub_tokens) + token + " " - current_sub_tokens = [] - else: - current_sub_tokens.append(token) - out_string += sp_model.decode_pieces(current_sub_tokens) - out_string = out_string.replace(SPIECE_UNDERLINE, " ") - return out_string.strip() - - def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: - """Build model inputs from a sequence by appending eos_token_id.""" - if token_ids_1 is None: - return token_ids_0 + [self.eos_token_id] - # We don't expect to process pairs, but leave the pair logic for API consistency - return token_ids_0 + token_ids_1 + [self.eos_token_id] - - def _switch_to_input_mode(self): - self.current_spm = self.spm_source - self.current_encoder = self.encoder - - def _switch_to_target_mode(self): - self.current_spm = self.spm_target - if self.separate_vocabs: - self.current_encoder = self.target_encoder - - @property - def vocab_size(self) -> int: - return len(self.encoder) - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") - return - saved_files = [] - - if self.separate_vocabs: - out_src_vocab_file = os.path.join( - save_directory, - (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"], - ) - out_tgt_vocab_file = os.path.join( - save_directory, - (filename_prefix + "-" if filename_prefix else "") + 
VOCAB_FILES_NAMES["target_vocab_file"], - ) - save_json(self.encoder, out_src_vocab_file) - save_json(self.target_encoder, out_tgt_vocab_file) - saved_files.append(out_src_vocab_file) - saved_files.append(out_tgt_vocab_file) - else: - out_vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"] - ) - save_json(self.encoder, out_vocab_file) - saved_files.append(out_vocab_file) - - for spm_save_filename, spm_orig_path, spm_model in zip( - [VOCAB_FILES_NAMES["source_spm"], VOCAB_FILES_NAMES["target_spm"]], - self.spm_files, - [self.spm_source, self.spm_target], - ): - spm_save_path = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + spm_save_filename - ) - if os.path.abspath(spm_orig_path) != os.path.abspath(spm_save_path) and os.path.isfile(spm_orig_path): - copyfile(spm_orig_path, spm_save_path) - saved_files.append(spm_save_path) - elif not os.path.isfile(spm_orig_path): - with open(spm_save_path, "wb") as fi: - content_spiece_model = spm_model.serialized_model_proto() - fi.write(content_spiece_model) - saved_files.append(spm_save_path) - - return tuple(saved_files) - - def get_vocab(self) -> Dict: - return self.get_src_vocab() - - def get_src_vocab(self): - return dict(self.encoder, **self.added_tokens_encoder) - - def get_tgt_vocab(self): - return dict(self.target_encoder, **self.added_tokens_decoder) - - def __getstate__(self) -> Dict: - state = self.__dict__.copy() - state.update( - {k: None for k in ["spm_source", "spm_target", "current_spm", "punc_normalizer", "target_vocab_file"]} - ) - return state - - def __setstate__(self, d: Dict) -> None: - self.__dict__ = d - - # for backward compatibility - if not hasattr(self, "sp_model_kwargs"): - self.sp_model_kwargs = {} - - self.spm_source, self.spm_target = (load_spm(f, self.sp_model_kwargs) for f in self.spm_files) - self.current_spm = self.spm_source - self._setup_normalizer() - - def 
num_special_tokens_to_add(self, *args, **kwargs): - """Just EOS""" - return 1 - - def _special_token_mask(self, seq): - all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp - all_special_ids.remove(self.unk_token_id) # is only sometimes special - return [1 if x in all_special_ids else 0 for x in seq] - - def get_special_tokens_mask( - self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False - ) -> List[int]: - """Get list where entries are [1] if a token is [eos] or [pad] else 0.""" - if already_has_special_tokens: - return self._special_token_mask(token_ids_0) - elif token_ids_1 is None: - return self._special_token_mask(token_ids_0) + [1] - else: - return self._special_token_mask(token_ids_0 + token_ids_1) + [1] - - -def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor: - spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs) - spm.Load(path) - return spm - - -def save_json(data, path: str) -> None: - with open(path, "w") as f: - json.dump(data, f, indent=2) - - -def load_json(path: str) -> Union[Dict, List]: - with open(path, "r") as f: - return json.load(f) diff --git a/spaces/ynhe/AskAnything/models/grit_src/grit/data/datasets/grit_coco.py b/spaces/ynhe/AskAnything/models/grit_src/grit/data/datasets/grit_coco.py deleted file mode 100644 index fea81f7dd8ad2c27dac8438753b845ab64cef81e..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/grit/data/datasets/grit_coco.py +++ /dev/null @@ -1,112 +0,0 @@ -import logging -import os -from fvcore.common.timer import Timer -from detectron2.structures import BoxMode -from fvcore.common.file_io import PathManager -from detectron2.data import DatasetCatalog, MetadataCatalog -from lvis import LVIS - -logger = logging.getLogger(__name__) - -__all__ = ["load_GRiTcoco_json", "register_GRiTcoco_instances"] - - -def register_GRiTcoco_instances(name, metadata, 
json_file, image_root): - """ - """ - DatasetCatalog.register(name, lambda: load_GRiTcoco_json( - json_file, image_root, name)) - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, - evaluator_type="coco", **metadata - ) - - -def get_GRiTcoco_meta(): - categories = [{'supercategory': 'object', 'id': 1, 'name': 'object'}] - categories = sorted(categories, key=lambda x: x["id"]) - thing_classes = [k["name"] for k in categories] - meta = {"thing_classes": thing_classes} - return meta - - -def load_GRiTcoco_json(json_file, image_root, dataset_name=None): - ''' - Load COCO class name text for object description for GRiT - ''' - - json_file = PathManager.get_local_path(json_file) - - timer = Timer() - lvis_api = LVIS(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format( - json_file, timer.seconds())) - - class_names = {} - sort_cat = sorted(lvis_api.dataset['categories'], key=lambda x: x['id']) - for x in sort_cat: - class_names[x['id']] = x['name'] - - img_ids = sorted(lvis_api.imgs.keys()) - imgs = lvis_api.load_imgs(img_ids) - anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] - - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), \ - "Annotation ids in '{}' are not unique".format(json_file) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in the LVIS v1 format from {}".format( - len(imgs_anns), json_file)) - - dataset_dicts = [] - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - if "file_name" in img_dict: - file_name = img_dict["file_name"] - record["file_name"] = os.path.join(image_root, file_name) - - record["height"] = int(img_dict["height"]) - record["width"] = int(img_dict["width"]) - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - assert anno["image_id"] == image_id - if anno.get('iscrowd', 0) > 0: - continue - obj = {"bbox": anno["bbox"], 
"bbox_mode": BoxMode.XYWH_ABS} - obj["category_id"] = 0 - obj["object_description"] = class_names[anno['category_id']] - if 'segmentation' in anno: - segm = anno["segmentation"] - valid_segm = [poly for poly in segm \ - if len(poly) % 2 == 0 and len(poly) >= 6] - if not len(segm) == len(valid_segm): - print('Annotation contains an invalid polygon with < 3 points') - assert len(segm) > 0 - obj["segmentation"] = segm - objs.append(obj) - record["annotations"] = objs - if len(record["annotations"]) == 0: - continue - record["task"] = "ObjectDet" - dataset_dicts.append(record) - - return dataset_dicts - - -_CUSTOM_SPLITS_LVIS = { - "GRiT_coco2017_train": ("coco/train2017/", "coco/annotations/instances_train2017.json"), -} - - -for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items(): - register_GRiTcoco_instances( - key, - get_GRiTcoco_meta(), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) \ No newline at end of file diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/modeling/test_time_augmentation.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/modeling/test_time_augmentation.py deleted file mode 100644 index 373e6bf00a39c040ff1da49d6dcd39a54a0b69a7..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/modeling/test_time_augmentation.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import copy -import numpy as np -from contextlib import contextmanager -from itertools import count -from typing import List -import torch -from fvcore.transforms import HFlipTransform, NoOpTransform -from torch import nn -from torch.nn.parallel import DistributedDataParallel - -from detectron2.config import configurable -from detectron2.data.detection_utils import read_image -from detectron2.data.transforms import ( - RandomFlip, - ResizeShortestEdge, - ResizeTransform, - apply_augmentations, -) -from detectron2.structures import Boxes, Instances - -from .meta_arch import GeneralizedRCNN -from .postprocessing import detector_postprocess -from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image - -__all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"] - - -class DatasetMapperTTA: - """ - Implement test-time augmentation for detection data. - It is a callable which takes a dataset dict from a detection dataset, - and returns a list of dataset dicts where the images - are augmented from the input image by the transformations defined in the config. - This is used for test-time augmentation. - """ - - @configurable - def __init__(self, min_sizes: List[int], max_size: int, flip: bool): - """ - Args: - min_sizes: list of short-edge size to resize the image to - max_size: maximum height or width of resized images - flip: whether to apply flipping augmentation - """ - self.min_sizes = min_sizes - self.max_size = max_size - self.flip = flip - - @classmethod - def from_config(cls, cfg): - return { - "min_sizes": cfg.TEST.AUG.MIN_SIZES, - "max_size": cfg.TEST.AUG.MAX_SIZE, - "flip": cfg.TEST.AUG.FLIP, - } - - def __call__(self, dataset_dict): - """ - Args: - dict: a dict in standard model input format. See tutorials for details. - - Returns: - list[dict]: - a list of dicts, which contain augmented version of the input image. - The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``. 
- Each dict has field "transforms" which is a TransformList, - containing the transforms that are used to generate this image. - """ - numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy() - shape = numpy_image.shape - orig_shape = (dataset_dict["height"], dataset_dict["width"]) - if shape[:2] != orig_shape: - # It transforms the "original" image in the dataset to the input image - pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1]) - else: - pre_tfm = NoOpTransform() - - # Create all combinations of augmentations to use - aug_candidates = [] # each element is a list[Augmentation] - for min_size in self.min_sizes: - resize = ResizeShortestEdge(min_size, self.max_size) - aug_candidates.append([resize]) # resize only - if self.flip: - flip = RandomFlip(prob=1.0) - aug_candidates.append([resize, flip]) # resize + flip - - # Apply all the augmentations - ret = [] - for aug in aug_candidates: - new_image, tfms = apply_augmentations(aug, np.copy(numpy_image)) - torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1))) - - dic = copy.deepcopy(dataset_dict) - dic["transforms"] = pre_tfm + tfms - dic["image"] = torch_image - ret.append(dic) - return ret - - -class GeneralizedRCNNWithTTA(nn.Module): - """ - A GeneralizedRCNN with test-time augmentation enabled. - Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`. - """ - - def __init__(self, cfg, model, tta_mapper=None, batch_size=3): - """ - Args: - cfg (CfgNode): - model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. - tta_mapper (callable): takes a dataset dict and returns a list of - augmented versions of the dataset dict. Defaults to - `DatasetMapperTTA(cfg)`. - batch_size (int): batch the augmented images into this batch size for inference. 
- """ - super().__init__() - if isinstance(model, DistributedDataParallel): - model = model.module - assert isinstance( - model, GeneralizedRCNN - ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model)) - self.cfg = cfg.clone() - assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet" - assert ( - not self.cfg.MODEL.LOAD_PROPOSALS - ), "TTA for pre-computed proposals is not supported yet" - - self.model = model - - if tta_mapper is None: - tta_mapper = DatasetMapperTTA(cfg) - self.tta_mapper = tta_mapper - self.batch_size = batch_size - - @contextmanager - def _turn_off_roi_heads(self, attrs): - """ - Open a context where some heads in `model.roi_heads` are temporarily turned off. - Args: - attr (list[str]): the attribute in `model.roi_heads` which can be used - to turn off a specific head, e.g., "mask_on", "keypoint_on". - """ - roi_heads = self.model.roi_heads - old = {} - for attr in attrs: - try: - old[attr] = getattr(roi_heads, attr) - except AttributeError: - # The head may not be implemented in certain ROIHeads - pass - - if len(old.keys()) == 0: - yield - else: - for attr in old.keys(): - setattr(roi_heads, attr, False) - yield - for attr in old.keys(): - setattr(roi_heads, attr, old[attr]) - - def _batch_inference(self, batched_inputs, detected_instances=None): - """ - Execute inference on a list of inputs, - using batch size = self.batch_size, instead of the length of the list. 
- - Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference` - """ - if detected_instances is None: - detected_instances = [None] * len(batched_inputs) - - outputs = [] - inputs, instances = [], [] - for idx, input, instance in zip(count(), batched_inputs, detected_instances): - inputs.append(input) - instances.append(instance) - if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1: - outputs.extend( - self.model.inference( - inputs, - instances if instances[0] is not None else None, - do_postprocess=False, - ) - ) - inputs, instances = [], [] - return outputs - - def __call__(self, batched_inputs): - """ - Same input/output format as :meth:`GeneralizedRCNN.forward` - """ - - def _maybe_read_image(dataset_dict): - ret = copy.copy(dataset_dict) - if "image" not in ret: - image = read_image(ret.pop("file_name"), self.model.input_format) - image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW - ret["image"] = image - if "height" not in ret and "width" not in ret: - ret["height"] = image.shape[1] - ret["width"] = image.shape[2] - return ret - - return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs] - - def _inference_one_image(self, input): - """ - Args: - input (dict): one dataset dict with "image" field being a CHW tensor - - Returns: - dict: one output dict - """ - orig_shape = (input["height"], input["width"]) - augmented_inputs, tfms = self._get_augmented_inputs(input) - # Detect boxes from all augmented versions - with self._turn_off_roi_heads(["mask_on", "keypoint_on"]): - # temporarily disable roi heads - all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms) - # merge all detected boxes to obtain final predictions for boxes - merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape) - - if self.cfg.MODEL.MASK_ON: - # Use the detected boxes to obtain masks - augmented_instances = self._rescale_detected_boxes( - 
augmented_inputs, merged_instances, tfms - ) - # run forward on the detected boxes - outputs = self._batch_inference(augmented_inputs, augmented_instances) - # Delete now useless variables to avoid being out of memory - del augmented_inputs, augmented_instances - # average the predictions - merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms) - merged_instances = detector_postprocess(merged_instances, *orig_shape) - return {"instances": merged_instances} - else: - return {"instances": merged_instances} - - def _get_augmented_inputs(self, input): - augmented_inputs = self.tta_mapper(input) - tfms = [x.pop("transforms") for x in augmented_inputs] - return augmented_inputs, tfms - - def _get_augmented_boxes(self, augmented_inputs, tfms): - # 1: forward with all augmented images - outputs = self._batch_inference(augmented_inputs) - # 2: union the results - all_boxes = [] - all_scores = [] - all_classes = [] - for output, tfm in zip(outputs, tfms): - # Need to inverse the transforms on boxes, to obtain results on original image - pred_boxes = output.pred_boxes.tensor - original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy()) - all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device)) - - all_scores.extend(output.scores) - all_classes.extend(output.pred_classes) - all_boxes = torch.cat(all_boxes, dim=0) - return all_boxes, all_scores, all_classes - - def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw): - # select from the union of all results - num_boxes = len(all_boxes) - num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES - # +1 because fast_rcnn_inference expects background scores as well - all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device) - for idx, cls, score in zip(count(), all_classes, all_scores): - all_scores_2d[idx, cls] = score - - merged_instances, _ = fast_rcnn_inference_single_image( - all_boxes, - all_scores_2d, - shape_hw, - 1e-8, - 
self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, - self.cfg.TEST.DETECTIONS_PER_IMAGE, - ) - - return merged_instances - - def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms): - augmented_instances = [] - for input, tfm in zip(augmented_inputs, tfms): - # Transform the target box to the augmented image's coordinate space - pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy() - pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes)) - - aug_instances = Instances( - image_size=input["image"].shape[1:3], - pred_boxes=Boxes(pred_boxes), - pred_classes=merged_instances.pred_classes, - scores=merged_instances.scores, - ) - augmented_instances.append(aug_instances) - return augmented_instances - - def _reduce_pred_masks(self, outputs, tfms): - # Should apply inverse transforms on masks. - # We assume only resize & flip are used. pred_masks is a scale-invariant - # representation, so we handle flip specially - for output, tfm in zip(outputs, tfms): - if any(isinstance(t, HFlipTransform) for t in tfm.transforms): - output.pred_masks = output.pred_masks.flip(dims=[3]) - all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0) - avg_pred_masks = torch.mean(all_pred_masks, dim=0) - return avg_pred_masks diff --git a/spaces/yo2266911/uma_voice/text/__init__.py b/spaces/yo2266911/uma_voice/text/__init__.py deleted file mode 100644 index 48ae82f3e40ecd1bf17a7de78d87790327af3362..0000000000000000000000000000000000000000 --- a/spaces/yo2266911/uma_voice/text/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/ysharma/ChatGPT4/README.md b/spaces/ysharma/ChatGPT4/README.md deleted file mode 100644 index 23a5833bb6e68acd7b34dfd1eb21bd63fd09d4a7..0000000000000000000000000000000000000000 --- a/spaces/ysharma/ChatGPT4/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Chat-with-GPT4 -emoji: 🚀 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: ysharma/ChatGPTwithAPI ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yufiofficial/MusicGenQ/audiocraft/utils/__init__.py b/spaces/yufiofficial/MusicGenQ/audiocraft/utils/__init__.py deleted file mode 100644 index 
0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/yufiofficial/MusicGenQ/audiocraft/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp b/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp deleted file mode 100644 index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000 --- a/spaces/yunfei0710/gpt-academic/crazy_functions/test_project/cpp/libJPG/jpgd.cpp +++ /dev/null @@ -1,3276 +0,0 @@ -// jpgd.cpp - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -// Last updated Apr. 16, 2011 -// Alex Evans: Linear memory allocator (taken from jpge.h). -// -// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2. -// -// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling. -// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain" -// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html - -#include "jpgd.h" -#include - -#include -// BEGIN EPIC MOD -#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0 -// END EPIC MOD - -#ifdef _MSC_VER -#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable -#endif - -// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling). -// This is slower, but results in higher quality on images with highly saturated colors. 
-#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1 - -#define JPGD_TRUE (1) -#define JPGD_FALSE (0) - -#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b)) -#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b)) - -namespace jpgd { - - static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); } - static inline void jpgd_free(void *p) { FMemory::Free(p); } - -// BEGIN EPIC MOD -//@UE3 - use UE3 BGRA encoding instead of assuming RGBA - // stolen from IImageWrapper.h - enum ERGBFormatJPG - { - Invalid = -1, - RGBA = 0, - BGRA = 1, - Gray = 2, - }; - static ERGBFormatJPG jpg_format; -// END EPIC MOD - - // DCT coefficients are stored in this sequence. - static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; - - enum JPEG_MARKER - { - M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8, - M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC, - M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7, - M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF, - M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0 - }; - - enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 }; - -#define CONST_BITS 13 -#define PASS1_BITS 2 -#define SCALEDONE ((int32)1) - -#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */ -#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */ -#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */ -#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */ -#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */ -#define 
FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */ -#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */ -#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */ -#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */ -#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */ -#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */ -#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */ - -#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n)) -#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n)) - -#define MULTIPLY(var, cnst) ((var) * (cnst)) - -#define CLAMP(i) ((static_cast(i) > 255) ? (((~i) >> 31) & 0xFF) : (i)) - - // Compiler creates a fast path 1D IDCT for X non-zero columns - template - struct Row - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - // ACCESS_COL() will be optimized at compile time to either an array access, or 0. -#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? (int)pSrc[x] : 0) - - const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS; - const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, 
FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS); - pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS); - pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS); - pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS); - pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS); - pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS); - pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS); - pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS); - } - }; - - template <> - struct Row<0> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { -#ifdef _MSC_VER - pTemp; pSrc; -#endif - } - }; - - template <> - struct Row<1> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - const int dcval = (pSrc[0] << PASS1_BITS); - - pTemp[0] = dcval; - pTemp[1] = dcval; - pTemp[2] = dcval; - pTemp[3] = dcval; - pTemp[4] = dcval; - pTemp[5] = dcval; - pTemp[6] = dcval; - pTemp[7] = dcval; - } - }; - - // Compiler creates a fast path 1D IDCT for X non-zero rows - template - struct Col - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - // ACCESS_ROW() will be optimized at compile time to either an array access, or 0. -#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? 
pTemp[x * 8] : 0) - - const int z2 = ACCESS_ROW(2); - const int z3 = ACCESS_ROW(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS; - const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*0] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*7] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*1] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*6] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*2] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*5] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*3] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 - btmp0, 
CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*4] = (uint8)CLAMP(i); - } - }; - - template <> - struct Col<1> - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3); - const uint8 dcval_clamped = (uint8)CLAMP(dcval); - pDst_ptr[0*8] = dcval_clamped; - pDst_ptr[1*8] = dcval_clamped; - pDst_ptr[2*8] = dcval_clamped; - pDst_ptr[3*8] = dcval_clamped; - pDst_ptr[4*8] = dcval_clamped; - pDst_ptr[5*8] = dcval_clamped; - pDst_ptr[6*8] = dcval_clamped; - pDst_ptr[7*8] = dcval_clamped; - } - }; - - static const uint8 s_idct_row_table[] = - { - 1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0, - 4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0, - 6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0, - 6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0, - 8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2, - 8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2, - 8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4, - 8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8, - }; - - static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; - - void idct(const jpgd_block_t* pSrc_ptr, uint8* 
pDst_ptr, int block_max_zag) - { - JPGD_ASSERT(block_max_zag >= 1); - JPGD_ASSERT(block_max_zag <= 64); - - if (block_max_zag == 1) - { - int k = ((pSrc_ptr[0] + 4) >> 3) + 128; - k = CLAMP(k); - k = k | (k<<8); - k = k | (k<<16); - - for (int i = 8; i > 0; i--) - { - *(int*)&pDst_ptr[0] = k; - *(int*)&pDst_ptr[4] = k; - pDst_ptr += 8; - } - return; - } - - int temp[64]; - - const jpgd_block_t* pSrc = pSrc_ptr; - int* pTemp = temp; - - const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8]; - int i; - for (i = 8; i > 0; i--, pRow_tab++) - { - switch (*pRow_tab) - { - case 0: Row<0>::idct(pTemp, pSrc); break; - case 1: Row<1>::idct(pTemp, pSrc); break; - case 2: Row<2>::idct(pTemp, pSrc); break; - case 3: Row<3>::idct(pTemp, pSrc); break; - case 4: Row<4>::idct(pTemp, pSrc); break; - case 5: Row<5>::idct(pTemp, pSrc); break; - case 6: Row<6>::idct(pTemp, pSrc); break; - case 7: Row<7>::idct(pTemp, pSrc); break; - case 8: Row<8>::idct(pTemp, pSrc); break; - } - - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - - const int nonzero_rows = s_idct_col_table[block_max_zag - 1]; - for (i = 8; i > 0; i--) - { - switch (nonzero_rows) - { - case 1: Col<1>::idct(pDst_ptr, pTemp); break; - case 2: Col<2>::idct(pDst_ptr, pTemp); break; - case 3: Col<3>::idct(pDst_ptr, pTemp); break; - case 4: Col<4>::idct(pDst_ptr, pTemp); break; - case 5: Col<5>::idct(pDst_ptr, pTemp); break; - case 6: Col<6>::idct(pDst_ptr, pTemp); break; - case 7: Col<7>::idct(pDst_ptr, pTemp); break; - case 8: Col<8>::idct(pDst_ptr, pTemp); break; - } - - pTemp++; - pDst_ptr++; - } - } - - void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr) - { - int temp[64]; - int* pTemp = temp; - const jpgd_block_t* pSrc = pSrc_ptr; - - for (int i = 4; i > 0; i--) - { - Row<4>::idct(pTemp, pSrc); - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - for (int i = 8; i > 0; i--) - { - Col<4>::idct(pDst_ptr, pTemp); - pTemp++; - pDst_ptr++; - } - } - - // Retrieve one character from the input stream. 
- inline uint jpeg_decoder::get_char() - { - // Any bytes remaining in buffer? - if (!m_in_buf_left) - { - // Try to get more bytes. - prep_in_buffer(); - // Still nothing to get? - if (!m_in_buf_left) - { - // Pad the end of the stream with 0xFF 0xD9 (EOI marker) - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Same as previous method, except can indicate if the character is a pad character or not. - inline uint jpeg_decoder::get_char(bool *pPadding_flag) - { - if (!m_in_buf_left) - { - prep_in_buffer(); - if (!m_in_buf_left) - { - *pPadding_flag = true; - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - *pPadding_flag = false; - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Inserts a previously retrieved character back into the input buffer. - inline void jpeg_decoder::stuff_char(uint8 q) - { - *(--m_pIn_buf_ofs) = q; - m_in_buf_left++; - } - - // Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered. - inline uint8 jpeg_decoder::get_octet() - { - bool padding_flag; - int c = get_char(&padding_flag); - - if (c == 0xFF) - { - if (padding_flag) - return 0xFF; - - c = get_char(&padding_flag); - if (padding_flag) - { - stuff_char(0xFF); - return 0xFF; - } - - if (c == 0x00) - return 0xFF; - else - { - stuff_char(static_cast(c)); - stuff_char(0xFF); - return 0xFF; - } - } - - return static_cast(c); - } - - // Retrieves a variable number of bits from the input stream. Does not recognize markers. 
- inline uint jpeg_decoder::get_bits(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - uint c1 = get_char(); - uint c2 = get_char(); - m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2; - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered. - inline uint jpeg_decoder::get_bits_no_markers(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF)) - { - uint c1 = get_octet(); - uint c2 = get_octet(); - m_bit_buf |= (c1 << 8) | c2; - } - else - { - m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1]; - m_in_buf_left -= 2; - m_pIn_buf_ofs += 2; - } - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0) - { - // Decode more bits, use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - } - else - get_bits_no_markers(pH->code_size[symbol]); - - return symbol; - } - - // Decodes a Huffman encoded symbol. 
- inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0) - { - // Use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - - extra_bits = get_bits_no_markers(symbol & 0xF); - } - else - { - JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? (symbol & 15) : 0)); - - if (symbol & 0x8000) - { - get_bits_no_markers((symbol >> 8) & 31); - extra_bits = symbol >> 16; - } - else - { - int code_size = (symbol >> 8) & 31; - int num_extra_bits = symbol & 0xF; - int bits = code_size + num_extra_bits; - if (bits <= (m_bits_left + 16)) - extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1); - else - { - get_bits_no_markers(code_size); - extra_bits = get_bits_no_markers(num_extra_bits); - } - } - - symbol &= 0xFF; - } - - return symbol; - } - - // Tables and macro used to fully decode the DPCM differences. - static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 }; - static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 }; - static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) }; -#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x)) - - // Clamps a value between 0-255. 
- inline uint8 jpeg_decoder::clamp(int i) - { - if (static_cast(i) > 255) - i = (((~i) >> 31) & 0xFF); - - return static_cast(i); - } - - namespace DCT_Upsample - { - struct Matrix44 - { - typedef int Element_Type; - enum { NUM_ROWS = 4, NUM_COLS = 4 }; - - Element_Type v[NUM_ROWS][NUM_COLS]; - - inline int rows() const { return NUM_ROWS; } - inline int cols() const { return NUM_COLS; } - - inline const Element_Type & at(int r, int c) const { return v[r][c]; } - inline Element_Type & at(int r, int c) { return v[r][c]; } - - inline Matrix44() { } - - inline Matrix44& operator += (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) += a.at(r, 0); - at(r, 1) += a.at(r, 1); - at(r, 2) += a.at(r, 2); - at(r, 3) += a.at(r, 3); - } - return *this; - } - - inline Matrix44& operator -= (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) -= a.at(r, 0); - at(r, 1) -= a.at(r, 1); - at(r, 2) -= a.at(r, 2); - at(r, 3) -= a.at(r, 3); - } - return *this; - } - - friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) + b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) + b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) + b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) + b.at(r, 3); - } - return ret; - } - - friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) - b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) - b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) - b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) - b.at(r, 3); - } - return ret; - } - - static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) + b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) + b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) + b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 
3) + b.at(r, 3)); - } - } - - static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) - b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) - b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) - b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 3) - b.at(r, 3)); - } - } - }; - - const int FRACT_BITS = 10; - const int SCALE = 1 << FRACT_BITS; - - typedef int Temp_Type; -#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS) -#define F(i) ((int)((i) * SCALE + .5f)) - - // Any decent C++ compiler will optimize this at compile time to a 0, or an array access. -#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8]) - - // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix - template - struct P_Q - { - static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X000 = AT(0, 0); - const Temp_Type X001 = AT(0, 1); - const Temp_Type X002 = AT(0, 2); - const Temp_Type X003 = AT(0, 3); - const Temp_Type X004 = AT(0, 4); - const Temp_Type X005 = AT(0, 5); - const Temp_Type X006 = AT(0, 6); - const Temp_Type X007 = AT(0, 7); - const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0)); - const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1)); - const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2)); - const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3)); - const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4)); - const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + 
F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5)); - const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6)); - const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7)); - const Temp_Type X020 = AT(4, 0); - const Temp_Type X021 = AT(4, 1); - const Temp_Type X022 = AT(4, 2); - const Temp_Type X023 = AT(4, 3); - const Temp_Type X024 = AT(4, 4); - const Temp_Type X025 = AT(4, 5); - const Temp_Type X026 = AT(4, 6); - const Temp_Type X027 = AT(4, 7); - const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0)); - const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1)); - const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2)); - const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3)); - const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4)); - const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5)); - const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6)); - const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7)); - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - P.at(0, 0) = X000; - P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f)); - P.at(0, 2) = X004; - P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * F(0.490393f) + X007 * F(0.865723f)); - P.at(1, 0) = X010; - P.at(1, 1) = D(X011 
* F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f)); - P.at(1, 2) = X014; - P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f)); - P.at(2, 0) = X020; - P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f)); - P.at(2, 2) = X024; - P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f)); - P.at(3, 0) = X030; - P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f)); - P.at(3, 2) = X034; - P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f)); - // 40 muls 24 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f)); - Q.at(0, 1) = X002; - Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f)); - Q.at(0, 3) = X006; - Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f)); - Q.at(1, 1) = X012; - Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f)); - Q.at(1, 3) = X016; - Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f)); - Q.at(2, 1) = X022; - Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f)); - Q.at(2, 3) = X026; - Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f)); - Q.at(3, 1) = X032; - Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f)); - Q.at(3, 3) = X036; - // 40 muls 24 adds - } - }; - - template - struct R_S - { - static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - 
const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0)); - const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1)); - const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2)); - const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3)); - const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4)); - const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5)); - const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6)); - const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7)); - const Temp_Type X110 = AT(2, 0); - const Temp_Type X111 = AT(2, 1); - const Temp_Type X112 = AT(2, 2); - const Temp_Type X113 = AT(2, 3); - const Temp_Type X114 = AT(2, 4); - const Temp_Type X115 = AT(2, 5); - const Temp_Type X116 = AT(2, 6); - const Temp_Type X117 = AT(2, 7); - const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0)); - const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1)); - const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2)); - const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3)); - const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * 
AT(7, 4)); - const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5)); - const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6)); - const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7)); - const Temp_Type X130 = AT(6, 0); - const Temp_Type X131 = AT(6, 1); - const Temp_Type X132 = AT(6, 2); - const Temp_Type X133 = AT(6, 3); - const Temp_Type X134 = AT(6, 4); - const Temp_Type X135 = AT(6, 5); - const Temp_Type X136 = AT(6, 6); - const Temp_Type X137 = AT(6, 7); - // 80 muls 48 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - R.at(0, 0) = X100; - R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f)); - R.at(0, 2) = X104; - R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f)); - R.at(1, 0) = X110; - R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f)); - R.at(1, 2) = X114; - R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f)); - R.at(2, 0) = X120; - R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f)); - R.at(2, 2) = X124; - R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f)); - R.at(3, 0) = X130; - R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f)); - R.at(3, 2) = X134; - R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f)); - // 40 muls 24 adds - // 4x4 = 4x8 times 8x4, matrix 1 is constant - S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f)); - S.at(0, 1) = X102; - S.at(0, 2) = D(X101 * 
F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f)); - S.at(0, 3) = X106; - S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f)); - S.at(1, 1) = X112; - S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f)); - S.at(1, 3) = X116; - S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f)); - S.at(2, 1) = X122; - S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f)); - S.at(2, 3) = X126; - S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f)); - S.at(3, 1) = X132; - S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f)); - S.at(3, 3) = X136; - // 40 muls 24 adds - } - }; - } // end namespace DCT_Upsample - - // Unconditionally frees all allocated m_blocks. - void jpeg_decoder::free_all_blocks() - { - m_pStream = NULL; - for (mem_block *b = m_pMem_blocks; b; ) - { - mem_block *n = b->m_pNext; - jpgd_free(b); - b = n; - } - m_pMem_blocks = NULL; - } - - // This method handles all errors. - // It could easily be changed to use C++ exceptions. 
- void jpeg_decoder::stop_decoding(jpgd_status status) - { - m_error_code = status; - free_all_blocks(); - longjmp(m_jmp_state, status); - - // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit - // that this function doesn't return, otherwise we get this error: - // - // error : function declared 'noreturn' should not return - exit(1); - } - - void *jpeg_decoder::alloc(size_t nSize, bool zero) - { - nSize = (JPGD_MAX(nSize, 1) + 3) & ~3; - char *rv = NULL; - for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext) - { - if ((b->m_used_count + nSize) <= b->m_size) - { - rv = b->m_data + b->m_used_count; - b->m_used_count += nSize; - break; - } - } - if (!rv) - { - int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047); - mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity); - if (!b) stop_decoding(JPGD_NOTENOUGHMEM); - b->m_pNext = m_pMem_blocks; m_pMem_blocks = b; - b->m_used_count = nSize; - b->m_size = capacity; - rv = b->m_data; - } - if (zero) memset(rv, 0, nSize); - return rv; - } - - void jpeg_decoder::word_clear(void *p, uint16 c, uint n) - { - uint8 *pD = (uint8*)p; - const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF; - while (n) - { - pD[0] = l; pD[1] = h; pD += 2; - n--; - } - } - - // Refill the input buffer. - // This method will sit in a loop until (A) the buffer is full or (B) - // the stream's read() method reports and end of file condition. - void jpeg_decoder::prep_in_buffer() - { - m_in_buf_left = 0; - m_pIn_buf_ofs = m_in_buf; - - if (m_eof_flag) - return; - - do - { - int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag); - if (bytes_read == -1) - stop_decoding(JPGD_STREAM_READ); - - m_in_buf_left += bytes_read; - } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag)); - - m_total_bytes_read += m_in_buf_left; - - // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid). 
- // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.) - word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64); - } - - // Read a Huffman code table. - void jpeg_decoder::read_dht_marker() - { - int i, index, count; - uint8 huff_num[17]; - uint8 huff_val[256]; - - uint num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= 2; - - while (num_left) - { - index = get_bits(8); - - huff_num[0] = 0; - - count = 0; - - for (i = 1; i <= 16; i++) - { - huff_num[i] = static_cast(get_bits(8)); - count += huff_num[i]; - } - - if (count > 255) - stop_decoding(JPGD_BAD_DHT_COUNTS); - - for (i = 0; i < count; i++) - huff_val[i] = static_cast(get_bits(8)); - - i = 1 + 16 + count; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= i; - - if ((index & 0x10) > 0x10) - stop_decoding(JPGD_BAD_DHT_INDEX); - - index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1); - - if (index >= JPGD_MAX_HUFF_TABLES) - stop_decoding(JPGD_BAD_DHT_INDEX); - - if (!m_huff_num[index]) - m_huff_num[index] = (uint8 *)alloc(17); - - if (!m_huff_val[index]) - m_huff_val[index] = (uint8 *)alloc(256); - - m_huff_ac[index] = (index & 0x10) != 0; - memcpy(m_huff_num[index], huff_num, 17); - memcpy(m_huff_val[index], huff_val, 256); - } - } - - // Read a quantization table. 
- void jpeg_decoder::read_dqt_marker() - { - int n, i, prec; - uint num_left; - uint temp; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DQT_MARKER); - - num_left -= 2; - - while (num_left) - { - n = get_bits(8); - prec = n >> 4; - n &= 0x0F; - - if (n >= JPGD_MAX_QUANT_TABLES) - stop_decoding(JPGD_BAD_DQT_TABLE); - - if (!m_quant[n]) - m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t)); - - // read quantization entries, in zag order - for (i = 0; i < 64; i++) - { - temp = get_bits(8); - - if (prec) - temp = (temp << 8) + get_bits(8); - - m_quant[n][i] = static_cast(temp); - } - - i = 64 + 1; - - if (prec) - i += 64; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DQT_LENGTH); - - num_left -= i; - } - } - - // Read the start of frame (SOF) marker. - void jpeg_decoder::read_sof_marker() - { - int i; - uint num_left; - - num_left = get_bits(16); - - if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */ - stop_decoding(JPGD_BAD_PRECISION); - - m_image_y_size = get_bits(16); - - if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT)) - stop_decoding(JPGD_BAD_HEIGHT); - - m_image_x_size = get_bits(16); - - if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH)) - stop_decoding(JPGD_BAD_WIDTH); - - m_comps_in_frame = get_bits(8); - - if (m_comps_in_frame > JPGD_MAX_COMPONENTS) - stop_decoding(JPGD_TOO_MANY_COMPONENTS); - - if (num_left != (uint)(m_comps_in_frame * 3 + 8)) - stop_decoding(JPGD_BAD_SOF_LENGTH); - - for (i = 0; i < m_comps_in_frame; i++) - { - m_comp_ident[i] = get_bits(8); - m_comp_h_samp[i] = get_bits(4); - m_comp_v_samp[i] = get_bits(4); - m_comp_quant[i] = get_bits(8); - } - } - - // Used to skip unrecognized markers. 
- void jpeg_decoder::skip_variable_marker() - { - uint num_left; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_VARIABLE_MARKER); - - num_left -= 2; - - while (num_left) - { - get_bits(8); - num_left--; - } - } - - // Read a define restart interval (DRI) marker. - void jpeg_decoder::read_dri_marker() - { - if (get_bits(16) != 4) - stop_decoding(JPGD_BAD_DRI_LENGTH); - - m_restart_interval = get_bits(16); - } - - // Read a start of scan (SOS) marker. - void jpeg_decoder::read_sos_marker() - { - uint num_left; - int i, ci, n, c, cc; - - num_left = get_bits(16); - - n = get_bits(8); - - m_comps_in_scan = n; - - num_left -= 3; - - if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) ) - stop_decoding(JPGD_BAD_SOS_LENGTH); - - for (i = 0; i < n; i++) - { - cc = get_bits(8); - c = get_bits(8); - num_left -= 2; - - for (ci = 0; ci < m_comps_in_frame; ci++) - if (cc == m_comp_ident[ci]) - break; - - if (ci >= m_comps_in_frame) - stop_decoding(JPGD_BAD_SOS_COMP_ID); - - m_comp_list[i] = ci; - m_comp_dc_tab[ci] = (c >> 4) & 15; - m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1); - } - - m_spectral_start = get_bits(8); - m_spectral_end = get_bits(8); - m_successive_high = get_bits(4); - m_successive_low = get_bits(4); - - if (!m_progressive_flag) - { - m_spectral_start = 0; - m_spectral_end = 63; - } - - num_left -= 3; - - while (num_left) /* read past whatever is num_left */ - { - get_bits(8); - num_left--; - } - } - - // Finds the next marker. - int jpeg_decoder::next_marker() - { - uint c, bytes; - - bytes = 0; - - do - { - do - { - bytes++; - c = get_bits(8); - } while (c != 0xFF); - - do - { - c = get_bits(8); - } while (c == 0xFF); - - } while (c == 0); - - // If bytes > 0 here, there where extra bytes before the marker (not good). - - return c; - } - - // Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is - // encountered. 
- int jpeg_decoder::process_markers() - { - int c; - - for ( ; ; ) - { - c = next_marker(); - - switch (c) - { - case M_SOF0: - case M_SOF1: - case M_SOF2: - case M_SOF3: - case M_SOF5: - case M_SOF6: - case M_SOF7: - // case M_JPG: - case M_SOF9: - case M_SOF10: - case M_SOF11: - case M_SOF13: - case M_SOF14: - case M_SOF15: - case M_SOI: - case M_EOI: - case M_SOS: - { - return c; - } - case M_DHT: - { - read_dht_marker(); - break; - } - // No arithmitic support - dumb patents! - case M_DAC: - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - case M_DQT: - { - read_dqt_marker(); - break; - } - case M_DRI: - { - read_dri_marker(); - break; - } - //case M_APP0: /* no need to read the JFIF marker */ - - case M_JPG: - case M_RST0: /* no parameters */ - case M_RST1: - case M_RST2: - case M_RST3: - case M_RST4: - case M_RST5: - case M_RST6: - case M_RST7: - case M_TEM: - { - stop_decoding(JPGD_UNEXPECTED_MARKER); - break; - } - default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */ - { - skip_variable_marker(); - break; - } - } - } - } - - // Finds the start of image (SOI) marker. - // This code is rather defensive: it only checks the first 512 bytes to avoid - // false positives. - void jpeg_decoder::locate_soi_marker() - { - uint lastchar, thischar; - uint bytesleft; - - lastchar = get_bits(8); - - thischar = get_bits(8); - - /* ok if it's a normal JPEG file without a special header */ - - if ((lastchar == 0xFF) && (thischar == M_SOI)) - return; - - bytesleft = 4096; //512; - - for ( ; ; ) - { - if (--bytesleft == 0) - stop_decoding(JPGD_NOT_JPEG); - - lastchar = thischar; - - thischar = get_bits(8); - - if (lastchar == 0xFF) - { - if (thischar == M_SOI) - break; - else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end - stop_decoding(JPGD_NOT_JPEG); - } - } - - // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad. 
- thischar = (m_bit_buf >> 24) & 0xFF; - - if (thischar != 0xFF) - stop_decoding(JPGD_NOT_JPEG); - } - - // Find a start of frame (SOF) marker. - void jpeg_decoder::locate_sof_marker() - { - locate_soi_marker(); - - int c = process_markers(); - - switch (c) - { - case M_SOF2: - m_progressive_flag = JPGD_TRUE; - case M_SOF0: /* baseline DCT */ - case M_SOF1: /* extended sequential DCT */ - { - read_sof_marker(); - break; - } - case M_SOF9: /* Arithmitic coding */ - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - default: - { - stop_decoding(JPGD_UNSUPPORTED_MARKER); - break; - } - } - } - - // Find a start of scan (SOS) marker. - int jpeg_decoder::locate_sos_marker() - { - int c; - - c = process_markers(); - - if (c == M_EOI) - return JPGD_FALSE; - else if (c != M_SOS) - stop_decoding(JPGD_UNEXPECTED_MARKER); - - read_sos_marker(); - - return JPGD_TRUE; - } - - // Reset everything to default/uninitialized state. - void jpeg_decoder::init(jpeg_decoder_stream *pStream) - { - m_pMem_blocks = NULL; - m_error_code = JPGD_SUCCESS; - m_ready_flag = false; - m_image_x_size = m_image_y_size = 0; - m_pStream = pStream; - m_progressive_flag = JPGD_FALSE; - - memset(m_huff_ac, 0, sizeof(m_huff_ac)); - memset(m_huff_num, 0, sizeof(m_huff_num)); - memset(m_huff_val, 0, sizeof(m_huff_val)); - memset(m_quant, 0, sizeof(m_quant)); - - m_scan_type = 0; - m_comps_in_frame = 0; - - memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp)); - memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp)); - memset(m_comp_quant, 0, sizeof(m_comp_quant)); - memset(m_comp_ident, 0, sizeof(m_comp_ident)); - memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks)); - memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks)); - - m_comps_in_scan = 0; - memset(m_comp_list, 0, sizeof(m_comp_list)); - memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab)); - memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab)); - - m_spectral_start = 0; - m_spectral_end = 0; - m_successive_low = 0; - m_successive_high = 0; - m_max_mcu_x_size = 
0; - m_max_mcu_y_size = 0; - m_blocks_per_mcu = 0; - m_max_blocks_per_row = 0; - m_mcus_per_row = 0; - m_mcus_per_col = 0; - m_expanded_blocks_per_component = 0; - m_expanded_blocks_per_mcu = 0; - m_expanded_blocks_per_row = 0; - m_freq_domain_chroma_upsample = false; - - memset(m_mcu_org, 0, sizeof(m_mcu_org)); - - m_total_lines_left = 0; - m_mcu_lines_left = 0; - m_real_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_pixel = 0; - - memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs)); - - memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs)); - memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs)); - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_eob_run = 0; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_pIn_buf_ofs = m_in_buf; - m_in_buf_left = 0; - m_eof_flag = false; - m_tem_flag = 0; - - memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start)); - memset(m_in_buf, 0, sizeof(m_in_buf)); - memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end)); - - m_restart_interval = 0; - m_restarts_left = 0; - m_next_restart_num = 0; - - m_max_mcus_per_row = 0; - m_max_blocks_per_mcu = 0; - m_max_mcus_per_col = 0; - - memset(m_last_dc_val, 0, sizeof(m_last_dc_val)); - m_pMCU_coefficients = NULL; - m_pSample_buf = NULL; - - m_total_bytes_read = 0; - - m_pScan_line_0 = NULL; - m_pScan_line_1 = NULL; - - // Ready the input buffer. - prep_in_buffer(); - - // Prime the bit buffer. - m_bits_left = 16; - m_bit_buf = 0; - - get_bits(16); - get_bits(16); - - for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++) - m_mcu_block_max_zag[i] = 64; - } - -#define SCALEBITS 16 -#define ONE_HALF ((int) 1 << (SCALEBITS-1)) -#define FIX(x) ((int) ((x) * (1L<> SCALEBITS; - m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS; - m_crg[i] = (-FIX(0.71414f)) * k; - m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF; - } - } - - // This method throws back into the stream any bytes that where read - // into the bit buffer during initial marker scanning. 
- void jpeg_decoder::fix_in_buffer() - { - // In case any 0xFF's where pulled into the buffer during marker scanning. - JPGD_ASSERT((m_bits_left & 7) == 0); - - if (m_bits_left == 16) - stuff_char( (uint8)(m_bit_buf & 0xFF)); - - if (m_bits_left >= 8) - stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF)); - - stuff_char((uint8)((m_bit_buf >> 16) & 0xFF)); - stuff_char((uint8)((m_bit_buf >> 24) & 0xFF)); - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - void jpeg_decoder::transform_mcu(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64; - - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - } - - static const uint8 s_max_rc[64] = - { - 17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86, - 102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136 - }; - - void jpeg_decoder::transform_mcu_expand(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64; - - // Y IDCT - int mcu_block; - for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - - // Chroma IDCT, with upsampling - jpgd_block_t temp_block[64]; - - for (int i = 0; i < 2; i++) - { - DCT_Upsample::Matrix44 P, Q, R, S; - - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1); - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64); - - switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1]) - { - case 1*16+1: - DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 1>::calc(R, S, 
pSrc_ptr); - break; - case 1*16+2: - DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr); - break; - case 2*16+2: - DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+2: - DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+3: - DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr); - break; - case 3*16+4: - DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr); - break; - case 4*16+4: - DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+4: - DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+5: - DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr); - break; - case 5*16+6: - DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr); - break; - case 6*16+6: - DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+6: - DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+7: - DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr); - break; - case 7*16+8: - DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr); - break; - case 8*16+8: - DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr); - break; - default: - JPGD_ASSERT(false); - } - - DCT_Upsample::Matrix44 a(P + Q); P -= Q; - DCT_Upsample::Matrix44& b = P; - DCT_Upsample::Matrix44 c(R + S); R -= S; - DCT_Upsample::Matrix44& d = R; - - DCT_Upsample::Matrix44::add_and_store(temp_block, a, c); - 
idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::add_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - pSrc_ptr += 64; - } - } - - // Loads and dequantizes the next row of (already decoded) coefficients. - // Progressive images only. - void jpeg_decoder::load_next_row() - { - int i; - jpgd_block_t *p; - jpgd_quant_t *q; - int mcu_row, mcu_block, row_block = 0; - int component_num, component_id; - int block_x_mcu[JPGD_MAX_COMPONENTS]; - - memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - q = m_quant[m_comp_quant[component_id]]; - - p = m_pMCU_coefficients + 64 * mcu_block; - - jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - p[0] = pDC[0]; - memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t)); - - for (i = 63; i > 0; i--) - if (p[g_ZAG[i]]) - break; - - m_mcu_block_max_zag[mcu_block] = i + 1; - - for ( ; i >= 0; i--) - if (p[g_ZAG[i]]) - p[g_ZAG[i]] = static_cast(p[g_ZAG[i]] * q[i]); - - row_block++; - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - - block_x_mcu[component_id] += m_comp_h_samp[component_id]; 
- } - } - } - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - - // Restart interval processing. - void jpeg_decoder::process_restart() - { - int i; - int c = 0; - - // Align to a byte boundry - // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers! - //get_bits_no_markers(m_bits_left & 7); - - // Let's scan a little bit to find the marker, but not _too_ far. - // 1536 is a "fudge factor" that determines how much to scan. - for (i = 1536; i > 0; i--) - if (get_char() == 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - for ( ; i > 0; i--) - if ((c = get_char()) != 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Is it the expected marker? If not, something bad happened. - if (c != (m_next_restart_num + M_RST0)) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Reset each component's DC prediction values. - memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - m_restarts_left = m_restart_interval; - - m_next_restart_num = (m_next_restart_num + 1) & 7; - - // Get the bit buffer going again... - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - static inline int dequantize_ac(int c, int q) { c *= q; return c; } - - // Decodes and dequantizes the next row of coefficients. 
- void jpeg_decoder::decode_next_row() - { - int row_block = 0; - - for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - jpgd_block_t* p = m_pMCU_coefficients; - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64) - { - int component_id = m_mcu_org[mcu_block]; - jpgd_quant_t* q = m_quant[m_comp_quant[component_id]]; - - int r, s; - s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r); - s = HUFF_EXTEND(r, s); - - m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]); - - p[0] = static_cast(s * q[0]); - - int prev_num_set = m_mcu_block_max_zag[mcu_block]; - - huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]]; - - int k; - for (k = 1; k < 64; k++) - { - int extra_bits; - s = huff_decode(pH, extra_bits); - - r = s >> 4; - s &= 15; - - if (s) - { - if (r) - { - if ((k + r) > 63) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(r, prev_num_set - k); - int kt = k; - while (n--) - p[g_ZAG[kt++]] = 0; - } - - k += r; - } - - s = HUFF_EXTEND(extra_bits, s); - - JPGD_ASSERT(k < 64); - - p[g_ZAG[k]] = static_cast(dequantize_ac(s, q[k])); //s * q[k]; - } - else - { - if (r == 15) - { - if ((k + 16) > 64) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(16, prev_num_set - k); - int kt = k; - while (n--) - { - JPGD_ASSERT(kt <= 63); - p[g_ZAG[kt++]] = 0; - } - } - - k += 16 - 1; // - 1 because the loop counter is k - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0); - // END EPIC MOD - } - else - break; - } - } - - if (k < prev_num_set) - { - int kt = k; - while (kt < prev_num_set) - p[g_ZAG[kt++]] = 0; - } - - m_mcu_block_max_zag[mcu_block] = k; - - row_block++; - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - - m_restarts_left--; - } - } - - // YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB - 
void jpeg_decoder::H1V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int y = s[j]; - int cb = s[64+j]; - int cr = s[128+j]; - - if (jpg_format == ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - d += 4; - } - - s += 64*3; - } - } - - // YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H2V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *y = m_pSample_buf + row * 8; - uint8 *c = m_pSample_buf + 2*64 + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 4; j++) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j<<1]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - } - - d0 += 8; - - c++; - } - y += 64; - } - - y += 64*4 - 64*2; - c += 64*4 - 8; - } - } - - // YCbCr H2V1 (1x2:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H1V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - 
else - y = m_pSample_buf + 64*1 + (row & 7) * 8; - - c = m_pSample_buf + 64*2 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int cb = c[0+j]; - int cr = c[64+j]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - } - - d0 += 4; - d1 += 4; - } - - y += 64*4; - c += 64*4; - } - } - - // YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB - void jpeg_decoder::H2V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*2 + (row & 7) * 8; - - c = m_pSample_buf + 64*4 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 8; j += 2) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+bc); - d1[5] = clamp(yy+gc); - d1[6] = clamp(yy+rc); - d1[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = 
clamp(yy+bc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+rc); - d1[5] = clamp(yy+gc); - d1[6] = clamp(yy+bc); - d1[7] = 255; - } - - d0 += 8; - d1 += 8; - - c++; - } - y += 64; - } - - y += 64*6 - 64*2; - c += 64*6 - 8; - } - } - - // Y (1 block per MCU) to 8-bit grayscale - void jpeg_decoder::gray_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - *(uint *)d = *(uint *)s; - *(uint *)(&d[4]) = *(uint *)(&s[4]); - - s += 64; - d += 8; - } - } - - void jpeg_decoder::expanded_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - - uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8; - - uint8* d = m_pScan_line_0; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int k = 0; k < m_max_mcu_x_size; k += 8) - { - const int Y_ofs = k * 8; - const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component; - const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2; - for (int j = 0; j < 8; j++) - { - int y = Py[Y_ofs + j]; - int cb = Py[Cb_ofs + j]; - int cr = Py[Cr_ofs + j]; - - if (jpg_format == ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - - d += 4; - } - } - - Py += 64 * m_expanded_blocks_per_mcu; - } - } - - // Find end of image (EOI) marker, so we can return to the user the exact size of the input stream. - void jpeg_decoder::find_eoi() - { - if (!m_progressive_flag) - { - // Attempt to read the EOI marker. 
- //get_bits_no_markers(m_bits_left & 7); - - // Prime the bit buffer - m_bits_left = 16; - get_bits(16); - get_bits(16); - - // The next marker _should_ be EOI - process_markers(); - } - - m_total_bytes_read -= m_in_buf_left; - } - - int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len) - { - if ((m_error_code) || (!m_ready_flag)) - return JPGD_FAILED; - - if (m_total_lines_left == 0) - return JPGD_DONE; - - if (m_mcu_lines_left == 0) - { - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - if (m_progressive_flag) - load_next_row(); - else - decode_next_row(); - - // Find the EOI marker if that was the last row. - if (m_total_lines_left <= m_max_mcu_y_size) - find_eoi(); - - m_mcu_lines_left = m_max_mcu_y_size; - } - - if (m_freq_domain_chroma_upsample) - { - expanded_convert(); - *pScan_line = m_pScan_line_0; - } - else - { - switch (m_scan_type) - { - case JPGD_YH2V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H2V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH2V1: - { - H2V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_YH1V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H1V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH1V1: - { - H1V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_GRAYSCALE: - { - gray_convert(); - *pScan_line = m_pScan_line_0; - - break; - } - } - } - - *pScan_line_len = m_real_dest_bytes_per_scan_line; - - m_mcu_lines_left--; - m_total_lines_left--; - - return JPGD_SUCCESS; - } - - // Creates the tables needed for efficient Huffman decoding. 
- void jpeg_decoder::make_huff_table(int index, huff_tables *pH) - { - int p, i, l, si; - uint8 huffsize[257]; - uint huffcode[257]; - uint code; - uint subtree; - int code_size; - int lastp; - int nextfreeentry; - int currententry; - - pH->ac_table = m_huff_ac[index] != 0; - - p = 0; - - for (l = 1; l <= 16; l++) - { - for (i = 1; i <= m_huff_num[index][l]; i++) - huffsize[p++] = static_cast(l); - } - - huffsize[p] = 0; - - lastp = p; - - code = 0; - si = huffsize[0]; - p = 0; - - while (huffsize[p]) - { - while (huffsize[p] == si) - { - huffcode[p++] = code; - code++; - } - - code <<= 1; - si++; - } - - memset(pH->look_up, 0, sizeof(pH->look_up)); - memset(pH->look_up2, 0, sizeof(pH->look_up2)); - memset(pH->tree, 0, sizeof(pH->tree)); - memset(pH->code_size, 0, sizeof(pH->code_size)); - - nextfreeentry = -1; - - p = 0; - - while (p < lastp) - { - i = m_huff_val[index][p]; - code = huffcode[p]; - code_size = huffsize[p]; - - pH->code_size[i] = static_cast(code_size); - - if (code_size <= 8) - { - code <<= (8 - code_size); - - for (l = 1 << (8 - code_size); l > 0; l--) - { - JPGD_ASSERT(i < 256); - - pH->look_up[code] = i; - - bool has_extrabits = false; - int extra_bits = 0; - int num_extra_bits = i & 15; - - int bits_to_fetch = code_size; - if (num_extra_bits) - { - int total_codesize = code_size + num_extra_bits; - if (total_codesize <= 8) - { - has_extrabits = true; - extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize)); - JPGD_ASSERT(extra_bits <= 0x7FFF); - bits_to_fetch += num_extra_bits; - } - } - - if (!has_extrabits) - pH->look_up2[code] = i | (bits_to_fetch << 8); - else - pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8); - - code++; - } - } - else - { - subtree = (code >> (code_size - 8)) & 0xFF; - - currententry = pH->look_up[subtree]; - - if (currententry == 0) - { - pH->look_up[subtree] = currententry = nextfreeentry; - pH->look_up2[subtree] = currententry = nextfreeentry; - - nextfreeentry -= 2; - } - 
- code <<= (16 - (code_size - 8)); - - for (l = code_size; l > 9; l--) - { - if ((code & 0x8000) == 0) - currententry--; - - if (pH->tree[-currententry - 1] == 0) - { - pH->tree[-currententry - 1] = nextfreeentry; - - currententry = nextfreeentry; - - nextfreeentry -= 2; - } - else - currententry = pH->tree[-currententry - 1]; - - code <<= 1; - } - - if ((code & 0x8000) == 0) - currententry--; - - pH->tree[-currententry - 1] = i; - } - - p++; - } - } - - // Verifies the quantization tables needed for this scan are available. - void jpeg_decoder::check_quant_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL) - stop_decoding(JPGD_UNDEFINED_QUANT_TABLE); - } - - // Verifies that all the Huffman tables needed for this scan are available. - void jpeg_decoder::check_huff_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - { - if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - - if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - } - - for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++) - if (m_huff_num[i]) - { - if (!m_pHuff_tabs[i]) - m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables)); - - make_huff_table(i, m_pHuff_tabs[i]); - } - } - - // Determines the component order inside each MCU. - // Also calcs how many MCU's are on each row, etc. 
- void jpeg_decoder::calc_mcu_block_order() - { - int component_num, component_id; - int max_h_samp = 0, max_v_samp = 0; - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - if (m_comp_h_samp[component_id] > max_h_samp) - max_h_samp = m_comp_h_samp[component_id]; - - if (m_comp_v_samp[component_id] > max_v_samp) - max_v_samp = m_comp_v_samp[component_id]; - } - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8; - m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8; - } - - if (m_comps_in_scan == 1) - { - m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]]; - m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]]; - } - else - { - m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp; - m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp; - } - - if (m_comps_in_scan == 1) - { - m_mcu_org[0] = m_comp_list[0]; - - m_blocks_per_mcu = 1; - } - else - { - m_blocks_per_mcu = 0; - - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - int num_blocks; - - component_id = m_comp_list[component_num]; - - num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id]; - - while (num_blocks--) - m_mcu_org[m_blocks_per_mcu++] = component_id; - } - } - } - - // Starts a new scan. - int jpeg_decoder::init_scan() - { - if (!locate_sos_marker()) - return JPGD_FALSE; - - calc_mcu_block_order(); - - check_huff_tables(); - - check_quant_tables(); - - memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - if (m_restart_interval) - { - m_restarts_left = m_restart_interval; - m_next_restart_num = 0; - } - - fix_in_buffer(); - - return JPGD_TRUE; - } - - // Starts a frame. 
Determines if the number of components or sampling factors - // are supported. - void jpeg_decoder::init_frame() - { - int i; - - if (m_comps_in_frame == 1) - { - if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1)) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - m_scan_type = JPGD_GRAYSCALE; - m_max_blocks_per_mcu = 1; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if (m_comps_in_frame == 3) - { - if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) || - ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) ) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH1V1; - - m_max_blocks_per_mcu = 3; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH2V1; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH1V2; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 16; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH2V2; - m_max_blocks_per_mcu = 6; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 16; - } - else - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - } - else - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size; - m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size; - - // These values are for the *destination* pixels: after conversion. - if (m_scan_type == JPGD_GRAYSCALE) - m_dest_bytes_per_pixel = 1; - else - m_dest_bytes_per_pixel = 4; - - m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel; - - m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel); - - // Initialize two scan line buffers. 
- m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2)) - m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - - m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu; - - // Should never happen - if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW) - stop_decoding(JPGD_ASSERTION_ERROR); - - // Allocate the coefficient buffer, enough for one MCU - m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t)); - - for (i = 0; i < m_max_blocks_per_mcu; i++) - m_mcu_block_max_zag[i] = 64; - - m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0]; - m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame; - m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu; - // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor. -// BEGIN EPIC MOD -#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING - m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3); -#else - m_freq_domain_chroma_upsample = 0; -#endif -// END EPIC MOD - - if (m_freq_domain_chroma_upsample) - m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64); - else - m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64); - - m_total_lines_left = m_image_y_size; - - m_mcu_lines_left = 0; - - create_look_ups(); - } - - // The coeff_buf series of methods originally stored the coefficients - // into a "virtual" file which was located in EMS, XMS, or a disk file. A cache - // was used to make this process more efficient. Now, we can store the entire - // thing in RAM. 
- jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y) - { - coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf)); - - cb->block_num_x = block_num_x; - cb->block_num_y = block_num_y; - cb->block_len_x = block_len_x; - cb->block_len_y = block_len_y; - cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t); - cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true); - return cb; - } - - inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y) - { - JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y)); - return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x)); - } - - // The following methods decode the various types of m_blocks encountered - // in progressively encoded images. - void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, r; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0) - { - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - } - - pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]); - - p[0] = static_cast(s << pD->m_successive_low); - } - - void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - if (pD->get_bits_no_markers(1)) - { - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - p[0] |= (1 << pD->m_successive_low); - } - } - - void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int k, s, r; - - if (pD->m_eob_run) - { - pD->m_eob_run--; - return; - } - - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - for (k = pD->m_spectral_start; k <= 
pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if ((k += r) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - - p[g_ZAG[k]] = static_cast(s << pD->m_successive_low); - } - else - { - if (r == 15) - { - if ((k += 15) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - } - else - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - pD->m_eob_run--; - - break; - } - } - } - } - - void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, k, r; - int p1 = 1 << pD->m_successive_low; - int m1 = (-1) << pD->m_successive_low; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - k = pD->m_spectral_start; - - if (pD->m_eob_run == 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if (s != 1) - pD->stop_decoding(JPGD_DECODE_ERROR); - - if (pD->get_bits_no_markers(1)) - s = p1; - else - s = m1; - } - else - { - if (r != 15) - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - break; - } - } - - do - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if (*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - else - { - if (--r < 0) - break; - } - - k++; - - } while (k <= pD->m_spectral_end); - - if ((s) && (k < 64)) - { - p[g_ZAG[k]] = static_cast(s); - } - } - } - - if (pD->m_eob_run > 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - 
jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if (*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - } - - pD->m_eob_run--; - } - } - - // Decode a scan in a progressively encoded image. - void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func) - { - int mcu_row, mcu_col, mcu_block; - int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS]; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++) - { - int component_num, component_id; - - memset(block_x_mcu, 0, sizeof(block_x_mcu)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - - decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - m_restarts_left--; - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - } - - // Decode a progressively encoded image. 
- void jpeg_decoder::init_progressive() - { - int i; - - if (m_comps_in_frame == 4) - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - // Allocate the coefficient buffers. - for (i = 0; i < m_comps_in_frame; i++) - { - m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1); - m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8); - } - - for ( ; ; ) - { - int dc_only_scan, refinement_scan; - pDecode_block_func decode_block_func; - - if (!init_scan()) - break; - - dc_only_scan = (m_spectral_start == 0); - refinement_scan = (m_successive_high != 0); - - if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63)) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if (dc_only_scan) - { - if (m_spectral_end) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - } - else if (m_comps_in_scan != 1) /* AC scans can only contain one component */ - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if ((refinement_scan) && (m_successive_low != m_successive_high - 1)) - stop_decoding(JPGD_BAD_SOS_SUCCESSIVE); - - if (dc_only_scan) - { - if (refinement_scan) - decode_block_func = decode_block_dc_refine; - else - decode_block_func = decode_block_dc_first; - } - else - { - if (refinement_scan) - decode_block_func = decode_block_ac_refine; - else - decode_block_func = decode_block_ac_first; - } - - decode_scan(decode_block_func); - - m_bits_left = 16; - get_bits(16); - get_bits(16); - } - - m_comps_in_scan = m_comps_in_frame; - - for (i = 0; i < m_comps_in_frame; i++) - m_comp_list[i] = i; - - calc_mcu_block_order(); - } - - void jpeg_decoder::init_sequential() - { - if (!init_scan()) - stop_decoding(JPGD_UNEXPECTED_MARKER); - } - - void jpeg_decoder::decode_start() - { - init_frame(); - - if (m_progressive_flag) - init_progressive(); - else - init_sequential(); - } - - void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream) - { - init(pStream); - locate_sof_marker(); - } - - 
jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream) - { - if (setjmp(m_jmp_state)) - return; - decode_init(pStream); - } - - int jpeg_decoder::begin_decoding() - { - if (m_ready_flag) - return JPGD_SUCCESS; - - if (m_error_code) - return JPGD_FAILED; - - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - decode_start(); - - m_ready_flag = true; - - return JPGD_SUCCESS; - } - - jpeg_decoder::~jpeg_decoder() - { - free_all_blocks(); - } - - jpeg_decoder_file_stream::jpeg_decoder_file_stream() - { - m_pFile = NULL; - m_eof_flag = false; - m_error_flag = false; - } - - void jpeg_decoder_file_stream::close() - { - if (m_pFile) - { - fclose(m_pFile); - m_pFile = NULL; - } - - m_eof_flag = false; - m_error_flag = false; - } - - jpeg_decoder_file_stream::~jpeg_decoder_file_stream() - { - close(); - } - - bool jpeg_decoder_file_stream::open(const char *Pfilename) - { - close(); - - m_eof_flag = false; - m_error_flag = false; - -#if defined(_MSC_VER) - m_pFile = NULL; - fopen_s(&m_pFile, Pfilename, "rb"); -#else - m_pFile = fopen(Pfilename, "rb"); -#endif - return m_pFile != NULL; - } - - int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - if (!m_pFile) - return -1; - - if (m_eof_flag) - { - *pEOF_flag = true; - return 0; - } - - if (m_error_flag) - return -1; - - int bytes_read = static_cast(fread(pBuf, 1, max_bytes_to_read, m_pFile)); - if (bytes_read < max_bytes_to_read) - { - if (ferror(m_pFile)) - { - m_error_flag = true; - return -1; - } - - m_eof_flag = true; - *pEOF_flag = true; - } - - return bytes_read; - } - - bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size) - { - close(); - m_pSrc_data = pSrc_data; - m_ofs = 0; - m_size = size; - return true; - } - - int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - *pEOF_flag = false; - - if (!m_pSrc_data) - return -1; - - uint bytes_remaining = m_size - m_ofs; - if ((uint)max_bytes_to_read > bytes_remaining) - { - 
max_bytes_to_read = bytes_remaining; - *pEOF_flag = true; - } - - memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read); - m_ofs += max_bytes_to_read; - - return max_bytes_to_read; - } - - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps) - { - if (!actual_comps) - return NULL; - *actual_comps = 0; - - if ((!pStream) || (!width) || (!height) || (!req_comps)) - return NULL; - - if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4)) - return NULL; - - jpeg_decoder decoder(pStream); - if (decoder.get_error_code() != JPGD_SUCCESS) - return NULL; - - const int image_width = decoder.get_width(), image_height = decoder.get_height(); - *width = image_width; - *height = image_height; - *actual_comps = decoder.get_num_components(); - - if (decoder.begin_decoding() != JPGD_SUCCESS) - return NULL; - - const int dst_bpl = image_width * req_comps; - - uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height); - if (!pImage_data) - return NULL; - - for (int y = 0; y < image_height; y++) - { - const uint8* pScan_line = 0; - uint scan_line_len; - if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS) - { - jpgd_free(pImage_data); - return NULL; - } - - uint8 *pDst = pImage_data + y * dst_bpl; - - if (((req_comps == 4) && (decoder.get_num_components() == 3)) || - ((req_comps == 1) && (decoder.get_num_components() == 1))) - { - memcpy(pDst, pScan_line, dst_bpl); - } - else if (decoder.get_num_components() == 1) - { - if (req_comps == 3) - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst += 3; - } - } - else - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst[3] = 255; - pDst += 4; - } - } - } - else if (decoder.get_num_components() == 3) - { - if (req_comps == 1) - { - const int YR = 
19595, YG = 38470, YB = 7471; - for (int x = 0; x < image_width; x++) - { - int r = pScan_line[x*4+0]; - int g = pScan_line[x*4+1]; - int b = pScan_line[x*4+2]; - *pDst++ = static_cast((r * YR + g * YG + b * YB + 32768) >> 16); - } - } - else - { - for (int x = 0; x < image_width; x++) - { - pDst[0] = pScan_line[x*4+0]; - pDst[1] = pScan_line[x*4+1]; - pDst[2] = pScan_line[x*4+2]; - pDst += 3; - } - } - } - } - - return pImage_data; - } - -// BEGIN EPIC MOD - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format) - { - jpg_format = (ERGBFormatJPG)format; -// EMD EPIC MOD - jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size); - return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps); - } - - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps) - { - jpgd::jpeg_decoder_file_stream file_stream; - if (!file_stream.open(pSrc_filename)) - return NULL; - return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps); - } - -} // namespace jpgd diff --git a/spaces/yyyyulia/7390_nlp_interactive_v2/app.py b/spaces/yyyyulia/7390_nlp_interactive_v2/app.py deleted file mode 100644 index 3f05b3b59fd80f4e6d045501fd9ebe5832362749..0000000000000000000000000000000000000000 --- a/spaces/yyyyulia/7390_nlp_interactive_v2/app.py +++ /dev/null @@ -1,40 +0,0 @@ -import streamlit as st -import tensorflow as tf -from keras.models import load_model -from keras.preprocessing.sequence import pad_sequences -from keras.preprocessing.text import tokenizer_from_json -from keras.preprocessing.text import Tokenizer -import numpy as np -import json - - -loaded_tokenizer = None - -# Load the standalone Keras model -model = tf.keras.models.load_model('my_model') - -with open('tokenizer.json') as f: - tokenizer_data = 
json.load(f) - loaded_tokenizer = tokenizer_from_json(tokenizer_data) - -def predict_text(text): - sequences = loaded_tokenizer.texts_to_sequences([text]) - padded_sequences = pad_sequences(sequences, maxlen=150) - prediction = model.predict(padded_sequences) - return prediction - -# Title of the app -st.title('INFO 7390 NLP Model') - -# Ask user for a number -value = st.text_input('Enter your SMS message to classify', value='') - -# Display the doubled number -st.write(f'Your input value is: {value}') - -if value: - prediction = predict_text(value) - spam = prediction[0][0] > 0.5 - st.write(f'Model prediction, is spam? -- {spam}') -else: - st.write('Your need to input your message to get result') diff --git a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/vdecoder/nsf_hifigan/utils.py b/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/vdecoder/nsf_hifigan/utils.py deleted file mode 100644 index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000 --- a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/vdecoder/nsf_hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - 
print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than lastest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/zhan66/vits-simple-api/bert_vits2/text/chinese_bert.py b/spaces/zhan66/vits-simple-api/bert_vits2/text/chinese_bert.py deleted file mode 100644 index 3560646e2a01fc2410ac8272410969468c23dccc..0000000000000000000000000000000000000000 --- a/spaces/zhan66/vits-simple-api/bert_vits2/text/chinese_bert.py +++ /dev/null @@ -1,70 +0,0 @@ -import os - -import config -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM -from logger import logger -from utils.download import download_and_verify -from config import DEVICE as device - -URLS = [ - "https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/resolve/main/pytorch_model.bin", -] -TARGET_PATH = os.path.join(config.ABS_PATH, "bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin") -EXPECTED_MD5 = None - -if not os.path.exists(TARGET_PATH): - success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5) - -try: - logger.info("Loading chinese-roberta-wwm-ext-large...") - tokenizer = 
AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large") - model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large").to( - device) - logger.info("Loading finished.") -except Exception as e: - logger.error(e) - logger.error(f"Please download pytorch_model.bin from hfl/chinese-roberta-wwm-ext-large.") - - -def get_bert_feature(text, word2ph, device=config.DEVICE): - with torch.no_grad(): - inputs = tokenizer(text, return_tensors='pt') - for i in inputs: - inputs[i] = inputs[i].to(device) - res = model(**inputs, output_hidden_states=True) - res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu() - - assert len(word2ph) == len(text) + 2 - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - return phone_level_feature.T - - -if __name__ == '__main__': - import torch - - word_level_feature = torch.rand(38, 1024) # 12个词,每个词1024维特征 - word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, - 2, 2, 2, 1] - - # 计算总帧数 - total_frames = sum(word2phone) - print(word_level_feature.shape) - print(word2phone) - phone_level_feature = [] - for i in range(len(word2phone)): - print(word_level_feature[i].shape) - - # 对每个词重复word2phone[i]次 - repeat_feature = word_level_feature[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - print(phone_level_feature.shape) # torch.Size([36, 1024]) diff --git a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/separator.tsx b/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/separator.tsx deleted file mode 100644 index 6c55e0b2ca8e2436658a06748aadbff7cd700db0..0000000000000000000000000000000000000000 --- 
a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/separator.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SeparatorPrimitive from '@radix-ui/react-separator' - -import { cn } from '@/lib/utils' - -const Separator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->( - ( - { className, orientation = 'horizontal', decorative = true, ...props }, - ref - ) => ( - - ) -) -Separator.displayName = SeparatorPrimitive.Root.displayName - -export { Separator } diff --git a/spaces/zomehwh/sovits-rudolf/cluster/__init__.py b/spaces/zomehwh/sovits-rudolf/cluster/__init__.py deleted file mode 100644 index f1b9bde04e73e9218a5d534227caa4c25332f424..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-rudolf/cluster/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np -import torch -from sklearn.cluster import KMeans - -def get_cluster_model(ckpt_path): - checkpoint = torch.load(ckpt_path) - kmeans_dict = {} - for spk, ckpt in checkpoint.items(): - km = KMeans(ckpt["n_features_in_"]) - km.__dict__["n_features_in_"] = ckpt["n_features_in_"] - km.__dict__["_n_threads"] = ckpt["_n_threads"] - km.__dict__["cluster_centers_"] = ckpt["cluster_centers_"] - kmeans_dict[spk] = km - return kmeans_dict - -def get_cluster_result(model, x, speaker): - """ - x: np.array [t, 256] - return cluster class result - """ - return model[speaker].predict(x) - -def get_cluster_center_result(model, x,speaker): - """x: np.array [t, 256]""" - predict = model[speaker].predict(x) - return model[speaker].cluster_centers_[predict] - -def get_center(model, x,speaker): - return model[speaker].cluster_centers_[x] diff --git a/spaces/zomehwh/vits-models-pcr/commons.py b/spaces/zomehwh/vits-models-pcr/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/vits-models-pcr/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math 
-import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = 
channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - 
device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm