- Lincon Legal Text Summarizer - Made Using Legal-Bert
- Input Legal Text
- Upload Legal Document
- 5 Pages Per Minute
- - - diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_server.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_server.py deleted file mode 100644 index fa46e905caa307f30a242951610193ee2a98692e..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/aiohttp/web_server.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Low level HTTP server.""" -import asyncio -from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa - -from .abc import AbstractStreamWriter -from .helpers import get_running_loop -from .http_parser import RawRequestMessage -from .streams import StreamReader -from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler -from .web_request import BaseRequest - -__all__ = ("Server",) - - -class Server: - def __init__( - self, - handler: _RequestHandler, - *, - request_factory: Optional[_RequestFactory] = None, - loop: Optional[asyncio.AbstractEventLoop] = None, - **kwargs: Any - ) -> None: - self._loop = get_running_loop(loop) - self._connections: Dict[RequestHandler, asyncio.Transport] = {} - self._kwargs = kwargs - self.requests_count = 0 - self.request_handler = handler - self.request_factory = request_factory or self._make_request - - @property - def connections(self) -> List[RequestHandler]: - return list(self._connections.keys()) - - def connection_made( - self, handler: RequestHandler, transport: asyncio.Transport - ) -> None: - self._connections[handler] = transport - - def connection_lost( - self, handler: RequestHandler, exc: Optional[BaseException] = None - ) -> None: - if handler in self._connections: - del self._connections[handler] - - def _make_request( - self, - message: RawRequestMessage, - payload: StreamReader, - protocol: RequestHandler, - writer: AbstractStreamWriter, - task: "asyncio.Task[None]", - ) -> BaseRequest: - return BaseRequest(message, payload, protocol, writer, task, self._loop) - - async def shutdown(self, timeout: Optional[float] = None) -> None: - coros = [conn.shutdown(timeout) for conn in self._connections] - await asyncio.gather(*coros) - self._connections.clear() - - def __call__(self) -> RequestHandler: - return RequestHandler(self, loop=self._loop, **self._kwargs) diff --git a/spaces/ashercn97/AsherTesting/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/ashercn97/AsherTesting/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index b94974f865491731a1251e3e9736e01cbe81b06f..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -name: Feature request -about: Suggest an improvement or new feature for the web UI -title: '' -labels: 'enhancement' -assignees: '' - ---- - -**Description** - -A clear and concise description of what you want to be implemented. - -**Additional Context** - -If applicable, please provide any extra information, external links, or screenshots that could be useful. 
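For readers skimming the `web_server.py` diff above: the low-level `Server` class it removes is normally driven through aiohttp's runner machinery rather than instantiated by hand. A minimal usage sketch, based on aiohttp's documented low-level server API (the handler body, host, and port are illustrative assumptions, not part of the deleted code):

```python
import asyncio

from aiohttp import web


async def handler(request: web.BaseRequest) -> web.Response:
    # Every request gets the same trivial response in this sketch.
    return web.Response(text="OK")


async def main() -> None:
    # web.Server is the low-level Server shown in the diff: it wraps a plain
    # handler coroutine and acts as a protocol factory for RequestHandler.
    server = web.Server(handler)
    runner = web.ServerRunner(server)
    await runner.setup()
    site = web.TCPSite(runner, "localhost", 8080)
    await site.start()
    print("Serving on http://localhost:8080/")
    # Keep the loop alive; a real application would wait on a shutdown signal.
    await asyncio.Event().wait()


if __name__ == "__main__":
    asyncio.run(main())
```

The `__call__` method in the diff is what lets a `Server` instance act as the protocol factory that asyncio invokes for each new connection.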
diff --git a/spaces/auto-academic/auto-draft/latex_templates/ICLR2022/abstract.tex b/spaces/auto-academic/auto-draft/latex_templates/ICLR2022/abstract.tex deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/awacke1/AIArtReviewStreamlit/Article.md b/spaces/awacke1/AIArtReviewStreamlit/Article.md deleted file mode 100644 index c7f042e4c9c0f401731f009842a325e2d1386bf5..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AIArtReviewStreamlit/Article.md +++ /dev/null @@ -1,51 +0,0 @@ - -# Image Generation for Art, Marketing, Ideation, Design, and Use in Business - -A number of multiple AI pipeline element strategies have evolved on the open market which allow you to generate images using a combination of image prompts and word prompts. This brief analysis gives an idea of the prompting capabilities as well as image rendering techniques that are used in the strategy to generate art from human understanding of images and text used to describe a scene. - -First a top five list on state of the art generators both free and paid is worth consideration. - -1) Midjourney - a Discord server based chatboat AI that allows /imagine prompts which can generate multiple images at a time. This is best at parallel creation, high accuracy even photo real creations. -2) Artbreeder - A multiple capability tool which now features a Collager which assists in starting image composition. By far the most innovative approach which does great to combine the right partial elements in a scene. -3) Dreamstudio - A Huggingface derived art program in beta which uses stable diffusion to create highly accurate art and images. -4) Nightcafe - A credit based creation AI app that can do generation of video dives into an AI art piece which can produce some of the best experiences in Video. -5) RunwayML - a quintessential tool in processing morph audio and video tracks which rival most high end video edit tools. - -These 5 tools make up some of the best AI pipeline programs that are cloud based that allow anyone to begin easily building their portfolio of art. - -The prompting capabilities often involve having a set of text based prompts to get started. Most also feature a starter image which could be an example of what you would like to create. - -URL Links: -1) Collager: https://www.artbreeder.com/beta/collage -2) NightCafe: https://creator.nightcafe.studio/explore -3) Midjourney: https://www.midjourney.com/app/users/779773261440614430/ -4) Dreamstudio: https://beta.dreamstudio.ai/dream -5) RunwayML: https://app.runwayml.com/ - -## Getting Started and Organizing Your AI Pipeline and Process - -Any great strategy has a number of steps that combine all capabilities at your disposal. It is useful to note how you can easily fir these together into a process that works for you. - -The techniques worth noted are listed below. Consider how you will use them will make your pipeline easier and more automated to allow you to spend the majority of your time curating what you have made, and ideating what you want to create next. - -1) Source materials: Since prompting requires text and text examples can quickly help you compose good input, its worth considering and documenting some effective prompts. Nightcafe with its integration into email, sends you a copy of your creation plus the prompting text so one option is to use your email account to keep a record of which prompts work for which outputs. 
-2) Source materials: Discord, since it is a public chat format, allows you to easily see what others are using for prompts in bulk. There are a number of chat channels designed for people new to the platform, and often you can copy and paste if you see very effective prompts with material you are looking for.
-3) Source materials: Collager is unique in its ability to add additive parts and then dial in the percentage of AI you would like with them. This allows you to add a few image elements which help start out your generation.
-4) Source materials: Since images and prompts are going to be your mainstay for inputs, it is worth considering an open standard for storing and retrieving them from anywhere. Github is a good place since markdown can present text in table or list format and includes a capability to reference uploaded images within markdown. This is also a good form for portability since you can later fork and download your repository with a few clicks from anywhere.
-5) Source materials: Google Drive is integrated into the Artbreeder Collager workflow, which allows you to easily expand your work and even compose albums of the ones you like to place in Google Photos albums. The portfolios you save on different sites have different degrees of ease when aggregating your collections. Collager, for instance, allows right-click save for instant saving of your creation. Dreamstudio features a history. Midjourney features a profile site for you to store and review creations, even triggering Upscales, which are important to use to get the highest-resolution output for your creations.
-
-## Social Media integration
-
-Depending on your target "safe for work" exports of your work, it is sometimes important to know which social media outlets you can integrate. Cloud based interactions are the key to successful audiences if you want to scale and share your process with others.
-
-The key social media outlets supported for these tools are listed below, sorted with public open source first:
-
-1) Github - Github is open at most companies and allows creation of a free space to share your content.
-2) LinkedIn - LinkedIn is acceptable use at nearly every company.
-3) Twitter - Twitter is supported as a social media outlet at most companies, yet it can also be used with security restrictions which might limit posting but allow read access.
-4) Facebook - Meta's Facebook is a good outlet since it allows creation of large folios of your images along with stories. This venue, however, is locked down at many organizations.
-5) Instagram - Instagram is supported as an output channel for many tools, yet it has decreased in popularity due to a high frequency of ads and pay-for-likes models. While it can still be one of the best places for domain-specific arrangements of images, it is likely locked down in most secure organizations.
-6) Youtube - For video uploads with automated captioning and long-term storage of short and long form video, this is essential for any creation you compose as video. It is also useful to review and compose playlists of videos here to speed up your learning - spend some time at "YouTube university" and keep a record of keyword searches there along with your playlists to accelerate learning.
-7) Gmail - With the ability to move email in and out, it is useful to create and wrap up project details within email. Most email policies come with a content limitation (for example, no files larger than 25MB).
For this reason get used to creating pproject wrap up archives with winzip or compression software. With the convenience of keyword searching you can usually use this as a base. -8) Last a worth mention is Huggingface.com. Like github as you become more sophisticated in your public open source capabilities, HuggingFace can allow you to wrap up using one of three software development kits which are gadio, streamlit, and HTML5 each with unique AI and UI integration components and features. If you want to create your own AI pipelines this one also has the open source code and models ready to go to help you on your journey. - diff --git a/spaces/awacke1/Embedding-Iframe-HTML5-to-Gradio/README.md b/spaces/awacke1/Embedding-Iframe-HTML5-to-Gradio/README.md deleted file mode 100644 index 79d8d14ebd4d1dcbb4214e8d956e8aac49a0c1b2..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Embedding-Iframe-HTML5-to-Gradio/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Embedding Iframe HTML5 To Gradio -emoji: 🌍 -colorFrom: purple -colorTo: gray -sdk: static -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Generative-AI-Provider/index.html b/spaces/awacke1/Generative-AI-Provider/index.html deleted file mode 100644 index 7de790b1bc7da11bc63290d747705eca571daa36..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Generative-AI-Provider/index.html +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - Provider - - - - - - - - - - - diff --git a/spaces/awacke1/Intrinsic.Bias.Analyzer/app.py b/spaces/awacke1/Intrinsic.Bias.Analyzer/app.py deleted file mode 100644 index 59c2a6830b944cf7f239132e2118d741b1c71682..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Intrinsic.Bias.Analyzer/app.py +++ /dev/null @@ -1,38 +0,0 @@ -import streamlit as st - -def main(): - st.title("SQuAD: Stanford Question Answering Dataset") - st.header("What is SQuAD?") - - st.markdown(""" - Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. - - SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. - - SQuAD 1.1, the previous version of the SQuAD dataset, contains 100,000+ question-answer pairs on 500+ articles. - """) - - st.header("Getting Started") - st.markdown(""" - We've built a few resources to help you get started with the dataset. - - Download a copy of the dataset (distributed under the CC BY-SA 4.0 license): - - To evaluate your models, we have also made available the evaluation script we will use for official evaluation, along with a sample prediction file that the script will take as input. To run the evaluation, use python evaluate-v2.0.py . - - Once you have a built a model that works to your expectations on the dev set, you submit it to get official scores on the dev and a hidden test set. To preserve the integrity of test results, we do not release the test set to the public. 
Instead, we require you to submit your model so that we can run it on the test set for you. Here's a tutorial walking you through official evaluation of your model: - - Because SQuAD is an ongoing effort, we expect the dataset to evolve. - - To keep up to date with major changes to the dataset, please subscribe: - - email address - """) - - st.header("Have Questions?") - st.markdown(""" - Ask us questions at our google group or at robinjia@stanford.edu. - """) - -if __name__ == "__main__": - main() diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/diffusion/logger/saver.py b/spaces/azusarang/so-vits-svc-models-ba_P/diffusion/logger/saver.py deleted file mode 100644 index ef78b52b6bcd32106f962b731d3784d72d5f0cce..0000000000000000000000000000000000000000 --- a/spaces/azusarang/so-vits-svc-models-ba_P/diffusion/logger/saver.py +++ /dev/null @@ -1,150 +0,0 @@ -''' -author: wayn391@mastertones -''' - -import os -import json -import time -import yaml -import datetime -import torch -import matplotlib.pyplot as plt -from . import utils -from torch.utils.tensorboard import SummaryWriter - -class Saver(object): - def __init__( - self, - args, - initial_global_step=-1): - - self.expdir = args.env.expdir - self.sample_rate = args.data.sampling_rate - - # cold start - self.global_step = initial_global_step - self.init_time = time.time() - self.last_time = time.time() - - # makedirs - os.makedirs(self.expdir, exist_ok=True) - - # path - self.path_log_info = os.path.join(self.expdir, 'log_info.txt') - - # ckpt - os.makedirs(self.expdir, exist_ok=True) - - # writer - self.writer = SummaryWriter(os.path.join(self.expdir, 'logs')) - - # save config - path_config = os.path.join(self.expdir, 'config.yaml') - with open(path_config, "w") as out_config: - yaml.dump(dict(args), out_config) - - - def log_info(self, msg): - '''log method''' - if isinstance(msg, dict): - msg_list = [] - for k, v in msg.items(): - tmp_str = '' - if isinstance(v, int): - tmp_str = '{}: {:,}'.format(k, v) - else: - tmp_str = '{}: {}'.format(k, v) - - msg_list.append(tmp_str) - msg_str = '\n'.join(msg_list) - else: - msg_str = msg - - # dsplay - print(msg_str) - - # save - with open(self.path_log_info, 'a') as fp: - fp.write(msg_str+'\n') - - def log_value(self, dict): - for k, v in dict.items(): - self.writer.add_scalar(k, v, self.global_step) - - def log_spec(self, name, spec, spec_out, vmin=-14, vmax=3.5): - spec_cat = torch.cat([(spec_out - spec).abs() + vmin, spec, spec_out], -1) - spec = spec_cat[0] - if isinstance(spec, torch.Tensor): - spec = spec.cpu().numpy() - fig = plt.figure(figsize=(12, 9)) - plt.pcolor(spec.T, vmin=vmin, vmax=vmax) - plt.tight_layout() - self.writer.add_figure(name, fig, self.global_step) - - def log_audio(self, dict): - for k, v in dict.items(): - self.writer.add_audio(k, v, global_step=self.global_step, sample_rate=self.sample_rate) - - def get_interval_time(self, update=True): - cur_time = time.time() - time_interval = cur_time - self.last_time - if update: - self.last_time = cur_time - return time_interval - - def get_total_time(self, to_str=True): - total_time = time.time() - self.init_time - if to_str: - total_time = str(datetime.timedelta( - seconds=total_time))[:-5] - return total_time - - def save_model( - self, - model, - optimizer, - name='model', - postfix='', - to_json=False): - # path - if postfix: - postfix = '_' + postfix - path_pt = os.path.join( - self.expdir , name+postfix+'.pt') - - # check - print(' [*] model checkpoint saved: {}'.format(path_pt)) - - # save - if optimizer is not 
None: - torch.save({ - 'global_step': self.global_step, - 'model': model.state_dict(), - 'optimizer': optimizer.state_dict()}, path_pt) - else: - torch.save({ - 'global_step': self.global_step, - 'model': model.state_dict()}, path_pt) - - # to json - if to_json: - path_json = os.path.join( - self.expdir , name+'.json') - utils.to_json(path_params, path_json) - - def delete_model(self, name='model', postfix=''): - # path - if postfix: - postfix = '_' + postfix - path_pt = os.path.join( - self.expdir , name+postfix+'.pt') - - # delete - if os.path.exists(path_pt): - os.remove(path_pt) - print(' [*] model checkpoint deleted: {}'.format(path_pt)) - - def global_step_increment(self): - self.global_step += 1 - - diff --git a/spaces/baixing/hackathon_chatbot_baixing_api/README.md b/spaces/baixing/hackathon_chatbot_baixing_api/README.md deleted file mode 100644 index ac53e2f8ed21b3fdb1c1a41547318a7dfbfa7f1d..0000000000000000000000000000000000000000 --- a/spaces/baixing/hackathon_chatbot_baixing_api/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: hackathon chatbot baixing api -emoji: 🐨 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: cc-by-4.0 -duplicated_from: Elfe/hackathon_chatbot_simple ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/benchmarks/additionWithUpdate.js b/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/benchmarks/additionWithUpdate.js deleted file mode 100644 index af823fc89de6e0658beeb1033259da46546789bb..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/benchmarks/additionWithUpdate.js +++ /dev/null @@ -1,11 +0,0 @@ -function additionWithUpdate() { - var numAdditions = 1e4; - - for (var i = 0; i < numAdditions; ++i) { - var currentTween = new TWEEN.Tween({a: 0.0}); - currentTween.to({a: 1.0}, 1.0); - currentTween.start(0.0); - } - - TWEEN.update(0.5); -} \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/interactive/SelectionHelper.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/interactive/SelectionHelper.js deleted file mode 100644 index 29c01c19de785660f18ad2374e08c37ec8711d9a..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/interactive/SelectionHelper.js +++ /dev/null @@ -1,83 +0,0 @@ -/** - * @author HypnosNova / https://www.threejs.org.cn/gallery - */ - -THREE.SelectionHelper = ( function () { - - function SelectionHelper( selectionBox, renderer, cssClassName ) { - - this.element = document.createElement( 'div' ); - this.element.classList.add( cssClassName ); - this.element.style.pointerEvents = 'none'; - - this.renderer = renderer; - - this.startPoint = { x: 0, y: 0 }; - this.pointTopLeft = { x: 0, y: 0 }; - this.pointBottomRight = { x: 0, y: 0 }; - - this.isDown = false; - - this.renderer.domElement.addEventListener( 'mousedown', function ( event ) { - - this.isDown = true; - this.onSelectStart( event ); - - }.bind( this ), false ); - - this.renderer.domElement.addEventListener( 'mousemove', function ( event ) { - - if ( this.isDown ) { - - this.onSelectMove( event ); - - } - - }.bind( this ), false ); - - this.renderer.domElement.addEventListener( 'mouseup', function ( event ) { - - this.isDown = false; - this.onSelectOver( event ); - - }.bind( this ), false ); - - } - - 
SelectionHelper.prototype.onSelectStart = function ( event ) { - - this.renderer.domElement.parentElement.appendChild( this.element ); - - this.element.style.left = event.clientX + 'px'; - this.element.style.top = event.clientY + 'px'; - this.element.style.width = '0px'; - this.element.style.height = '0px'; - - this.startPoint.x = event.clientX; - this.startPoint.y = event.clientY; - - }; - - SelectionHelper.prototype.onSelectMove = function ( event ) { - - this.pointBottomRight.x = Math.max( this.startPoint.x, event.clientX ); - this.pointBottomRight.y = Math.max( this.startPoint.y, event.clientY ); - this.pointTopLeft.x = Math.min( this.startPoint.x, event.clientX ); - this.pointTopLeft.y = Math.min( this.startPoint.y, event.clientY ); - - this.element.style.left = this.pointTopLeft.x + 'px'; - this.element.style.top = this.pointTopLeft.y + 'px'; - this.element.style.width = ( this.pointBottomRight.x - this.pointTopLeft.x ) + 'px'; - this.element.style.height = ( this.pointBottomRight.y - this.pointTopLeft.y ) + 'px'; - - }; - - SelectionHelper.prototype.onSelectOver = function () { - - this.element.parentElement.removeChild( this.element ); - - }; - - return SelectionHelper; - -} )(); diff --git a/spaces/bballaek17/ChatGPT4/app.py b/spaces/bballaek17/ChatGPT4/app.py deleted file mode 100644 index 7e09e57ef928fd2451fd0ed1295d0994ca75d026..0000000000000000000000000000000000000000 --- a/spaces/bballaek17/ChatGPT4/app.py +++ /dev/null @@ -1,193 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Huggingface provided GPT4 OpenAI API Key -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -#Inferenec function -def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - print(f"system message is ^^ {system_msg}") - if system_msg.strip() == '': - initial_message = [{"role": "user", "content": f"{inputs}"},] - multi_turn_message = [] - else: - initial_message= [{"role": "system", "content": system_msg}, - {"role": "user", "content": f"{inputs}"},] - multi_turn_message = [{"role": "system", "content": system_msg},] - - if chat_counter == 0 : - payload = { - "model": "gpt-4", - "messages": initial_message , - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - print(f"chat_counter - {chat_counter}") - else: #if chat_counter != 0 : - messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},] - for data in chatbot: - user = {} - user["role"] = "user" - user["content"] = data[0] - assistant = {} - assistant["role"] = "assistant" - assistant["content"] = data[1] - messages.append(user) - messages.append(assistant) - temp = {} - temp["role"] = "user" - temp["content"] = inputs - messages.append(temp) - #messages - payload = { - "model": "gpt-4", - "messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0,} - - chat_counter+=1 - - history.append(inputs) - print(f"Logging : payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) 
- print(f"Logging : response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - -#Resetting to blank -def reset_textbox(): - return gr.update(value='') - -#to set a component as visible=False -def set_visible_false(): - return gr.update(visible=False) - -#to set a component as visible=True -def set_visible_true(): - return gr.update(visible=True) - -title = """

🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming

""" - -#display message for themes feature -theme_addon_msg = """
🌟 Discover Gradio Themes with this Demo, featuring v3.22.0! Gradio v3.23.0 also enables seamless Theme sharing. You can develop or modify a theme, and send it to the hub using simple theme.push_to_hub(). -
🏆Participate in Gradio's Theme Building Hackathon to exhibit your creative flair and win fabulous rewards! Join here - Gradio-Themes-Party🎨 🏆
-""" - -#Using info to add additional information about System message in GPT4 -system_msg_info = """A conversation could begin with a system message to gently instruct the assistant. -System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'""" - -#Modifying existing Gradio Theme -theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green", - text_size=gr.themes.sizes.text_lg) - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌

""") - gr.HTML(theme_addon_msg) - gr.HTML('''
Duplicate the Space and run securely with your OpenAI API Key
''') - - with gr.Column(elem_id = "col_container"): - #GPT4 API Key is provided by Huggingface - with gr.Accordion(label="System message:", open=False): - system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value="") - accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False) - chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot") - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") - state = gr.State([]) - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #top_p, temperature - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - #Event handling - inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - - inputs.submit(set_visible_false, [], [system_msg]) - b1.click(set_visible_false, [], [system_msg]) - inputs.submit(set_visible_true, [], [accordion_msg]) - b1.click(set_visible_true, [], [accordion_msg]) - - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #Examples - with gr.Accordion(label="Examples for System message:", open=False): - gr.Examples( - examples = [["""You are an AI programming assistant. - - - Follow the user's requirements carefully and to the letter. - - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail. - - Then output the code in a single code block. - - Minimize any other prose."""], ["""You are ComedianGPT who is a helpful assistant. 
You answer everything with a joke and witty replies."""], - ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."], - ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."], - ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."], - ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."], - ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."], - ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."], - ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."], - ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."], - ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."], - ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."], - ["You are a helpful assistant that provides detailed and accurate information."], - ["You are an assistant that speaks like Shakespeare."], - ["You are a friendly assistant who uses casual language and humor."], - ["You are a financial advisor who gives expert advice on investments and budgeting."], - ["You are a health and fitness expert who provides advice on nutrition and exercise."], - ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."], - ["You are a movie critic who shares insightful opinions on films and their themes."], - ["You are a history enthusiast who loves to discuss historical events and figures."], - ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."], - ["You are an AI poet who can compose creative and evocative poems on any given topic."],], - inputs = system_msg,) - -demo.queue(max_size=99, concurrency_count=20).launch(debug=True) \ No newline at end of file diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327013113.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327013113.py deleted file mode 100644 index 3d2ed6d85dbde28e85d6d8de19a60c493da4f002..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327013113.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -#os.system("pip install gfpgan") - -#os.system("pip freeze") -#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .") -import random -import gradio as gr -from PIL import Image -import torch -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 
'Henry.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg') - - -import cv2 -import glob -import numpy as np -from basicsr.utils import imwrite -from gfpgan import GFPGANer - -import warnings -warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. ' - 'If you really want to use it, please modify the corresponding codes.') -bg_upsampler = None - - - -# set up GFPGAN restorer -restorer = GFPGANer( - model_path='experiments/pretrained_models/GFPGANv1.3.pth', - upscale=2, - arch='clean', - channel_multiplier=2, - bg_upsampler=bg_upsampler) - - -def inference(img): - input_img = cv2.imread(img, cv2.IMREAD_COLOR) - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=False, only_center_face=False, paste_back=True) - - #return Image.fromarray(restored_faces[0][:,:,::-1]) - return Image.fromarray(restored_img[:, :, ::-1]) - -title = "GFP-GAN" -description = "Gradio demo for GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once" -article = "

Towards Real-World Blind Face Restoration with Generative Facial Prior | Github Repo

visitor badge
" -gr.Interface( - inference, - [gr.inputs.Image(type="filepath", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['lincoln.jpg'], - ['einstein.png'], - ['edison.jpg'], - ['Henry.jpg'], - ['Frida.jpg'] - ] - ).launch(enable_queue=True,cache_examples=True) - - diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/hifacegan_arch.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/hifacegan_arch.py deleted file mode 100644 index de26e4c2c8f2848dd855e409da19b81fef2538bb..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/hifacegan_arch.py +++ /dev/null @@ -1,259 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from basicsr.utils.registry import ARCH_REGISTRY -from .hifacegan_util import BaseNetwork, LIPEncoder, SPADEResnetBlock, get_nonspade_norm_layer - - -class SPADEGenerator(BaseNetwork): - """Generator with SPADEResBlock""" - - def __init__(self, - num_in_ch=3, - num_feat=64, - use_vae=False, - z_dim=256, - crop_size=512, - norm_g='spectralspadesyncbatch3x3', - is_train=True, - init_train_phase=3): # progressive training disabled - super().__init__() - self.nf = num_feat - self.input_nc = num_in_ch - self.is_train = is_train - self.train_phase = init_train_phase - - self.scale_ratio = 5 # hardcoded now - self.sw = crop_size // (2**self.scale_ratio) - self.sh = self.sw # 20210519: By default use square image, aspect_ratio = 1.0 - - if use_vae: - # In case of VAE, we will sample from random z vector - self.fc = nn.Linear(z_dim, 16 * self.nf * self.sw * self.sh) - else: - # Otherwise, we make the network deterministic by starting with - # downsampled segmentation map instead of random z - self.fc = nn.Conv2d(num_in_ch, 16 * self.nf, 3, padding=1) - - self.head_0 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g) - - self.g_middle_0 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g) - self.g_middle_1 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g) - - self.ups = nn.ModuleList([ - SPADEResnetBlock(16 * self.nf, 8 * self.nf, norm_g), - SPADEResnetBlock(8 * self.nf, 4 * self.nf, norm_g), - SPADEResnetBlock(4 * self.nf, 2 * self.nf, norm_g), - SPADEResnetBlock(2 * self.nf, 1 * self.nf, norm_g) - ]) - - self.to_rgbs = nn.ModuleList([ - nn.Conv2d(8 * self.nf, 3, 3, padding=1), - nn.Conv2d(4 * self.nf, 3, 3, padding=1), - nn.Conv2d(2 * self.nf, 3, 3, padding=1), - nn.Conv2d(1 * self.nf, 3, 3, padding=1) - ]) - - self.up = nn.Upsample(scale_factor=2) - - def encode(self, input_tensor): - """ - Encode input_tensor into feature maps, can be overridden in derived classes - Default: nearest downsampling of 2**5 = 32 times - """ - h, w = input_tensor.size()[-2:] - sh, sw = h // 2**self.scale_ratio, w // 2**self.scale_ratio - x = F.interpolate(input_tensor, size=(sh, sw)) - return self.fc(x) - - def forward(self, x): - # In oroginal SPADE, seg means a segmentation map, but here we use x instead. 
- seg = x - - x = self.encode(x) - x = self.head_0(x, seg) - - x = self.up(x) - x = self.g_middle_0(x, seg) - x = self.g_middle_1(x, seg) - - if self.is_train: - phase = self.train_phase + 1 - else: - phase = len(self.to_rgbs) - - for i in range(phase): - x = self.up(x) - x = self.ups[i](x, seg) - - x = self.to_rgbs[phase - 1](F.leaky_relu(x, 2e-1)) - x = torch.tanh(x) - - return x - - def mixed_guidance_forward(self, input_x, seg=None, n=0, mode='progressive'): - """ - A helper class for subspace visualization. Input and seg are different images. - For the first n levels (including encoder) we use input, for the rest we use seg. - - If mode = 'progressive', the output's like: AAABBB - If mode = 'one_plug', the output's like: AAABAA - If mode = 'one_ablate', the output's like: BBBABB - """ - - if seg is None: - return self.forward(input_x) - - if self.is_train: - phase = self.train_phase + 1 - else: - phase = len(self.to_rgbs) - - if mode == 'progressive': - n = max(min(n, 4 + phase), 0) - guide_list = [input_x] * n + [seg] * (4 + phase - n) - elif mode == 'one_plug': - n = max(min(n, 4 + phase - 1), 0) - guide_list = [seg] * (4 + phase) - guide_list[n] = input_x - elif mode == 'one_ablate': - if n > 3 + phase: - return self.forward(input_x) - guide_list = [input_x] * (4 + phase) - guide_list[n] = seg - - x = self.encode(guide_list[0]) - x = self.head_0(x, guide_list[1]) - - x = self.up(x) - x = self.g_middle_0(x, guide_list[2]) - x = self.g_middle_1(x, guide_list[3]) - - for i in range(phase): - x = self.up(x) - x = self.ups[i](x, guide_list[4 + i]) - - x = self.to_rgbs[phase - 1](F.leaky_relu(x, 2e-1)) - x = torch.tanh(x) - - return x - - -@ARCH_REGISTRY.register() -class HiFaceGAN(SPADEGenerator): - """ - HiFaceGAN: SPADEGenerator with a learnable feature encoder - Current encoder design: LIPEncoder - """ - - def __init__(self, - num_in_ch=3, - num_feat=64, - use_vae=False, - z_dim=256, - crop_size=512, - norm_g='spectralspadesyncbatch3x3', - is_train=True, - init_train_phase=3): - super().__init__(num_in_ch, num_feat, use_vae, z_dim, crop_size, norm_g, is_train, init_train_phase) - self.lip_encoder = LIPEncoder(num_in_ch, num_feat, self.sw, self.sh, self.scale_ratio) - - def encode(self, input_tensor): - return self.lip_encoder(input_tensor) - - -@ARCH_REGISTRY.register() -class HiFaceGANDiscriminator(BaseNetwork): - """ - Inspired by pix2pixHD multiscale discriminator. - Args: - num_in_ch (int): Channel number of inputs. Default: 3. - num_out_ch (int): Channel number of outputs. Default: 3. - conditional_d (bool): Whether use conditional discriminator. - Default: True. - num_d (int): Number of Multiscale discriminators. Default: 3. - n_layers_d (int): Number of downsample layers in each D. Default: 4. - num_feat (int): Channel number of base intermediate features. - Default: 64. - norm_d (str): String to determine normalization layers in D. - Choices: [spectral][instance/batch/syncbatch] - Default: 'spectralinstance'. - keep_features (bool): Keep intermediate features for matching loss, etc. - Default: True. 
- """ - - def __init__(self, - num_in_ch=3, - num_out_ch=3, - conditional_d=True, - num_d=2, - n_layers_d=4, - num_feat=64, - norm_d='spectralinstance', - keep_features=True): - super().__init__() - self.num_d = num_d - - input_nc = num_in_ch - if conditional_d: - input_nc += num_out_ch - - for i in range(num_d): - subnet_d = NLayerDiscriminator(input_nc, n_layers_d, num_feat, norm_d, keep_features) - self.add_module(f'discriminator_{i}', subnet_d) - - def downsample(self, x): - return F.avg_pool2d(x, kernel_size=3, stride=2, padding=[1, 1], count_include_pad=False) - - # Returns list of lists of discriminator outputs. - # The final result is of size opt.num_d x opt.n_layers_D - def forward(self, x): - result = [] - for _, _net_d in self.named_children(): - out = _net_d(x) - result.append(out) - x = self.downsample(x) - - return result - - -class NLayerDiscriminator(BaseNetwork): - """Defines the PatchGAN discriminator with the specified arguments.""" - - def __init__(self, input_nc, n_layers_d, num_feat, norm_d, keep_features): - super().__init__() - kw = 4 - padw = int(np.ceil((kw - 1.0) / 2)) - nf = num_feat - self.keep_features = keep_features - - norm_layer = get_nonspade_norm_layer(norm_d) - sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, False)]] - - for n in range(1, n_layers_d): - nf_prev = nf - nf = min(nf * 2, 512) - stride = 1 if n == n_layers_d - 1 else 2 - sequence += [[ - norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=stride, padding=padw)), - nn.LeakyReLU(0.2, False) - ]] - - sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] - - # We divide the layers into groups to extract intermediate layer outputs - for n in range(len(sequence)): - self.add_module('model' + str(n), nn.Sequential(*sequence[n])) - - def forward(self, x): - results = [x] - for submodel in self.children(): - intermediate_output = submodel(results[-1]) - results.append(intermediate_output) - - if self.keep_features: - return results[1:] - else: - return results[-1] diff --git a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621104703.py b/spaces/beihai/PDF-Table-Extractor/.history/app_20220621104703.py deleted file mode 100644 index e8ef6826e045b2613e0fb550dcd010495bc402b3..0000000000000000000000000000000000000000 --- a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621104703.py +++ /dev/null @@ -1,53 +0,0 @@ -#-*- coding : utf-8-*- -import base64 -from subprocess import STDOUT -import streamlit as st -import pandas as pd -import camelot as cam # extracting tables from PDFs - -st.title("PDF Table Extractor") - -input_pdf = st.file_uploader(label = "", type = 'pdf') - -background = st.selectbox("表格线条是否隐藏",(False,True)) -extractor_mode = st.selectbox("单页抽取 OR 全文抽取",("单页抽取","全文抽取")) - -def extractor(page,result_name): - tables_all= cam.read_pdf("input.pdf", pages=page, process_background=background) - result_all = pd.ExcelWriter(result_name, engine='xlsxwriter') - for i in range(0,len(tables_all)): - table = tables_all[i].df - sheetname = str(i) - table.to_excel(result_all, sheetname,index=False) - with open('result_all.xlsx','rb') as f: - st.download_button('抽取完成,点击下载!', f,file_name='result_all.xlsx',mime="application/vnd.ms-excel") - - -if input_pdf is not None: - # byte object into a PDF file - with open("input.pdf", "wb") as f: - base64_pdf = base64.b64encode(input_pdf.read()).decode('utf-8') - f.write(base64.b64decode(base64_pdf)) - f.close() - if extractor_mode == "单页抽取": - page_number = 
st.text_input("请填写表格所在PDF页码,eg: 3", value = 1) - # read the pdf and parse it using stream - tables = cam.read_pdf("input.pdf", pages=page_number, process_background=background) - result = pd.ExcelWriter('result.xlsx', engine='xlsxwriter') - #tables[1].to_excel(result,index=False) - for i in range(0,len(tables)): - table = tables[i].df - sheetname = str(i) - table.to_excel(result, sheetname,index=False) - - with open('result.xlsx','rb') as f: - st.download_button('提取完成,点击下载!', f,file_name='result.xlsx',mime="application/vnd.ms-excel") - if extractor_mode == "全文抽取": - tables_all= cam.read_pdf("input.pdf", pages="all", process_background=background) - result_all = pd.ExcelWriter('result_all.xlsx', engine='xlsxwriter') - for i in range(0,len(tables_all)): - table = tables_all[i].df - sheetname = str(i) - table.to_excel(result_all, sheetname,index=False) - with open('result_all.xlsx','rb') as f: - st.download_button('抽取完成,点击下载!', f,file_name='result_all.xlsx',mime="application/vnd.ms-excel") diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/utils/io.py b/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/utils/io.py deleted file mode 100644 index 2dc9afd24019cd930eef6c21ab9f579313dd3b3a..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/utils/io.py +++ /dev/null @@ -1,133 +0,0 @@ -import os -from typing import Dict -import numpy as np - -# from utils.log import get_logger - - -def write_results(filename, results, data_type): - if data_type == 'mot': - save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n' - elif data_type == 'kitti': - save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n' - else: - raise ValueError(data_type) - - with open(filename, 'w') as f: - for frame_id, tlwhs, track_ids in results: - if data_type == 'kitti': - frame_id -= 1 - for tlwh, track_id in zip(tlwhs, track_ids): - if track_id < 0: - continue - x1, y1, w, h = tlwh - x2, y2 = x1 + w, y1 + h - line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h) - f.write(line) - - -# def write_results(filename, results_dict: Dict, data_type: str): -# if not filename: -# return -# path = os.path.dirname(filename) -# if not os.path.exists(path): -# os.makedirs(path) - -# if data_type in ('mot', 'mcmot', 'lab'): -# save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n' -# elif data_type == 'kitti': -# save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n' -# else: -# raise ValueError(data_type) - -# with open(filename, 'w') as f: -# for frame_id, frame_data in results_dict.items(): -# if data_type == 'kitti': -# frame_id -= 1 -# for tlwh, track_id in frame_data: -# if track_id < 0: -# continue -# x1, y1, w, h = tlwh -# x2, y2 = x1 + w, y1 + h -# line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0) -# f.write(line) -# logger.info('Save results to {}'.format(filename)) - - -def read_results(filename, data_type: str, is_gt=False, is_ignore=False): - if data_type in ('mot', 'lab'): - read_fun = read_mot_results - else: - raise ValueError('Unknown data type: {}'.format(data_type)) - - return read_fun(filename, is_gt, is_ignore) - - -""" -labels={'ped', ... % 1 -'person_on_vhcl', ... % 2 -'car', ... % 3 -'bicycle', ... % 4 -'mbike', ... % 5 -'non_mot_vhcl', ... % 6 -'static_person', ... % 7 -'distractor', ... % 8 -'occluder', ... % 9 -'occluder_on_grnd', ... 
%10 -'occluder_full', ... % 11 -'reflection', ... % 12 -'crowd' ... % 13 -}; -""" - - -def read_mot_results(filename, is_gt, is_ignore): - valid_labels = {1} - ignore_labels = {2, 7, 8, 12} - results_dict = dict() - if os.path.isfile(filename): - with open(filename, 'r') as f: - for line in f.readlines(): - linelist = line.split(',') - if len(linelist) < 7: - continue - fid = int(linelist[0]) - if fid < 1: - continue - results_dict.setdefault(fid, list()) - - if is_gt: - if 'MOT16-' in filename or 'MOT17-' in filename: - label = int(float(linelist[7])) - mark = int(float(linelist[6])) - if mark == 0 or label not in valid_labels: - continue - score = 1 - elif is_ignore: - if 'MOT16-' in filename or 'MOT17-' in filename: - label = int(float(linelist[7])) - vis_ratio = float(linelist[8]) - if label not in ignore_labels and vis_ratio >= 0: - continue - else: - continue - score = 1 - else: - score = float(linelist[6]) - - tlwh = tuple(map(float, linelist[2:6])) - target_id = int(linelist[1]) - - results_dict[fid].append((tlwh, target_id, score)) - - return results_dict - - -def unzip_objs(objs): - if len(objs) > 0: - tlwhs, ids, scores = zip(*objs) - else: - tlwhs, ids, scores = [], [], [] - tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) - - return tlwhs, ids, scores \ No newline at end of file diff --git a/spaces/bino-ocle/audio-intelligence-dash/Makefile b/spaces/bino-ocle/audio-intelligence-dash/Makefile deleted file mode 100644 index a8258f29f0db5a42a05facc510a7012fe88bbc17..0000000000000000000000000000000000000000 --- a/spaces/bino-ocle/audio-intelligence-dash/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -SHELL := /bin/bash - -# Variables definitions -# ----------------------------------------------------------------------------- - - - - -# Target section and Global definitions -# ----------------------------------------------------------------------------- -.PHONY: run - -test: - poetry run pytest tests -vv --show-capture=all - -run: - cd app; poetry run python app.py \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Download Expiry Date 2 Full Movie in Tamil Dubbed The Plot Twists Will Shock You.md b/spaces/bioriAsaeru/text-to-voice/Download Expiry Date 2 Full Movie in Tamil Dubbed The Plot Twists Will Shock You.md deleted file mode 100644 index 3bee3d8ff4b73187fc392c9209d305347e57f286..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Download Expiry Date 2 Full Movie in Tamil Dubbed The Plot Twists Will Shock You.md +++ /dev/null @@ -1,6 +0,0 @@ -

Expiry Date 2 full movie in tamil dubbed download


Download File ❤❤❤ https://urloso.com/2uyRjY




diff --git a/spaces/bioriAsaeru/text-to-voice/Employee Monitoring Software Full Version Free Dow lloras accelarator e Benefits Features and Reviews.md b/spaces/bioriAsaeru/text-to-voice/Employee Monitoring Software Full Version Free Dow lloras accelarator e Benefits Features and Reviews.md deleted file mode 100644 index d5a17b3c87ca7eda8e0c6b441435408509c96942..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Employee Monitoring Software Full Version Free Dow lloras accelarator e Benefits Features and Reviews.md +++ /dev/null @@ -1,6 +0,0 @@ -

Employee Monitoring Software Full Version Free Dow lloras accelarator e


Download ————— https://urloso.com/2uyQkN




diff --git a/spaces/bioriAsaeru/text-to-voice/HD Online Player (gandhi Tamil Dubbed Movie Download).md b/spaces/bioriAsaeru/text-to-voice/HD Online Player (gandhi Tamil Dubbed Movie Download).md deleted file mode 100644 index 02e0f8aa12e8c6d358c4772aedcef6505d268c1c..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/HD Online Player (gandhi Tamil Dubbed Movie Download).md +++ /dev/null @@ -1,16 +0,0 @@ -

HD Online Player (gandhi tamil dubbed movie download)


Downloadhttps://urloso.com/2uyRdv



-
-Gandhi was a worldwide hit at the box office, grossing over $15 million against a production budget of $8 million, and has been released to a number of languages and territories. The film was produced by Ronit Dror, who had worked on Devdas (2002), and was directed by Rajiv Rai, who had previously made the Hindi-language Sooryavansham (1977) and Rangoon (1988). John Kani, Raj Kumar, and Ramesh Sethu played Mahatma Gandhi. Ranjan Raman and Atul Kulkarni played Mohandas Karamchand Gandhi. The film was shot in the state of Uttar Pradesh India, and the song "Mere Sapno Ki Rani" was written by Mithoon Kumar and Shailendra. - -The film, a melodrama, was produced by the ITC in 1982, and was a commercial success in India. It was released in United Kingdom and United States in 1983, and was also released in several Asian countries. It was nominated for Best Art Direction and Best Picture at the 34th Asia Pacific Film Festival in New Zealand. At the 35th International Film Festival in Italy in 1983, Gandhi won the Best Film Award. - -Plot - -The film begins with the Mahatma showing up for an interview with William Howard Selwyn, a journalist. He is later arrested for breaking the Non-Co-operation Movement. The Mahatma is sent to the Andaman and Nicobar Islands for seven years. He is later released and returns to India in March, 1915. The Mahatma leaves for South Africa in 1915 to oppose the Imperialism of the British Empire. The first half of Gandhi takes a "chronological" and "chronicle-like" view of the Mahatma's life, while the second half is a "spectacular" and "bioscopic" approach, focusing on Gandhi's work with the Indian independence movement. - -Production - -The film was produced by Ronit Dror, who was also involved in Devdas (2002) and Sooryavansham (1977). Dror stated 4fefd39f24

diff --git a/spaces/bioriAsaeru/text-to-voice/Janleva 555 Movie Download Hd 720p A Net Energy Gain of Romance and Thrill.md b/spaces/bioriAsaeru/text-to-voice/Janleva 555 Movie Download Hd 720p A Net Energy Gain of Romance and Thrill.md deleted file mode 100644 index 395ea81bcc9c1ac0ec931a477eb064e588f3dc73..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Janleva 555 Movie Download Hd 720p A Net Energy Gain of Romance and Thrill.md +++ /dev/null @@ -1,5 +0,0 @@ -
-

18+ Janleva 555 (2012) Hindi Movie Watch Online HD Print Free Download Watch Full Movie Janleva 555 (2012) Hindi Online in HD Movie Download Hub Free Download. Pc 720p 480p Movie Download, 720p Romantic Hindi Movie Download, 1080p Adult Full Movie Download, 720p 480p watch online movie free HD movie watch Hindi Adult Full HD 720p Movie Watch online HD Free Download Janleva 555 (2012) Hindi Watch in HD Print Movie Download Hub

-

Janleva 555 Movie Download Hd 720p


Download File ››› https://urloso.com/2uyQlT



-
-
\ No newline at end of file diff --git a/spaces/bla/tranny/App/Streaming/Utils/Sanity.py b/spaces/bla/tranny/App/Streaming/Utils/Sanity.py deleted file mode 100644 index f9474b6b1ee281f147701a3cfefbb9995c7ad811..0000000000000000000000000000000000000000 --- a/spaces/bla/tranny/App/Streaming/Utils/Sanity.py +++ /dev/null @@ -1,41 +0,0 @@ -from telethon import TelegramClient -from fastapi import Request - - -class Sanity: - client: TelegramClient - media = None - chat_id: int = -1001925049183 - file_id: int - req: Request - limit: int - offset: int - - async def file_exists(self): - try: - self.media = await self.client.get_messages( - entity=self.chat_id, ids=self.file_id - ) - return self.media - except Exception as e: - pass - - def check_ranges(self): - range_header = self.req.headers.get("Range") - if range_header: - offset = range_header.split("=")[1].split("-")[0] or 0 - limit = range_header.split("=")[1].split("-")[1] or self.media.file.size - else: - offset = 0 - limit = self.media.file.size - self.offset = int(offset) - self.limit = int(limit) - - if ( - (limit > self.media.file.size) - or (self.offset < 0) - or (self.limit < self.offset) - ): - return False - else: - return True diff --git a/spaces/blmdsydm/faster-whisper-webui/LICENSE.md b/spaces/blmdsydm/faster-whisper-webui/LICENSE.md deleted file mode 100644 index f5f4b8b5ecd27c09e4ef16e9662bcb7bb2bfc76f..0000000000000000000000000000000000000000 --- a/spaces/blmdsydm/faster-whisper-webui/LICENSE.md +++ /dev/null @@ -1,195 +0,0 @@ -Apache License -============== - -_Version 2.0, January 2004_ -_<>_ - -### Terms and Conditions for use, reproduction, and distribution - -#### 1. Definitions - -“License” shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -“Licensor” shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -“Legal Entity” shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, “control” means **(i)** the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the -outstanding shares, or **(iii)** beneficial ownership of such entity. - -“You” (or “Your”) shall mean an individual or Legal Entity exercising -permissions granted by this License. - -“Source” form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -“Object” form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -“Work” shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -“Derivative Works” shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. 
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -“Contribution” shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -“submitted” means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as “Not a Contribution.” - -“Contributor” shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -#### 2. Grant of Copyright License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -#### 3. Grant of Patent License - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -#### 4. 
Redistribution - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -* **(a)** You must give any other recipients of the Work or Derivative Works a copy of -this License; and -* **(b)** You must cause any modified files to carry prominent notices stating that You -changed the files; and -* **(c)** You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. - -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -#### 5. Submission of Contributions - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -#### 6. Trademarks - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -#### 7. Disclaimer of Warranty - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -#### 8. 
Limitation of Liability - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -#### 9. Accepting Warranty or Additional Liability - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -_END OF TERMS AND CONDITIONS_ - -### APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets `[]` replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same “printed page” as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/docs/METRICS.md b/spaces/brainblow/AudioCreator_Music-Audio_Generation/docs/METRICS.md deleted file mode 100644 index e2ae9a184cbccb8bfefb4ce77afa5ddab743a051..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/docs/METRICS.md +++ /dev/null @@ -1,127 +0,0 @@ -# AudioCraft objective metrics - -In addition to training losses, AudioCraft provides a set of objective metrics -for audio synthesis and audio generation. As these metrics may require -extra dependencies and can be costly to train, they are often disabled by default. -This section provides guidance for setting up and using these metrics in -the AudioCraft training pipelines. - -## Available metrics - -### Audio synthesis quality metrics - -#### SI-SNR - -We provide an implementation of the Scale-Invariant Signal-to-Noise Ratio in PyTorch. -No specific requirement is needed for this metric. 
Please activate the metric at the -evaluation stage with the appropriate flag: - -```shell -dora run <...> evaluate.metrics.sisnr=true -``` - -#### ViSQOL - -We provide a Python wrapper around the ViSQOL [official implementation](https://github.com/google/visqol) -to conveniently run ViSQOL within the training pipelines. - -One must specify the path to the ViSQOL installation through the configuration in order -to enable ViSQOL computations in AudioCraft: - -```shell -# the first parameter is used to activate visqol computation while the second specify -# the path to visqol's library to be used by our python wrapper -dora run <...> evaluate.metrics.visqol=true metrics.visqol.bin= -``` - -See an example grid: [Compression with ViSQOL](../audiocraft/grids/compression/encodec_musicgen_32khz.py) - -To learn more about ViSQOL and how to build ViSQOL binary using bazel, please refer to the -instructions available in the [open source repository](https://github.com/google/visqol). - -### Audio generation metrics - -#### Frechet Audio Distance - -Similarly to ViSQOL, we use a Python wrapper around the Frechet Audio Distance -[official implementation](https://github.com/google-research/google-research/tree/master/frechet_audio_distance) -in TensorFlow. - -Note that we had to make several changes to the actual code in order to make it work. -Please refer to the [FrechetAudioDistanceMetric](../audiocraft/metrics/fad.py) class documentation -for more details. We do not plan to provide further support in obtaining a working setup for the -Frechet Audio Distance at this stage. - -```shell -# the first parameter is used to activate FAD metric computation while the second specify -# the path to FAD library to be used by our python wrapper -dora run <...> evaluate.metrics.fad=true metrics.fad.bin= -``` - -See an example grid: [Evaluation with FAD](../audiocraft/grids/musicgen/musicgen_pretrained_32khz_eval.py) - -#### Kullback-Leibler Divergence - -We provide a PyTorch implementation of the Kullback-Leibler Divergence computed over the probabilities -of the labels obtained by a state-of-the-art audio classifier. We provide our implementation of the KLD -using the [PaSST classifier](https://github.com/kkoutini/PaSST). - -In order to use the KLD metric over PaSST, you must install the PaSST library as an extra dependency: -```shell -pip install 'git+https://github.com/kkoutini/passt_hear21@0.0.19#egg=hear21passt' -``` - -Then similarly, you can use the metric activating the corresponding flag: - -```shell -# one could extend the kld metric with additional audio classifier models that can then be picked through the configuration -dora run <...> evaluate.metrics.kld=true metrics.kld.model=passt -``` - -#### Text consistency - -We provide a text-consistency metric, similarly to the MuLan Cycle Consistency from -[MusicLM](https://arxiv.org/pdf/2301.11325.pdf) or the CLAP score used in -[Make-An-Audio](https://arxiv.org/pdf/2301.12661v1.pdf). -More specifically, we provide a PyTorch implementation of a Text consistency metric -relying on a pre-trained [Contrastive Language-Audio Pretraining (CLAP)](https://github.com/LAION-AI/CLAP). - -Please install the CLAP library as an extra dependency prior to using the metric: -```shell -pip install laion_clap -``` - -Then similarly, you can use the metric activating the corresponding flag: - -```shell -# one could extend the text consistency metric with additional audio classifier models that can then be picked through the configuration -dora run ... 
evaluate.metrics.text_consistency=true metrics.text_consistency.model=clap -``` - -Note that the text consistency metric based on CLAP will require the CLAP checkpoint to be -provided in the configuration. - -#### Chroma cosine similarity - -Finally, as introduced in MusicGen, we provide a Chroma Cosine Similarity metric in PyTorch. -No specific requirement is needed for this metric. Please activate the metric at the -evaluation stage with the appropriate flag: - -```shell -dora run ... evaluate.metrics.chroma_cosine=true -``` - -#### Comparing against reconstructed audio - -For all the above audio generation metrics, we offer the option to compute the metric on the reconstructed audio -fed in EnCodec instead of the generated sample using the flag `.use_gt=true`. - -## Example usage - -You will find example of configuration for the different metrics introduced above in: -* The [musicgen's default solver](../config/solver/musicgen/default.yaml) for all audio generation metrics -* The [compression's default solver](../config/solver/compression/default.yaml) for all audio synthesis metrics - -Similarly, we provide different examples in our grids: -* [Evaluation with ViSQOL](../audiocraft/grids/compression/encodec_musicgen_32khz.py) -* [Evaluation with FAD and others](../audiocraft/grids/musicgen/musicgen_pretrained_32khz_eval.py) diff --git a/spaces/brjathu/HMR2.0/hmr2/models/components/pose_transformer.py b/spaces/brjathu/HMR2.0/hmr2/models/components/pose_transformer.py deleted file mode 100644 index ac04971407cb59637490cc4842f048b9bc4758be..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/hmr2/models/components/pose_transformer.py +++ /dev/null @@ -1,358 +0,0 @@ -from inspect import isfunction -from typing import Callable, Optional - -import torch -from einops import rearrange -from einops.layers.torch import Rearrange -from torch import nn - -from .t_cond_mlp import ( - AdaptiveLayerNorm1D, - FrequencyEmbedder, - normalization_layer, -) -# from .vit import Attention, FeedForward - - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -class PreNorm(nn.Module): - def __init__(self, dim: int, fn: Callable, norm: str = "layer", norm_cond_dim: int = -1): - super().__init__() - self.norm = normalization_layer(norm, dim, norm_cond_dim) - self.fn = fn - - def forward(self, x: torch.Tensor, *args, **kwargs): - if isinstance(self.norm, AdaptiveLayerNorm1D): - return self.fn(self.norm(x, *args), **kwargs) - else: - return self.fn(self.norm(x), **kwargs) - - -class FeedForward(nn.Module): - def __init__(self, dim, hidden_dim, dropout=0.0): - super().__init__() - self.net = nn.Sequential( - nn.Linear(dim, hidden_dim), - nn.GELU(), - nn.Dropout(dropout), - nn.Linear(hidden_dim, dim), - nn.Dropout(dropout), - ) - - def forward(self, x): - return self.net(x) - - -class Attention(nn.Module): - def __init__(self, dim, heads=8, dim_head=64, dropout=0.0): - super().__init__() - inner_dim = dim_head * heads - project_out = not (heads == 1 and dim_head == dim) - - self.heads = heads - self.scale = dim_head**-0.5 - - self.attend = nn.Softmax(dim=-1) - self.dropout = nn.Dropout(dropout) - - self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False) - - self.to_out = ( - nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout)) - if project_out - else nn.Identity() - ) - - def forward(self, x): - qkv = self.to_qkv(x).chunk(3, dim=-1) - q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", 
h=self.heads), qkv) - - dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale - - attn = self.attend(dots) - attn = self.dropout(attn) - - out = torch.matmul(attn, v) - out = rearrange(out, "b h n d -> b n (h d)") - return self.to_out(out) - - -class CrossAttention(nn.Module): - def __init__(self, dim, context_dim=None, heads=8, dim_head=64, dropout=0.0): - super().__init__() - inner_dim = dim_head * heads - project_out = not (heads == 1 and dim_head == dim) - - self.heads = heads - self.scale = dim_head**-0.5 - - self.attend = nn.Softmax(dim=-1) - self.dropout = nn.Dropout(dropout) - - context_dim = default(context_dim, dim) - self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias=False) - self.to_q = nn.Linear(dim, inner_dim, bias=False) - - self.to_out = ( - nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout)) - if project_out - else nn.Identity() - ) - - def forward(self, x, context=None): - context = default(context, x) - k, v = self.to_kv(context).chunk(2, dim=-1) - q = self.to_q(x) - q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), [q, k, v]) - - dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale - - attn = self.attend(dots) - attn = self.dropout(attn) - - out = torch.matmul(attn, v) - out = rearrange(out, "b h n d -> b n (h d)") - return self.to_out(out) - - -class Transformer(nn.Module): - def __init__( - self, - dim: int, - depth: int, - heads: int, - dim_head: int, - mlp_dim: int, - dropout: float = 0.0, - norm: str = "layer", - norm_cond_dim: int = -1, - ): - super().__init__() - self.layers = nn.ModuleList([]) - for _ in range(depth): - sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout) - ff = FeedForward(dim, mlp_dim, dropout=dropout) - self.layers.append( - nn.ModuleList( - [ - PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim), - PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim), - ] - ) - ) - - def forward(self, x: torch.Tensor, *args): - for attn, ff in self.layers: - x = attn(x, *args) + x - x = ff(x, *args) + x - return x - - -class TransformerCrossAttn(nn.Module): - def __init__( - self, - dim: int, - depth: int, - heads: int, - dim_head: int, - mlp_dim: int, - dropout: float = 0.0, - norm: str = "layer", - norm_cond_dim: int = -1, - context_dim: Optional[int] = None, - ): - super().__init__() - self.layers = nn.ModuleList([]) - for _ in range(depth): - sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout) - ca = CrossAttention( - dim, context_dim=context_dim, heads=heads, dim_head=dim_head, dropout=dropout - ) - ff = FeedForward(dim, mlp_dim, dropout=dropout) - self.layers.append( - nn.ModuleList( - [ - PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim), - PreNorm(dim, ca, norm=norm, norm_cond_dim=norm_cond_dim), - PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim), - ] - ) - ) - - def forward(self, x: torch.Tensor, *args, context=None, context_list=None): - if context_list is None: - context_list = [context] * len(self.layers) - if len(context_list) != len(self.layers): - raise ValueError(f"len(context_list) != len(self.layers) ({len(context_list)} != {len(self.layers)})") - - for i, (self_attn, cross_attn, ff) in enumerate(self.layers): - x = self_attn(x, *args) + x - x = cross_attn(x, *args, context=context_list[i]) + x - x = ff(x, *args) + x - return x - - -class DropTokenDropout(nn.Module): - def __init__(self, p: float = 0.1): - super().__init__() - if p < 0 or p > 1: - raise ValueError( - "dropout probability has to be between 0 and 1, " "but got 
{}".format(p) - ) - self.p = p - - def forward(self, x: torch.Tensor): - # x: (batch_size, seq_len, dim) - if self.training and self.p > 0: - zero_mask = torch.full_like(x[0, :, 0], self.p).bernoulli().bool() - # TODO: permutation idx for each batch using torch.argsort - if zero_mask.any(): - x = x[:, ~zero_mask, :] - return x - - -class ZeroTokenDropout(nn.Module): - def __init__(self, p: float = 0.1): - super().__init__() - if p < 0 or p > 1: - raise ValueError( - "dropout probability has to be between 0 and 1, " "but got {}".format(p) - ) - self.p = p - - def forward(self, x: torch.Tensor): - # x: (batch_size, seq_len, dim) - if self.training and self.p > 0: - zero_mask = torch.full_like(x[:, :, 0], self.p).bernoulli().bool() - # Zero-out the masked tokens - x[zero_mask, :] = 0 - return x - - -class TransformerEncoder(nn.Module): - def __init__( - self, - num_tokens: int, - token_dim: int, - dim: int, - depth: int, - heads: int, - mlp_dim: int, - dim_head: int = 64, - dropout: float = 0.0, - emb_dropout: float = 0.0, - emb_dropout_type: str = "drop", - emb_dropout_loc: str = "token", - norm: str = "layer", - norm_cond_dim: int = -1, - token_pe_numfreq: int = -1, - ): - super().__init__() - if token_pe_numfreq > 0: - token_dim_new = token_dim * (2 * token_pe_numfreq + 1) - self.to_token_embedding = nn.Sequential( - Rearrange("b n d -> (b n) d", n=num_tokens, d=token_dim), - FrequencyEmbedder(token_pe_numfreq, token_pe_numfreq - 1), - Rearrange("(b n) d -> b n d", n=num_tokens, d=token_dim_new), - nn.Linear(token_dim_new, dim), - ) - else: - self.to_token_embedding = nn.Linear(token_dim, dim) - self.pos_embedding = nn.Parameter(torch.randn(1, num_tokens, dim)) - if emb_dropout_type == "drop": - self.dropout = DropTokenDropout(emb_dropout) - elif emb_dropout_type == "zero": - self.dropout = ZeroTokenDropout(emb_dropout) - else: - raise ValueError(f"Unknown emb_dropout_type: {emb_dropout_type}") - self.emb_dropout_loc = emb_dropout_loc - - self.transformer = Transformer( - dim, depth, heads, dim_head, mlp_dim, dropout, norm=norm, norm_cond_dim=norm_cond_dim - ) - - def forward(self, inp: torch.Tensor, *args, **kwargs): - x = inp - - if self.emb_dropout_loc == "input": - x = self.dropout(x) - x = self.to_token_embedding(x) - - if self.emb_dropout_loc == "token": - x = self.dropout(x) - b, n, _ = x.shape - x += self.pos_embedding[:, :n] - - if self.emb_dropout_loc == "token_afterpos": - x = self.dropout(x) - x = self.transformer(x, *args) - return x - - -class TransformerDecoder(nn.Module): - def __init__( - self, - num_tokens: int, - token_dim: int, - dim: int, - depth: int, - heads: int, - mlp_dim: int, - dim_head: int = 64, - dropout: float = 0.0, - emb_dropout: float = 0.0, - emb_dropout_type: str = 'drop', - norm: str = "layer", - norm_cond_dim: int = -1, - context_dim: Optional[int] = None, - skip_token_embedding: bool = False, - ): - super().__init__() - if not skip_token_embedding: - self.to_token_embedding = nn.Linear(token_dim, dim) - else: - self.to_token_embedding = nn.Identity() - if token_dim != dim: - raise ValueError( - f"token_dim ({token_dim}) != dim ({dim}) when skip_token_embedding is True" - ) - - self.pos_embedding = nn.Parameter(torch.randn(1, num_tokens, dim)) - if emb_dropout_type == "drop": - self.dropout = DropTokenDropout(emb_dropout) - elif emb_dropout_type == "zero": - self.dropout = ZeroTokenDropout(emb_dropout) - elif emb_dropout_type == "normal": - self.dropout = nn.Dropout(emb_dropout) - - self.transformer = TransformerCrossAttn( - dim, - depth, - heads, - 
dim_head, - mlp_dim, - dropout, - norm=norm, - norm_cond_dim=norm_cond_dim, - context_dim=context_dim, - ) - - def forward(self, inp: torch.Tensor, *args, context=None, context_list=None): - x = self.to_token_embedding(inp) - b, n, _ = x.shape - - x = self.dropout(x) - x += self.pos_embedding[:, :n] - - x = self.transformer(x, *args, context=context, context_list=context_list) - return x - diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py b/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py deleted file mode 100644 index 63c54ee9a5ce2368494b775cc90fada1439feaa5..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mask_rcnn_R_101_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) - -train.max_iter *= 4 # 100ep -> 400ep - -lr_multiplier.scheduler.milestones = [ - milestone * 4 for milestone in lr_multiplier.scheduler.milestones -] -lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/dev/packaging/build_all_wheels.sh b/spaces/brjathu/HMR2.0/vendor/detectron2/dev/packaging/build_all_wheels.sh deleted file mode 100644 index 00f9de5e27867bf210438190c2951a571ac1f3fc..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/dev/packaging/build_all_wheels.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash -e -# Copyright (c) Facebook, Inc. and its affiliates. - -[[ -d "dev/packaging" ]] || { - echo "Please run this script at detectron2 root!" - exit 1 -} - -build_one() { - cu=$1 - pytorch_ver=$2 - - case "$cu" in - cu*) - container_name=manylinux-cuda${cu/cu/} - ;; - cpu) - container_name=manylinux-cuda101 - ;; - *) - echo "Unrecognized cu=$cu" - exit 1 - ;; - esac - - echo "Launching container $container_name ..." 
- container_id="$container_name"_"$cu"_"$pytorch_ver" - - py_versions=(3.7 3.8 3.9) - - for py in "${py_versions[@]}"; do - docker run -itd \ - --name "$container_id" \ - --mount type=bind,source="$(pwd)",target=/detectron2 \ - pytorch/$container_name - - cat <= 1: - for p in stem.parameters(): - p.requires_grad = False - stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem) - - # fmt: off - out_features = cfg.MODEL.RESNETS.OUT_FEATURES - depth = cfg.MODEL.RESNETS.DEPTH - num_groups = cfg.MODEL.RESNETS.NUM_GROUPS - width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP - bottleneck_channels = num_groups * width_per_group - in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS - out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS - stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 - res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION - deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE - deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED - deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS - num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH - branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS - trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE - test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX - # fmt: on - assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) - - num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] - - stages = [] - - res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5} - out_stage_idx = [res_stage_idx[f] for f in out_features] - trident_stage_idx = res_stage_idx[trident_stage] - max_stage_idx = max(out_stage_idx) - for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): - dilation = res5_dilation if stage_idx == 5 else 1 - first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 - stage_kargs = { - "num_blocks": num_blocks_per_stage[idx], - "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), - "in_channels": in_channels, - "bottleneck_channels": bottleneck_channels, - "out_channels": out_channels, - "num_groups": num_groups, - "norm": norm, - "stride_in_1x1": stride_in_1x1, - "dilation": dilation, - } - if stage_idx == trident_stage_idx: - assert not deform_on_per_stage[ - idx - ], "Not support deformable conv in Trident blocks yet." 
- stage_kargs["block_class"] = TridentBottleneckBlock - stage_kargs["num_branch"] = num_branch - stage_kargs["dilations"] = branch_dilations - stage_kargs["test_branch_idx"] = test_branch_idx - stage_kargs.pop("dilation") - elif deform_on_per_stage[idx]: - stage_kargs["block_class"] = DeformBottleneckBlock - stage_kargs["deform_modulated"] = deform_modulated - stage_kargs["deform_num_groups"] = deform_num_groups - else: - stage_kargs["block_class"] = BottleneckBlock - blocks = ( - make_trident_stage(**stage_kargs) - if stage_idx == trident_stage_idx - else ResNet.make_stage(**stage_kargs) - ) - in_channels = out_channels - out_channels *= 2 - bottleneck_channels *= 2 - - if freeze_at >= stage_idx: - for block in blocks: - block.freeze() - stages.append(blocks) - return ResNet(stem, stages, out_features=out_features) diff --git a/spaces/chansung/llama2-with-gradio-chat/js.py b/spaces/chansung/llama2-with-gradio-chat/js.py deleted file mode 100644 index 781e4c35f98903536b1fcdb075a331988698eeb9..0000000000000000000000000000000000000000 --- a/spaces/chansung/llama2-with-gradio-chat/js.py +++ /dev/null @@ -1,81 +0,0 @@ -GET_LOCAL_STORAGE = """ -function() { - globalThis.setStorage = (key, value)=>{ - localStorage.setItem(key, JSON.stringify(value)); - } - globalThis.getStorage = (key, value)=>{ - return JSON.parse(localStorage.getItem(key)); - } - - var local_data = getStorage('local_data'); - var history = []; - - if(local_data) { - local_data[0].pingpongs.forEach(element =>{ - history.push([element.ping, element.pong]); - }); - } - else { - local_data = []; - for (let step = 0; step < 10; step++) { - local_data.push({'ctx': '', 'pingpongs':[]}); - } - setStorage('local_data', local_data); - } - - if(history.length == 0) { - document.querySelector("#initial-popup").classList.remove('hide'); - } - - return [history, local_data]; -} -""" - -UPDATE_LEFT_BTNS_STATE = """ -(v)=>{ - document.querySelector('.custom-btn-highlight').classList.add('custom-btn'); - document.querySelector('.custom-btn-highlight').classList.remove('custom-btn-highlight'); - - const elements = document.querySelectorAll(".custom-btn"); - - for(var i=0; i < elements.length; i++) { - const element = elements[i]; - if(element.textContent == v) { - console.log(v); - element.classList.add('custom-btn-highlight'); - element.classList.remove('custom-btn'); - break; - } - } -}""" - -UPDATE_PLACEHOLDERS = """ -function update_placeholders(txt, placeholder_txt1, placeholder_txt2, placeholder_txt3) { - let example_prompt = txt; - - const regex = /\[([^\]]*)\]/g; - const matches = txt.match(regex); - - if (matches != null) { - if (matches.length >= 1) { - if (placeholder_txt1 !== "") { - example_prompt = example_prompt.replace(matches[0], placeholder_txt1); - } - } - - if (matches.length >= 2) { - if (placeholder_txt2 !== "") { - example_prompt = example_prompt.replace(matches[1], placeholder_txt2); - } - } - - if (matches.length >= 3) { - if (placeholder_txt1 !== "") { - example_prompt = example_prompt.replace(matches[2], placeholder_txt3); - } - } - } - - return example_prompt -} -""" \ No newline at end of file diff --git a/spaces/chansung/textual-inversion-pipeline/README.md b/spaces/chansung/textual-inversion-pipeline/README.md deleted file mode 100644 index 191f758e99d9d9e0de6b97eed38c8bebc6ce32b9..0000000000000000000000000000000000000000 --- a/spaces/chansung/textual-inversion-pipeline/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Textual Inversion -emoji: 🐶 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 
3.15.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- diff --git a/spaces/chansung/zero2story/modules/llms/__init__.py b/spaces/chansung/zero2story/modules/llms/__init__.py deleted file mode 100644 index 1a8850a327b71c18b8324cf3e953f1a5f210aa83..0000000000000000000000000000000000000000 --- a/spaces/chansung/zero2story/modules/llms/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .llm_factory_abstracts import ( - LLMFactory, - PromptFmt, PromptManager, PPManager, UIPPManager, LLMService -) - -from .palm_service import ( - PaLMFactory, - PaLMChatPromptFmt, PaLMPromptManager, PaLMChatPPManager, GradioPaLMChatPPManager, PaLMService, -) - -from .utils import get_llm_factory \ No newline at end of file diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/data/data_prefetcher.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/data/data_prefetcher.py deleted file mode 100644 index a118cf4e4ef968c9cf89a72457ede8c63bdf2cce..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/data/data_prefetcher.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. - -import torch - - -class DataPrefetcher: - """ - DataPrefetcher is inspired by code of following file: - https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py - It could speedup your pytorch dataloader. For more information, please check - https://github.com/NVIDIA/apex/issues/304#issuecomment-493562789. - """ - - def __init__(self, loader): - self.loader = iter(loader) - self.stream = torch.cuda.Stream() - self.input_cuda = self._input_cuda_for_image - self.record_stream = DataPrefetcher._record_stream_for_image - self.preload() - - def preload(self): - try: - self.next_input, self.next_target, _, _ = next(self.loader) - except StopIteration: - self.next_input = None - self.next_target = None - return - - with torch.cuda.stream(self.stream): - self.input_cuda() - self.next_target = self.next_target.cuda(non_blocking=True) - - def next(self): - torch.cuda.current_stream().wait_stream(self.stream) - input = self.next_input - target = self.next_target - if input is not None: - self.record_stream(input) - if target is not None: - target.record_stream(torch.cuda.current_stream()) - self.preload() - return input, target - - def _input_cuda_for_image(self): - self.next_input = self.next_input.cuda(non_blocking=True) - - @staticmethod - def _record_stream_for_image(input): - input.record_stream(torch.cuda.current_stream()) diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/summarization/README.md b/spaces/chendl/compositional_test/transformers/examples/pytorch/summarization/README.md deleted file mode 100644 index db7f8f4061a5c98099b2fd2f5c4289b6db548080..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/summarization/README.md +++ /dev/null @@ -1,196 +0,0 @@ - - -## Summarization - -This directory contains examples for finetuning and evaluating transformers on summarization tasks. -Please tag @patil-suraj with any issues/unexpected behaviors, or send a PR! -For deprecated `bertabs` instructions, see [`bertabs/README.md`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/bertabs/README.md). -For the old `finetune_trainer.py` and related utils, see [`examples/legacy/seq2seq`](https://github.com/huggingface/transformers/blob/main/examples/legacy/seq2seq). 
- -### Supported Architectures - -- `BartForConditionalGeneration` -- `FSMTForConditionalGeneration` (translation only) -- `MBartForConditionalGeneration` -- `MarianMTModel` -- `PegasusForConditionalGeneration` -- `T5ForConditionalGeneration` -- `MT5ForConditionalGeneration` - -`run_summarization.py` is a lightweight example of how to download and preprocess a dataset from the [🤗 Datasets](https://github.com/huggingface/datasets) library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it. - -For custom datasets in `jsonlines` format please see: https://huggingface.co/docs/datasets/loading_datasets.html#json-files -and you also will find examples of these below. - -## With Trainer - -Here is an example on a summarization task: -```bash -python examples/pytorch/summarization/run_summarization.py \ - --model_name_or_path t5-small \ - --do_train \ - --do_eval \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --overwrite_output_dir \ - --predict_with_generate -``` - -Only T5 models `t5-small`, `t5-base`, `t5-large`, `t5-3b` and `t5-11b` must use an additional argument: `--source_prefix "summarize: "`. - -We used CNN/DailyMail dataset in this example as `t5-small` was trained on it and one can get good scores even when pre-training with a very small sample. - -Extreme Summarization (XSum) Dataset is another commonly used dataset for the task of summarization. To use it replace `--dataset_name cnn_dailymail --dataset_config "3.0.0"` with `--dataset_name xsum`. - -And here is how you would use it on your own files, after adjusting the values for the arguments -`--train_file`, `--validation_file`, `--text_column` and `--summary_column` to match your setup: - -```bash -python examples/pytorch/summarization/run_summarization.py \ - --model_name_or_path t5-small \ - --do_train \ - --do_eval \ - --train_file path_to_csv_or_jsonlines_file \ - --validation_file path_to_csv_or_jsonlines_file \ - --source_prefix "summarize: " \ - --output_dir /tmp/tst-summarization \ - --overwrite_output_dir \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=4 \ - --predict_with_generate -``` - -The task of summarization supports custom CSV and JSONLINES formats. - -#### Custom CSV Files - -If it's a csv file the training and validation files should have a column for the inputs texts and a column for the summaries. - -If the csv file has just two columns as in the following example: - -```csv -text,summary -"I'm sitting here in a boring room. It's just another rainy Sunday afternoon. I'm wasting my time I got nothing to do. I'm hanging around I'm waiting for you. But nothing ever happens. And I wonder","I'm sitting in a room where I'm waiting for something to happen" -"I see trees so green, red roses too. I see them bloom for me and you. And I think to myself what a wonderful world. I see skies so blue and clouds so white. The bright blessed day, the dark sacred night. And I think to myself what a wonderful world.","I'm a gardener and I'm a big fan of flowers." -"Christmas time is here. Happiness and cheer. Fun for all that children call. Their favorite time of the year. Snowflakes in the air. Carols everywhere. Olden times and ancient rhymes. Of love and dreams to share","It's that time of year again." -``` - -The first column is assumed to be for `text` and the second is for summary. 
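As a quick sanity check before launching training, the CSV can be loaded with the 🤗 Datasets library to confirm the column names. This is a minimal sketch, not part of the script itself: `my_data.csv` is a placeholder file name, and the snippet only mirrors the extension-based loading that the example scripts perform.

```python
from datasets import load_dataset

# Load the custom CSV the same general way the example scripts do (extension-based loading).
raw = load_dataset("csv", data_files={"train": "my_data.csv"})  # "my_data.csv" is hypothetical

# Verify the columns that will be passed as --text_column / --summary_column.
print(raw["train"].column_names)  # expected: ['text', 'summary']
print(raw["train"][0])            # first record as a dict
```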
- -If the csv file has multiple columns, you can then specify the names of the columns to use: - -```bash - --text_column text_column_name \ - --summary_column summary_column_name \ -``` - -For example if the columns were: - -```csv -id,date,text,summary -``` - -and you wanted to select only `text` and `summary`, then you'd pass these additional arguments: - -```bash - --text_column text \ - --summary_column summary \ -``` - -#### Custom JSONLINES Files - -The second supported format is jsonlines. Here is an example of a jsonlines custom data file. - - -```json -{"text": "I'm sitting here in a boring room. It's just another rainy Sunday afternoon. I'm wasting my time I got nothing to do. I'm hanging around I'm waiting for you. But nothing ever happens. And I wonder", "summary": "I'm sitting in a room where I'm waiting for something to happen"} -{"text": "I see trees so green, red roses too. I see them bloom for me and you. And I think to myself what a wonderful world. I see skies so blue and clouds so white. The bright blessed day, the dark sacred night. And I think to myself what a wonderful world.", "summary": "I'm a gardener and I'm a big fan of flowers."} -{"text": "Christmas time is here. Happiness and cheer. Fun for all that children call. Their favorite time of the year. Snowflakes in the air. Carols everywhere. Olden times and ancient rhymes. Of love and dreams to share", "summary": "It's that time of year again."} -``` - -Same as with the CSV files, by default the first value will be used as the text record and the second as the summary record. Therefore you can use any key names for the entries, in this example `text` and `summary` were used. - -And as with the CSV files, you can specify which values to select from the file, by explicitly specifying the corresponding key names. In our example this again would be: - -```bash - --text_column text \ - --summary_column summary \ -``` - -## With Accelerate - -Based on the script [`run_summarization_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/summarization/run_summarization_no_trainer.py). - -Like `run_summarization.py`, this script allows you to fine-tune any of the models supported on a -summarization task, the main difference is that this -script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like. - -It offers less options than the script with `Trainer` (for instance you can easily change the options for the optimizer -or the dataloaders directly in the script) but still run in a distributed setup, on TPU and supports mixed precision by -the mean of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally -after installing it: - -```bash -pip install git+https://github.com/huggingface/accelerate -``` - -then - -```bash -python run_summarization_no_trainer.py \ - --model_name_or_path t5-small \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir ~/tmp/tst-summarization -``` - -You can then use your usual launchers to run in it in a distributed environment, but the easiest way is to run - -```bash -accelerate config -``` - -and reply to the questions asked. Then - -```bash -accelerate test -``` - -that will check everything is ready for training. 
Finally, you can launch training with - -```bash -accelerate launch run_summarization_no_trainer.py \ - --model_name_or_path t5-small \ - --dataset_name cnn_dailymail \ - --dataset_config "3.0.0" \ - --source_prefix "summarize: " \ - --output_dir ~/tmp/tst-summarization -``` - -This command is the same and will work for: - -- a CPU-only setup -- a setup with one GPU -- a distributed training with several GPUs (single or multi node) -- a training on TPUs - -Note that this library is in alpha release so your feedback is more than welcome if you encounter any problem using it. diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cffi/cparser.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cffi/cparser.py deleted file mode 100644 index 74830e913f21409f536febddae7769d0364cd24b..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cffi/cparser.py +++ /dev/null @@ -1,1006 +0,0 @@ -from . import model -from .commontypes import COMMON_TYPES, resolve_common_type -from .error import FFIError, CDefError -try: - from . import _pycparser as pycparser -except ImportError: - import pycparser -import weakref, re, sys - -try: - if sys.version_info < (3,): - import thread as _thread - else: - import _thread - lock = _thread.allocate_lock() -except ImportError: - lock = None - -def _workaround_for_static_import_finders(): - # Issue #392: packaging tools like cx_Freeze can not find these - # because pycparser uses exec dynamic import. This is an obscure - # workaround. This function is never called. - import pycparser.yacctab - import pycparser.lextab - -CDEF_SOURCE_STRING = "" -_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", - re.DOTALL | re.MULTILINE) -_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" - r"\b((?:[^\n\\]|\\.)*?)$", - re.DOTALL | re.MULTILINE) -_r_line_directive = re.compile(r"^[ \t]*#[ \t]*(?:line|\d+)\b.*$", re.MULTILINE) -_r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}") -_r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$") -_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") -_r_words = re.compile(r"\w+|\S") -_parser_cache = None -_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) -_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") -_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") -_r_cdecl = re.compile(r"\b__cdecl\b") -_r_extern_python = re.compile(r'\bextern\s*"' - r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') -_r_star_const_space = re.compile( # matches "* const " - r"[*]\s*((const|volatile|restrict)\b\s*)+") -_r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+" - r"\.\.\.") -_r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.") - -def _get_parser(): - global _parser_cache - if _parser_cache is None: - _parser_cache = pycparser.CParser() - return _parser_cache - -def _workaround_for_old_pycparser(csource): - # Workaround for a pycparser issue (fixed between pycparser 2.10 and - # 2.14): "char*const***" gives us a wrong syntax tree, the same as - # for "char***(*const)". This means we can't tell the difference - # afterwards. But "char(*const(***))" gives us the right syntax - # tree. The issue only occurs if there are several stars in - # sequence with no parenthesis inbetween, just possibly qualifiers. 
- # Attempt to fix it by adding some parentheses in the source: each - # time we see "* const" or "* const *", we add an opening - # parenthesis before each star---the hard part is figuring out where - # to close them. - parts = [] - while True: - match = _r_star_const_space.search(csource) - if not match: - break - #print repr(''.join(parts)+csource), '=>', - parts.append(csource[:match.start()]) - parts.append('('); closing = ')' - parts.append(match.group()) # e.g. "* const " - endpos = match.end() - if csource.startswith('*', endpos): - parts.append('('); closing += ')' - level = 0 - i = endpos - while i < len(csource): - c = csource[i] - if c == '(': - level += 1 - elif c == ')': - if level == 0: - break - level -= 1 - elif c in ',;=': - if level == 0: - break - i += 1 - csource = csource[endpos:i] + closing + csource[i:] - #print repr(''.join(parts)+csource) - parts.append(csource) - return ''.join(parts) - -def _preprocess_extern_python(csource): - # input: `extern "Python" int foo(int);` or - # `extern "Python" { int foo(int); }` - # output: - # void __cffi_extern_python_start; - # int foo(int); - # void __cffi_extern_python_stop; - # - # input: `extern "Python+C" int foo(int);` - # output: - # void __cffi_extern_python_plus_c_start; - # int foo(int); - # void __cffi_extern_python_stop; - parts = [] - while True: - match = _r_extern_python.search(csource) - if not match: - break - endpos = match.end() - 1 - #print - #print ''.join(parts)+csource - #print '=>' - parts.append(csource[:match.start()]) - if 'C' in match.group(1): - parts.append('void __cffi_extern_python_plus_c_start; ') - else: - parts.append('void __cffi_extern_python_start; ') - if csource[endpos] == '{': - # grouping variant - closing = csource.find('}', endpos) - if closing < 0: - raise CDefError("'extern \"Python\" {': no '}' found") - if csource.find('{', endpos + 1, closing) >= 0: - raise NotImplementedError("cannot use { } inside a block " - "'extern \"Python\" { ... }'") - parts.append(csource[endpos+1:closing]) - csource = csource[closing+1:] - else: - # non-grouping variant - semicolon = csource.find(';', endpos) - if semicolon < 0: - raise CDefError("'extern \"Python\": no ';' found") - parts.append(csource[endpos:semicolon+1]) - csource = csource[semicolon+1:] - parts.append(' void __cffi_extern_python_stop;') - #print ''.join(parts)+csource - #print - parts.append(csource) - return ''.join(parts) - -def _warn_for_string_literal(csource): - if '"' not in csource: - return - for line in csource.splitlines(): - if '"' in line and not line.lstrip().startswith('#'): - import warnings - warnings.warn("String literal found in cdef() or type source. " - "String literals are ignored here, but you should " - "remove them anyway because some character sequences " - "confuse pre-parsing.") - break - -def _warn_for_non_extern_non_static_global_variable(decl): - if not decl.storage: - import warnings - warnings.warn("Global variable '%s' in cdef(): for consistency " - "with C it should have a storage class specifier " - "(usually 'extern')" % (decl.name,)) - -def _remove_line_directives(csource): - # _r_line_directive matches whole lines, without the final \n, if they - # start with '#line' with some spacing allowed, or '#NUMBER'. This - # function stores them away and replaces them with exactly the string - # '#line@N', where N is the index in the list 'line_directives'. 
- line_directives = [] - def replace(m): - i = len(line_directives) - line_directives.append(m.group()) - return '#line@%d' % i - csource = _r_line_directive.sub(replace, csource) - return csource, line_directives - -def _put_back_line_directives(csource, line_directives): - def replace(m): - s = m.group() - if not s.startswith('#line@'): - raise AssertionError("unexpected #line directive " - "(should have been processed and removed") - return line_directives[int(s[6:])] - return _r_line_directive.sub(replace, csource) - -def _preprocess(csource): - # First, remove the lines of the form '#line N "filename"' because - # the "filename" part could confuse the rest - csource, line_directives = _remove_line_directives(csource) - # Remove comments. NOTE: this only work because the cdef() section - # should not contain any string literals (except in line directives)! - def replace_keeping_newlines(m): - return ' ' + m.group().count('\n') * '\n' - csource = _r_comment.sub(replace_keeping_newlines, csource) - # Remove the "#define FOO x" lines - macros = {} - for match in _r_define.finditer(csource): - macroname, macrovalue = match.groups() - macrovalue = macrovalue.replace('\\\n', '').strip() - macros[macroname] = macrovalue - csource = _r_define.sub('', csource) - # - if pycparser.__version__ < '2.14': - csource = _workaround_for_old_pycparser(csource) - # - # BIG HACK: replace WINAPI or __stdcall with "volatile const". - # It doesn't make sense for the return type of a function to be - # "volatile volatile const", so we abuse it to detect __stdcall... - # Hack number 2 is that "int(volatile *fptr)();" is not valid C - # syntax, so we place the "volatile" before the opening parenthesis. - csource = _r_stdcall2.sub(' volatile volatile const(', csource) - csource = _r_stdcall1.sub(' volatile volatile const ', csource) - csource = _r_cdecl.sub(' ', csource) - # - # Replace `extern "Python"` with start/end markers - csource = _preprocess_extern_python(csource) - # - # Now there should not be any string literal left; warn if we get one - _warn_for_string_literal(csource) - # - # Replace "[...]" with "[__dotdotdotarray__]" - csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) - # - # Replace "...}" with "__dotdotdotNUM__}". This construction should - # occur only at the end of enums; at the end of structs we have "...;}" - # and at the end of vararg functions "...);". Also replace "=...[,}]" - # with ",__dotdotdotNUM__[,}]": this occurs in the enums too, when - # giving an unknown value. - matches = list(_r_partial_enum.finditer(csource)) - for number, match in enumerate(reversed(matches)): - p = match.start() - if csource[p] == '=': - p2 = csource.find('...', p, match.end()) - assert p2 > p - csource = '%s,__dotdotdot%d__ %s' % (csource[:p], number, - csource[p2+3:]) - else: - assert csource[p:p+3] == '...' - csource = '%s __dotdotdot%d__ %s' % (csource[:p], number, - csource[p+3:]) - # Replace "int ..." or "unsigned long int..." with "__dotdotdotint__" - csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource) - # Replace "float ..." or "double..." with "__dotdotdotfloat__" - csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource) - # Replace all remaining "..." with the same name, "__dotdotdot__", - # which is declared with a typedef for the purpose of C parsing. 
- csource = csource.replace('...', ' __dotdotdot__ ') - # Finally, put back the line directives - csource = _put_back_line_directives(csource, line_directives) - return csource, macros - -def _common_type_names(csource): - # Look in the source for what looks like usages of types from the - # list of common types. A "usage" is approximated here as the - # appearance of the word, minus a "definition" of the type, which - # is the last word in a "typedef" statement. Approximative only - # but should be fine for all the common types. - look_for_words = set(COMMON_TYPES) - look_for_words.add(';') - look_for_words.add(',') - look_for_words.add('(') - look_for_words.add(')') - look_for_words.add('typedef') - words_used = set() - is_typedef = False - paren = 0 - previous_word = '' - for word in _r_words.findall(csource): - if word in look_for_words: - if word == ';': - if is_typedef: - words_used.discard(previous_word) - look_for_words.discard(previous_word) - is_typedef = False - elif word == 'typedef': - is_typedef = True - paren = 0 - elif word == '(': - paren += 1 - elif word == ')': - paren -= 1 - elif word == ',': - if is_typedef and paren == 0: - words_used.discard(previous_word) - look_for_words.discard(previous_word) - else: # word in COMMON_TYPES - words_used.add(word) - previous_word = word - return words_used - - -class Parser(object): - - def __init__(self): - self._declarations = {} - self._included_declarations = set() - self._anonymous_counter = 0 - self._structnode2type = weakref.WeakKeyDictionary() - self._options = {} - self._int_constants = {} - self._recomplete = [] - self._uses_new_feature = None - - def _parse(self, csource): - csource, macros = _preprocess(csource) - # XXX: for more efficiency we would need to poke into the - # internals of CParser... the following registers the - # typedefs, because their presence or absence influences the - # parsing itself (but what they are typedef'ed to plays no role) - ctn = _common_type_names(csource) - typenames = [] - for name in sorted(self._declarations): - if name.startswith('typedef '): - name = name[8:] - typenames.append(name) - ctn.discard(name) - typenames += sorted(ctn) - # - csourcelines = [] - csourcelines.append('# 1 ""') - for typename in typenames: - csourcelines.append('typedef int %s;' % typename) - csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,' - ' __dotdotdot__;') - # this forces pycparser to consider the following in the file - # called from line 1 - csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,)) - csourcelines.append(csource) - fullcsource = '\n'.join(csourcelines) - if lock is not None: - lock.acquire() # pycparser is not thread-safe... - try: - ast = _get_parser().parse(fullcsource) - except pycparser.c_parser.ParseError as e: - self.convert_pycparser_error(e, csource) - finally: - if lock is not None: - lock.release() - # csource will be used to find buggy source text - return ast, macros, csource - - def _convert_pycparser_error(self, e, csource): - # xxx look for ":NUM:" at the start of str(e) - # and interpret that as a line number. This will not work if - # the user gives explicit ``# NUM "FILE"`` directives. 
- line = None - msg = str(e) - match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg) - if match: - linenum = int(match.group(1), 10) - csourcelines = csource.splitlines() - if 1 <= linenum <= len(csourcelines): - line = csourcelines[linenum-1] - return line - - def convert_pycparser_error(self, e, csource): - line = self._convert_pycparser_error(e, csource) - - msg = str(e) - if line: - msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) - else: - msg = 'parse error\n%s' % (msg,) - raise CDefError(msg) - - def parse(self, csource, override=False, packed=False, pack=None, - dllexport=False): - if packed: - if packed != True: - raise ValueError("'packed' should be False or True; use " - "'pack' to give another value") - if pack: - raise ValueError("cannot give both 'pack' and 'packed'") - pack = 1 - elif pack: - if pack & (pack - 1): - raise ValueError("'pack' must be a power of two, not %r" % - (pack,)) - else: - pack = 0 - prev_options = self._options - try: - self._options = {'override': override, - 'packed': pack, - 'dllexport': dllexport} - self._internal_parse(csource) - finally: - self._options = prev_options - - def _internal_parse(self, csource): - ast, macros, csource = self._parse(csource) - # add the macros - self._process_macros(macros) - # find the first "__dotdotdot__" and use that as a separator - # between the repeated typedefs and the real csource - iterator = iter(ast.ext) - for decl in iterator: - if decl.name == '__dotdotdot__': - break - else: - assert 0 - current_decl = None - # - try: - self._inside_extern_python = '__cffi_extern_python_stop' - for decl in iterator: - current_decl = decl - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise CDefError("typedef does not declare any name", - decl) - quals = 0 - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and - decl.type.type.names[-1].startswith('__dotdotdot')): - realtype = self._get_unknown_type(decl) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names[-1].startswith('__dotdotdot')): - realtype = self._get_unknown_ptr_type(decl) - else: - realtype, quals = self._get_type_and_quals( - decl.type, name=decl.name, partial_length_ok=True, - typedef_example="*(%s *)0" % (decl.name,)) - self._declare('typedef ' + decl.name, realtype, quals=quals) - elif decl.__class__.__name__ == 'Pragma': - pass # skip pragma, only in pycparser 2.15 - else: - raise CDefError("unexpected <%s>: this construct is valid " - "C but not valid in cdef()" % - decl.__class__.__name__, decl) - except CDefError as e: - if len(e.args) == 1: - e.args = e.args + (current_decl,) - raise - except FFIError as e: - msg = self._convert_pycparser_error(e, csource) - if msg: - e.args = (e.args[0] + "\n *** Err: %s" % msg,) - raise - - def _add_constants(self, key, val): - if key in self._int_constants: - if self._int_constants[key] == val: - return # ignore identical double declarations - raise FFIError( - "multiple declarations of constant: %s" % (key,)) - self._int_constants[key] = val - - def _add_integer_constant(self, name, int_str): - int_str = int_str.lower().rstrip("ul") - neg = int_str.startswith('-') - if neg: - int_str = int_str[1:] - # "010" is not valid oct in py3 - if (int_str.startswith("0") and int_str != '0' - and not int_str.startswith("0x")): - int_str = "0o" + 
int_str[1:] - pyvalue = int(int_str, 0) - if neg: - pyvalue = -pyvalue - self._add_constants(name, pyvalue) - self._declare('macro ' + name, pyvalue) - - def _process_macros(self, macros): - for key, value in macros.items(): - value = value.strip() - if _r_int_literal.match(value): - self._add_integer_constant(key, value) - elif value == '...': - self._declare('macro ' + key, value) - else: - raise CDefError( - 'only supports one of the following syntax:\n' - ' #define %s ... (literally dot-dot-dot)\n' - ' #define %s NUMBER (with NUMBER an integer' - ' constant, decimal/hex/octal)\n' - 'got:\n' - ' #define %s %s' - % (key, key, key, value)) - - def _declare_function(self, tp, quals, decl): - tp = self._get_type_pointer(tp, quals) - if self._options.get('dllexport'): - tag = 'dllexport_python ' - elif self._inside_extern_python == '__cffi_extern_python_start': - tag = 'extern_python ' - elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': - tag = 'extern_python_plus_c ' - else: - tag = 'function ' - self._declare(tag + decl.name, tp) - - def _parse_decl(self, decl): - node = decl.type - if isinstance(node, pycparser.c_ast.FuncDecl): - tp, quals = self._get_type_and_quals(node, name=decl.name) - assert isinstance(tp, model.RawFunctionType) - self._declare_function(tp, quals, decl) - else: - if isinstance(node, pycparser.c_ast.Struct): - self._get_struct_union_enum_type('struct', node) - elif isinstance(node, pycparser.c_ast.Union): - self._get_struct_union_enum_type('union', node) - elif isinstance(node, pycparser.c_ast.Enum): - self._get_struct_union_enum_type('enum', node) - elif not decl.name: - raise CDefError("construct does not declare any variable", - decl) - # - if decl.name: - tp, quals = self._get_type_and_quals(node, - partial_length_ok=True) - if tp.is_raw_function: - self._declare_function(tp, quals, decl) - elif (tp.is_integer_type() and - hasattr(decl, 'init') and - hasattr(decl.init, 'value') and - _r_int_literal.match(decl.init.value)): - self._add_integer_constant(decl.name, decl.init.value) - elif (tp.is_integer_type() and - isinstance(decl.init, pycparser.c_ast.UnaryOp) and - decl.init.op == '-' and - hasattr(decl.init.expr, 'value') and - _r_int_literal.match(decl.init.expr.value)): - self._add_integer_constant(decl.name, - '-' + decl.init.expr.value) - elif (tp is model.void_type and - decl.name.startswith('__cffi_extern_python_')): - # hack: `extern "Python"` in the C source is replaced - # with "void __cffi_extern_python_start;" and - # "void __cffi_extern_python_stop;" - self._inside_extern_python = decl.name - else: - if self._inside_extern_python !='__cffi_extern_python_stop': - raise CDefError( - "cannot declare constants or " - "variables with 'extern \"Python\"'") - if (quals & model.Q_CONST) and not tp.is_array_type: - self._declare('constant ' + decl.name, tp, quals=quals) - else: - _warn_for_non_extern_non_static_global_variable(decl) - self._declare('variable ' + decl.name, tp, quals=quals) - - def parse_type(self, cdecl): - return self.parse_type_and_quals(cdecl)[0] - - def parse_type_and_quals(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] - assert not macros - exprnode = ast.ext[-1].type.args.params[0] - if isinstance(exprnode, pycparser.c_ast.ID): - raise CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type_and_quals(exprnode.type) - - def _declare(self, name, obj, included=False, quals=0): - if name in self._declarations: - prevobj, prevquals = self._declarations[name] - if 
prevobj is obj and prevquals == quals: - return - if not self._options.get('override'): - raise FFIError( - "multiple declarations of %s (for interactive usage, " - "try cdef(xx, override=True))" % (name,)) - assert '__dotdotdot__' not in name.split() - self._declarations[name] = (obj, quals) - if included: - self._included_declarations.add(obj) - - def _extract_quals(self, type): - quals = 0 - if isinstance(type, (pycparser.c_ast.TypeDecl, - pycparser.c_ast.PtrDecl)): - if 'const' in type.quals: - quals |= model.Q_CONST - if 'volatile' in type.quals: - quals |= model.Q_VOLATILE - if 'restrict' in type.quals: - quals |= model.Q_RESTRICT - return quals - - def _get_type_pointer(self, type, quals, declname=None): - if isinstance(type, model.RawFunctionType): - return type.as_function_pointer() - if (isinstance(type, model.StructOrUnionOrEnum) and - type.name.startswith('$') and type.name[1:].isdigit() and - type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname, quals) - return model.PointerType(type, quals) - - def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False, - typedef_example=None): - # first, dereference typedefs, if we have it already parsed, we're good - if (isinstance(typenode, pycparser.c_ast.TypeDecl) and - isinstance(typenode.type, pycparser.c_ast.IdentifierType) and - len(typenode.type.names) == 1 and - ('typedef ' + typenode.type.names[0]) in self._declarations): - tp, quals = self._declarations['typedef ' + typenode.type.names[0]] - quals |= self._extract_quals(typenode) - return tp, quals - # - if isinstance(typenode, pycparser.c_ast.ArrayDecl): - # array type - if typenode.dim is None: - length = None - else: - length = self._parse_constant( - typenode.dim, partial_length_ok=partial_length_ok) - # a hack: in 'typedef int foo_t[...][...];', don't use '...' as - # the length but use directly the C expression that would be - # generated by recompiler.py. This lets the typedef be used in - # many more places within recompiler.py - if typedef_example is not None: - if length == '...': - length = '_cffi_array_len(%s)' % (typedef_example,) - typedef_example = "*" + typedef_example - # - tp, quals = self._get_type_and_quals(typenode.type, - partial_length_ok=partial_length_ok, - typedef_example=typedef_example) - return model.ArrayType(tp, length), quals - # - if isinstance(typenode, pycparser.c_ast.PtrDecl): - # pointer type - itemtype, itemquals = self._get_type_and_quals(typenode.type) - tp = self._get_type_pointer(itemtype, itemquals, declname=name) - quals = self._extract_quals(typenode) - return tp, quals - # - if isinstance(typenode, pycparser.c_ast.TypeDecl): - quals = self._extract_quals(typenode) - type = typenode.type - if isinstance(type, pycparser.c_ast.IdentifierType): - # assume a primitive type. 
get it from .names, but reduce - # synonyms to a single chosen combination - names = list(type.names) - if names != ['signed', 'char']: # keep this unmodified - prefixes = {} - while names: - name = names[0] - if name in ('short', 'long', 'signed', 'unsigned'): - prefixes[name] = prefixes.get(name, 0) + 1 - del names[0] - else: - break - # ignore the 'signed' prefix below, and reorder the others - newnames = [] - for prefix in ('unsigned', 'short', 'long'): - for i in range(prefixes.get(prefix, 0)): - newnames.append(prefix) - if not names: - names = ['int'] # implicitly - if names == ['int']: # but kill it if 'short' or 'long' - if 'short' in prefixes or 'long' in prefixes: - names = [] - names = newnames + names - ident = ' '.join(names) - if ident == 'void': - return model.void_type, quals - if ident == '__dotdotdot__': - raise FFIError(':%d: bad usage of "..."' % - typenode.coord.line) - tp0, quals0 = resolve_common_type(self, ident) - return tp0, (quals | quals0) - # - if isinstance(type, pycparser.c_ast.Struct): - # 'struct foobar' - tp = self._get_struct_union_enum_type('struct', type, name) - return tp, quals - # - if isinstance(type, pycparser.c_ast.Union): - # 'union foobar' - tp = self._get_struct_union_enum_type('union', type, name) - return tp, quals - # - if isinstance(type, pycparser.c_ast.Enum): - # 'enum foobar' - tp = self._get_struct_union_enum_type('enum', type, name) - return tp, quals - # - if isinstance(typenode, pycparser.c_ast.FuncDecl): - # a function type - return self._parse_function_type(typenode, name), 0 - # - # nested anonymous structs or unions end up here - if isinstance(typenode, pycparser.c_ast.Struct): - return self._get_struct_union_enum_type('struct', typenode, name, - nested=True), 0 - if isinstance(typenode, pycparser.c_ast.Union): - return self._get_struct_union_enum_type('union', typenode, name, - nested=True), 0 - # - raise FFIError(":%d: bad or unsupported type declaration" % - typenode.coord.line) - - def _parse_function_type(self, typenode, funcname=None): - params = list(getattr(typenode.args, 'params', [])) - for i, arg in enumerate(params): - if not hasattr(arg, 'type'): - raise CDefError("%s arg %d: unknown type '%s'" - " (if you meant to use the old C syntax of giving" - " untyped arguments, it is not supported)" - % (funcname or 'in expression', i + 1, - getattr(arg, 'name', '?'))) - ellipsis = ( - len(params) > 0 and - isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and - isinstance(params[-1].type.type, - pycparser.c_ast.IdentifierType) and - params[-1].type.type.names == ['__dotdotdot__']) - if ellipsis: - params.pop() - if not params: - raise CDefError( - "%s: a function with only '(...)' as argument" - " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) - for argdeclnode in params] - if not ellipsis and args == [model.void_type]: - args = [] - result, quals = self._get_type_and_quals(typenode.type) - # the 'quals' on the result type are ignored. HACK: we absure them - # to detect __stdcall functions: we textually replace "__stdcall" - # with "volatile volatile const" above. 
- abi = None - if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway - if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: - abi = '__stdcall' - return model.RawFunctionType(tuple(args), result, ellipsis, abi) - - def _as_func_arg(self, type, quals): - if isinstance(type, model.ArrayType): - return model.PointerType(type.item, quals) - elif isinstance(type, model.RawFunctionType): - return type.as_function_pointer() - else: - return type - - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): - # First, a level of caching on the exact 'type' node of the AST. - # This is obscure, but needed because pycparser "unrolls" declarations - # such as "typedef struct { } foo_t, *foo_p" and we end up with - # an AST that is not a tree, but a DAG, with the "type" node of the - # two branches foo_t and foo_p of the trees being the same node. - # It's a bit silly but detecting "DAG-ness" in the AST tree seems - # to be the only way to distinguish this case from two independent - # structs. See test_struct_with_two_usages. - try: - return self._structnode2type[type] - except KeyError: - pass - # - # Note that this must handle parsing "struct foo" any number of - # times and always return the same StructType object. Additionally, - # one of these times (not necessarily the first), the fields of - # the struct can be specified with "struct foo { ...fields... }". - # If no name is given, then we have to create a new anonymous struct - # with no caching; in this case, the fields are either specified - # right now or never. - # - force_name = name - name = type.name - # - # get the type or create it if needed - if name is None: - # 'force_name' is used to guess a more readable name for - # anonymous structs, for the common case "typedef struct { } foo". - if force_name is not None: - explicit_name = '$%s' % force_name - else: - self._anonymous_counter += 1 - explicit_name = '$%d' % self._anonymous_counter - tp = None - else: - explicit_name = name - key = '%s %s' % (kind, name) - tp, _ = self._declarations.get(key, (None, None)) - # - if tp is None: - if kind == 'struct': - tp = model.StructType(explicit_name, None, None, None) - elif kind == 'union': - tp = model.UnionType(explicit_name, None, None, None) - elif kind == 'enum': - if explicit_name == '__dotdotdot__': - raise CDefError("Enums cannot be declared with ...") - tp = self._build_enum_type(explicit_name, type.values) - else: - raise AssertionError("kind = %r" % (kind,)) - if name is not None: - self._declare(key, tp) - else: - if kind == 'enum' and type.values is not None: - raise NotImplementedError( - "enum %s: the '{}' declaration should appear on the first " - "time the enum is mentioned, not later" % explicit_name) - if not tp.forcename: - tp.force_the_name(force_name) - if tp.forcename and '$' in tp.name: - self._declare('anonymous %s' % tp.forcename, tp) - # - self._structnode2type[type] = tp - # - # enums: done here - if kind == 'enum': - return tp - # - # is there a 'type.decls'? If yes, then this is the place in the - # C sources that declare the fields. If no, then just return the - # existing type, possibly still incomplete. 
- if type.decls is None: - return tp - # - if tp.fldnames is not None: - raise CDefError("duplicate declaration of struct %s" % name) - fldnames = [] - fldtypes = [] - fldbitsize = [] - fldquals = [] - for decl in type.decls: - if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and - ''.join(decl.type.names) == '__dotdotdot__'): - # XXX pycparser is inconsistent: 'names' should be a list - # of strings, but is sometimes just one string. Use - # str.join() as a way to cope with both. - self._make_partial(tp, nested) - continue - if decl.bitsize is None: - bitsize = -1 - else: - bitsize = self._parse_constant(decl.bitsize) - self._partial_length = False - type, fqual = self._get_type_and_quals(decl.type, - partial_length_ok=True) - if self._partial_length: - self._make_partial(tp, nested) - if isinstance(type, model.StructType) and type.partial: - self._make_partial(tp, nested) - fldnames.append(decl.name or '') - fldtypes.append(type) - fldbitsize.append(bitsize) - fldquals.append(fqual) - tp.fldnames = tuple(fldnames) - tp.fldtypes = tuple(fldtypes) - tp.fldbitsize = tuple(fldbitsize) - tp.fldquals = tuple(fldquals) - if fldbitsize != [-1] * len(fldbitsize): - if isinstance(tp, model.StructType) and tp.partial: - raise NotImplementedError("%s: using both bitfields and '...;'" - % (tp,)) - tp.packed = self._options.get('packed') - if tp.completed: # must be re-completed: it is not opaque any more - tp.completed = 0 - self._recomplete.append(tp) - return tp - - def _make_partial(self, tp, nested): - if not isinstance(tp, model.StructOrUnion): - raise CDefError("%s cannot be partial" % (tp,)) - if not tp.has_c_name() and not nested: - raise NotImplementedError("%s is partial but has no C name" %(tp,)) - tp.partial = True - - def _parse_constant(self, exprnode, partial_length_ok=False): - # for now, limited to expressions that are an immediate number - # or positive/negative number - if isinstance(exprnode, pycparser.c_ast.Constant): - s = exprnode.value - if '0' <= s[0] <= '9': - s = s.rstrip('uUlL') - try: - if s.startswith('0'): - return int(s, 8) - else: - return int(s, 10) - except ValueError: - if len(s) > 1: - if s.lower()[0:2] == '0x': - return int(s, 16) - elif s.lower()[0:2] == '0b': - return int(s, 2) - raise CDefError("invalid constant %r" % (s,)) - elif s[0] == "'" and s[-1] == "'" and ( - len(s) == 3 or (len(s) == 4 and s[1] == "\\")): - return ord(s[-2]) - else: - raise CDefError("invalid constant %r" % (s,)) - # - if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and - exprnode.op == '+'): - return self._parse_constant(exprnode.expr) - # - if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and - exprnode.op == '-'): - return -self._parse_constant(exprnode.expr) - # load previously defined int constant - if (isinstance(exprnode, pycparser.c_ast.ID) and - exprnode.name in self._int_constants): - return self._int_constants[exprnode.name] - # - if (isinstance(exprnode, pycparser.c_ast.ID) and - exprnode.name == '__dotdotdotarray__'): - if partial_length_ok: - self._partial_length = True - return '...' 
- raise FFIError(":%d: unsupported '[...]' here, cannot derive " - "the actual array length in this context" - % exprnode.coord.line) - # - if isinstance(exprnode, pycparser.c_ast.BinaryOp): - left = self._parse_constant(exprnode.left) - right = self._parse_constant(exprnode.right) - if exprnode.op == '+': - return left + right - elif exprnode.op == '-': - return left - right - elif exprnode.op == '*': - return left * right - elif exprnode.op == '/': - return self._c_div(left, right) - elif exprnode.op == '%': - return left - self._c_div(left, right) * right - elif exprnode.op == '<<': - return left << right - elif exprnode.op == '>>': - return left >> right - elif exprnode.op == '&': - return left & right - elif exprnode.op == '|': - return left | right - elif exprnode.op == '^': - return left ^ right - # - raise FFIError(":%d: unsupported expression: expected a " - "simple numeric constant" % exprnode.coord.line) - - def _c_div(self, a, b): - result = a // b - if ((a < 0) ^ (b < 0)) and (a % b) != 0: - result += 1 - return result - - def _build_enum_type(self, explicit_name, decls): - if decls is not None: - partial = False - enumerators = [] - enumvalues = [] - nextenumvalue = 0 - for enum in decls.enumerators: - if _r_enum_dotdotdot.match(enum.name): - partial = True - continue - if enum.value is not None: - nextenumvalue = self._parse_constant(enum.value) - enumerators.append(enum.name) - enumvalues.append(nextenumvalue) - self._add_constants(enum.name, nextenumvalue) - nextenumvalue += 1 - enumerators = tuple(enumerators) - enumvalues = tuple(enumvalues) - tp = model.EnumType(explicit_name, enumerators, enumvalues) - tp.partial = partial - else: # opaque enum - tp = model.EnumType(explicit_name, (), ()) - return tp - - def include(self, other): - for name, (tp, quals) in other._declarations.items(): - if name.startswith('anonymous $enum_$'): - continue # fix for test_anonymous_enum_include - kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): - self._declare(name, tp, included=True, quals=quals) - for k, v in other._int_constants.items(): - self._add_constants(k, v) - - def _get_unknown_type(self, decl): - typenames = decl.type.type.names - if typenames == ['__dotdotdot__']: - return model.unknown_type(decl.name) - - if typenames == ['__dotdotdotint__']: - if self._uses_new_feature is None: - self._uses_new_feature = "'typedef int... %s'" % decl.name - return model.UnknownIntegerType(decl.name) - - if typenames == ['__dotdotdotfloat__']: - # note: not for 'long double' so far - if self._uses_new_feature is None: - self._uses_new_feature = "'typedef float... %s'" % decl.name - return model.UnknownFloatType(decl.name) - - raise FFIError(':%d: unsupported usage of "..." in typedef' - % decl.coord.line) - - def _get_unknown_ptr_type(self, decl): - if decl.type.type.type.names == ['__dotdotdot__']: - return model.unknown_ptr_type(decl.name) - raise FFIError(':%d: unsupported usage of "..." in typedef' - % decl.coord.line) diff --git a/spaces/cihyFjudo/fairness-paper-search/Autocad 2012 Portable Free Download 103 A Powerful and Lightweight CAD Software.md b/spaces/cihyFjudo/fairness-paper-search/Autocad 2012 Portable Free Download 103 A Powerful and Lightweight CAD Software.md deleted file mode 100644 index 3bdd9d7b3e97758301fadff4d23241ed2e453615..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Autocad 2012 Portable Free Download 103 A Powerful and Lightweight CAD Software.md +++ /dev/null @@ -1,6 +0,0 @@ -
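Aside on the cffi C-declaration parser shown above (the Parser class with _parse_decl, _process_macros, _get_unknown_type and friends): in normal use this machinery sits behind cffi's public FFI.cdef() and typeof() calls rather than being driven directly. The following is a minimal usage sketch assuming only the public cffi API; the declaration names (my_int_t, FLAG, MODE, add) are invented for illustration and do not come from any file in this dump.

# Minimal sketch (not part of any file above): exercising the parser paths
# implemented in the cffi cparser code through cffi's public API.
# The declared names below are hypothetical examples.
from cffi import FFI

ffi = FFI()

# Each cdef line below corresponds to a branch of the parser above:
#  - "typedef int... my_int_t"  -> _get_unknown_type() via __dotdotdotint__
#  - "#define FLAG 0x10"        -> _process_macros() / _add_integer_constant()
#  - "#define MODE ..."         -> the literal dot-dot-dot macro branch
#  - "int add(int a, ...)"      -> _parse_function_type() with an ellipsis
ffi.cdef("""
    typedef int... my_int_t;
    #define FLAG 0x10
    #define MODE ...
    int add(int a, ...);
""")

# parse_type()/parse_type_and_quals() back ffi.typeof() for plain type strings.
print(ffi.typeof("unsigned short *"))

Each of these forms maps onto one of the "..." handling branches above, which is why _parse() registers __dotdotdotint__, __dotdotdotfloat__ and __dotdotdot__ as fake typedefs before handing the source to pycparser.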
diff --git a/spaces/cihyFjudo/fairness-paper-search/Explore Visual Literacy with Judith Wilde Pdf 85 Creative Solutions to Design Problems.md b/spaces/cihyFjudo/fairness-paper-search/Explore Visual Literacy with Judith Wilde Pdf 85 Creative Solutions to Design Problems.md deleted file mode 100644 index ae5e86418c50c894f1ae8612acd071fa21bd1fc7..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Explore Visual Literacy with Judith Wilde Pdf 85 Creative Solutions to Design Problems.md +++ /dev/null @@ -1,6 +0,0 @@ -
diff --git a/spaces/cleanmaster/so-vits-svc-akagi/resample.py b/spaces/cleanmaster/so-vits-svc-akagi/resample.py deleted file mode 100644 index d14abca73a2badd4798e30165636eb3db8f92127..0000000000000000000000000000000000000000 --- a/spaces/cleanmaster/so-vits-svc-akagi/resample.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - # speaker 's5', 'p280', 'p315' are excluded, - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, None) - wav, _ = librosa.effects.trim(wav, top_db=20) - peak = np.abs(wav).max() - if peak > 1.0: - wav = 0.98 * wav / peak - wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2) - wav2 /= max(wav2.max(), -wav2.min()) - save_name = wav_name - save_path2 = os.path.join(args.out_dir2, speaker, save_name) - wavfile.write( - save_path2, - args.sr2, - (wav2 * np.iinfo(np.int16).max).astype(np.int16) - ) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr2", type=int, default=32000, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir") - parser.add_argument("--out_dir2", type=str, default="./dataset/32k", help="path to target dir") - args = parser.parse_args() - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/_backends/_trio.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/_backends/_trio.py deleted file mode 100644 index cf2894350952e1169a6c77ea7c767e892f3efc1e..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/anyio/_backends/_trio.py +++ /dev/null @@ -1,996 +0,0 @@ -from __future__ import annotations - -import array -import math -import socket -from concurrent.futures import Future -from contextvars import copy_context -from dataclasses import dataclass -from functools import partial -from io import IOBase -from os import PathLike -from signal import Signals -from types import TracebackType -from typing import ( - IO, - TYPE_CHECKING, - Any, - AsyncGenerator, - AsyncIterator, - Awaitable, - Callable, - Collection, - Coroutine, - Generic, - Iterable, - Mapping, - NoReturn, - Sequence, - TypeVar, - cast, -) - -import sniffio -import trio.from_thread -from outcome import Error, Outcome, Value -from trio.socket import SocketType as TrioSocketType -from trio.to_thread import run_sync - -from .. 
import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc -from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable -from .._core._eventloop import claim_worker_thread -from .._core._exceptions import ( - BrokenResourceError, - BusyResourceError, - ClosedResourceError, - EndOfStream, -) -from .._core._exceptions import ExceptionGroup as BaseExceptionGroup -from .._core._sockets import convert_ipv6_sockaddr -from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter -from .._core._synchronization import Event as BaseEvent -from .._core._synchronization import ResourceGuard -from .._core._tasks import CancelScope as BaseCancelScope -from ..abc import IPSockAddrType, UDPPacketType - -if TYPE_CHECKING: - from trio_typing import TaskStatus - -try: - from trio import lowlevel as trio_lowlevel -except ImportError: - from trio import hazmat as trio_lowlevel # type: ignore[no-redef] - from trio.hazmat import wait_readable, wait_writable -else: - from trio.lowlevel import wait_readable, wait_writable - -try: - trio_open_process = trio_lowlevel.open_process -except AttributeError: - # isort: off - from trio import ( # type: ignore[attr-defined, no-redef] - open_process as trio_open_process, - ) - -T_Retval = TypeVar("T_Retval") -T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType) - - -# -# Event loop -# - -run = trio.run -current_token = trio.lowlevel.current_trio_token -RunVar = trio.lowlevel.RunVar - - -# -# Miscellaneous -# - -sleep = trio.sleep - - -# -# Timeouts and cancellation -# - - -class CancelScope(BaseCancelScope): - def __new__( - cls, original: trio.CancelScope | None = None, **kwargs: object - ) -> CancelScope: - return object.__new__(cls) - - def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None: - self.__original = original or trio.CancelScope(**kwargs) - - def __enter__(self) -> CancelScope: - self.__original.__enter__() - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - # https://github.com/python-trio/trio-typing/pull/79 - return self.__original.__exit__( # type: ignore[func-returns-value] - exc_type, exc_val, exc_tb - ) - - def cancel(self) -> DeprecatedAwaitable: - self.__original.cancel() - return DeprecatedAwaitable(self.cancel) - - @property - def deadline(self) -> float: - return self.__original.deadline - - @deadline.setter - def deadline(self, value: float) -> None: - self.__original.deadline = value - - @property - def cancel_called(self) -> bool: - return self.__original.cancel_called - - @property - def shield(self) -> bool: - return self.__original.shield - - @shield.setter - def shield(self, value: bool) -> None: - self.__original.shield = value - - -CancelledError = trio.Cancelled -checkpoint = trio.lowlevel.checkpoint -checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled -cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint -current_effective_deadline = trio.current_effective_deadline -current_time = trio.current_time - - -# -# Task groups -# - - -class ExceptionGroup(BaseExceptionGroup, trio.MultiError): - pass - - -class TaskGroup(abc.TaskGroup): - def __init__(self) -> None: - self._active = False - self._nursery_manager = trio.open_nursery() - self.cancel_scope = None # type: ignore[assignment] - - async def __aenter__(self) -> TaskGroup: - self._active = True - self._nursery = await self._nursery_manager.__aenter__() - self.cancel_scope = 
CancelScope(self._nursery.cancel_scope) - return self - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - try: - return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) - except trio.MultiError as exc: - raise ExceptionGroup(exc.exceptions) from None - finally: - self._active = False - - def start_soon( - self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None - ) -> None: - if not self._active: - raise RuntimeError( - "This task group is not active; no new tasks can be started." - ) - - self._nursery.start_soon(func, *args, name=name) - - async def start( - self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None - ) -> object: - if not self._active: - raise RuntimeError( - "This task group is not active; no new tasks can be started." - ) - - return await self._nursery.start(func, *args, name=name) - - -# -# Threads -# - - -async def run_sync_in_worker_thread( - func: Callable[..., T_Retval], - *args: object, - cancellable: bool = False, - limiter: trio.CapacityLimiter | None = None, -) -> T_Retval: - def wrapper() -> T_Retval: - with claim_worker_thread("trio"): - return func(*args) - - # TODO: remove explicit context copying when trio 0.20 is the minimum requirement - context = copy_context() - context.run(sniffio.current_async_library_cvar.set, None) - return await run_sync( - context.run, wrapper, cancellable=cancellable, limiter=limiter - ) - - -# TODO: remove this workaround when trio 0.20 is the minimum requirement -def run_async_from_thread( - fn: Callable[..., Awaitable[T_Retval]], *args: Any -) -> T_Retval: - async def wrapper() -> T_Retval: - retval: T_Retval - - async def inner() -> None: - nonlocal retval - __tracebackhide__ = True - retval = await fn(*args) - - async with trio.open_nursery() as n: - context.run(n.start_soon, inner) - - __tracebackhide__ = True - return retval # noqa: F821 - - context = copy_context() - context.run(sniffio.current_async_library_cvar.set, "trio") - return trio.from_thread.run(wrapper) - - -def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval: - # TODO: remove explicit context copying when trio 0.20 is the minimum requirement - retval = trio.from_thread.run_sync(copy_context().run, fn, *args) - return cast(T_Retval, retval) - - -class BlockingPortal(abc.BlockingPortal): - def __new__(cls) -> BlockingPortal: - return object.__new__(cls) - - def __init__(self) -> None: - super().__init__() - self._token = trio.lowlevel.current_trio_token() - - def _spawn_task_from_thread( - self, - func: Callable, - args: tuple, - kwargs: dict[str, Any], - name: object, - future: Future, - ) -> None: - context = copy_context() - context.run(sniffio.current_async_library_cvar.set, "trio") - trio.from_thread.run_sync( - context.run, - partial(self._task_group.start_soon, name=name), - self._call_func, - func, - args, - kwargs, - future, - trio_token=self._token, - ) - - -# -# Subprocesses -# - - -@dataclass(eq=False) -class ReceiveStreamWrapper(abc.ByteReceiveStream): - _stream: trio.abc.ReceiveStream - - async def receive(self, max_bytes: int | None = None) -> bytes: - try: - data = await self._stream.receive_some(max_bytes) - except trio.ClosedResourceError as exc: - raise ClosedResourceError from exc.__cause__ - except trio.BrokenResourceError as exc: - raise BrokenResourceError from exc.__cause__ - - if data: - return data - else: - raise EndOfStream - - async def 
aclose(self) -> None: - await self._stream.aclose() - - -@dataclass(eq=False) -class SendStreamWrapper(abc.ByteSendStream): - _stream: trio.abc.SendStream - - async def send(self, item: bytes) -> None: - try: - await self._stream.send_all(item) - except trio.ClosedResourceError as exc: - raise ClosedResourceError from exc.__cause__ - except trio.BrokenResourceError as exc: - raise BrokenResourceError from exc.__cause__ - - async def aclose(self) -> None: - await self._stream.aclose() - - -@dataclass(eq=False) -class Process(abc.Process): - _process: trio.Process - _stdin: abc.ByteSendStream | None - _stdout: abc.ByteReceiveStream | None - _stderr: abc.ByteReceiveStream | None - - async def aclose(self) -> None: - if self._stdin: - await self._stdin.aclose() - if self._stdout: - await self._stdout.aclose() - if self._stderr: - await self._stderr.aclose() - - await self.wait() - - async def wait(self) -> int: - return await self._process.wait() - - def terminate(self) -> None: - self._process.terminate() - - def kill(self) -> None: - self._process.kill() - - def send_signal(self, signal: Signals) -> None: - self._process.send_signal(signal) - - @property - def pid(self) -> int: - return self._process.pid - - @property - def returncode(self) -> int | None: - return self._process.returncode - - @property - def stdin(self) -> abc.ByteSendStream | None: - return self._stdin - - @property - def stdout(self) -> abc.ByteReceiveStream | None: - return self._stdout - - @property - def stderr(self) -> abc.ByteReceiveStream | None: - return self._stderr - - -async def open_process( - command: str | bytes | Sequence[str | bytes], - *, - shell: bool, - stdin: int | IO[Any] | None, - stdout: int | IO[Any] | None, - stderr: int | IO[Any] | None, - cwd: str | bytes | PathLike | None = None, - env: Mapping[str, str] | None = None, - start_new_session: bool = False, -) -> Process: - process = await trio_open_process( # type: ignore[misc] - command, # type: ignore[arg-type] - stdin=stdin, - stdout=stdout, - stderr=stderr, - shell=shell, - cwd=cwd, - env=env, - start_new_session=start_new_session, - ) - stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None - stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None - stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None - return Process(process, stdin_stream, stdout_stream, stderr_stream) - - -class _ProcessPoolShutdownInstrument(trio.abc.Instrument): - def after_run(self) -> None: - super().after_run() - - -current_default_worker_process_limiter: RunVar = RunVar( - "current_default_worker_process_limiter" -) - - -async def _shutdown_process_pool(workers: set[Process]) -> None: - process: Process - try: - await sleep(math.inf) - except trio.Cancelled: - for process in workers: - if process.returncode is None: - process.kill() - - with CancelScope(shield=True): - for process in workers: - await process.aclose() - - -def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None: - trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers) - - -# -# Sockets and networking -# - - -class _TrioSocketMixin(Generic[T_SockAddr]): - def __init__(self, trio_socket: TrioSocketType) -> None: - self._trio_socket = trio_socket - self._closed = False - - def _check_closed(self) -> None: - if self._closed: - raise ClosedResourceError - if self._trio_socket.fileno() < 0: - raise BrokenResourceError - - @property - def _raw_socket(self) -> socket.socket: - return self._trio_socket._sock # type: 
ignore[attr-defined] - - async def aclose(self) -> None: - if self._trio_socket.fileno() >= 0: - self._closed = True - self._trio_socket.close() - - def _convert_socket_error(self, exc: BaseException) -> NoReturn: - if isinstance(exc, trio.ClosedResourceError): - raise ClosedResourceError from exc - elif self._trio_socket.fileno() < 0 and self._closed: - raise ClosedResourceError from None - elif isinstance(exc, OSError): - raise BrokenResourceError from exc - else: - raise exc - - -class SocketStream(_TrioSocketMixin, abc.SocketStream): - def __init__(self, trio_socket: TrioSocketType) -> None: - super().__init__(trio_socket) - self._receive_guard = ResourceGuard("reading from") - self._send_guard = ResourceGuard("writing to") - - async def receive(self, max_bytes: int = 65536) -> bytes: - with self._receive_guard: - try: - data = await self._trio_socket.recv(max_bytes) - except BaseException as exc: - self._convert_socket_error(exc) - - if data: - return data - else: - raise EndOfStream - - async def send(self, item: bytes) -> None: - with self._send_guard: - view = memoryview(item) - while view: - try: - bytes_sent = await self._trio_socket.send(view) - except BaseException as exc: - self._convert_socket_error(exc) - - view = view[bytes_sent:] - - async def send_eof(self) -> None: - self._trio_socket.shutdown(socket.SHUT_WR) - - -class UNIXSocketStream(SocketStream, abc.UNIXSocketStream): - async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]: - if not isinstance(msglen, int) or msglen < 0: - raise ValueError("msglen must be a non-negative integer") - if not isinstance(maxfds, int) or maxfds < 1: - raise ValueError("maxfds must be a positive integer") - - fds = array.array("i") - await checkpoint() - with self._receive_guard: - while True: - try: - message, ancdata, flags, addr = await self._trio_socket.recvmsg( - msglen, socket.CMSG_LEN(maxfds * fds.itemsize) - ) - except BaseException as exc: - self._convert_socket_error(exc) - else: - if not message and not ancdata: - raise EndOfStream - - break - - for cmsg_level, cmsg_type, cmsg_data in ancdata: - if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS: - raise RuntimeError( - f"Received unexpected ancillary data; message = {message!r}, " - f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}" - ) - - fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) - - return message, list(fds) - - async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None: - if not message: - raise ValueError("message must not be empty") - if not fds: - raise ValueError("fds must not be empty") - - filenos: list[int] = [] - for fd in fds: - if isinstance(fd, int): - filenos.append(fd) - elif isinstance(fd, IOBase): - filenos.append(fd.fileno()) - - fdarray = array.array("i", filenos) - await checkpoint() - with self._send_guard: - while True: - try: - await self._trio_socket.sendmsg( - [message], - [ - ( - socket.SOL_SOCKET, - socket.SCM_RIGHTS, # type: ignore[list-item] - fdarray, - ) - ], - ) - break - except BaseException as exc: - self._convert_socket_error(exc) - - -class TCPSocketListener(_TrioSocketMixin, abc.SocketListener): - def __init__(self, raw_socket: socket.socket): - super().__init__(trio.socket.from_stdlib_socket(raw_socket)) - self._accept_guard = ResourceGuard("accepting connections from") - - async def accept(self) -> SocketStream: - with self._accept_guard: - try: - trio_socket, _addr = await self._trio_socket.accept() - except BaseException as exc: - 
self._convert_socket_error(exc) - - trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - return SocketStream(trio_socket) - - -class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener): - def __init__(self, raw_socket: socket.socket): - super().__init__(trio.socket.from_stdlib_socket(raw_socket)) - self._accept_guard = ResourceGuard("accepting connections from") - - async def accept(self) -> UNIXSocketStream: - with self._accept_guard: - try: - trio_socket, _addr = await self._trio_socket.accept() - except BaseException as exc: - self._convert_socket_error(exc) - - return UNIXSocketStream(trio_socket) - - -class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket): - def __init__(self, trio_socket: TrioSocketType) -> None: - super().__init__(trio_socket) - self._receive_guard = ResourceGuard("reading from") - self._send_guard = ResourceGuard("writing to") - - async def receive(self) -> tuple[bytes, IPSockAddrType]: - with self._receive_guard: - try: - data, addr = await self._trio_socket.recvfrom(65536) - return data, convert_ipv6_sockaddr(addr) - except BaseException as exc: - self._convert_socket_error(exc) - - async def send(self, item: UDPPacketType) -> None: - with self._send_guard: - try: - await self._trio_socket.sendto(*item) - except BaseException as exc: - self._convert_socket_error(exc) - - -class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket): - def __init__(self, trio_socket: TrioSocketType) -> None: - super().__init__(trio_socket) - self._receive_guard = ResourceGuard("reading from") - self._send_guard = ResourceGuard("writing to") - - async def receive(self) -> bytes: - with self._receive_guard: - try: - return await self._trio_socket.recv(65536) - except BaseException as exc: - self._convert_socket_error(exc) - - async def send(self, item: bytes) -> None: - with self._send_guard: - try: - await self._trio_socket.send(item) - except BaseException as exc: - self._convert_socket_error(exc) - - -async def connect_tcp( - host: str, port: int, local_address: IPSockAddrType | None = None -) -> SocketStream: - family = socket.AF_INET6 if ":" in host else socket.AF_INET - trio_socket = trio.socket.socket(family) - trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - if local_address: - await trio_socket.bind(local_address) - - try: - await trio_socket.connect((host, port)) - except BaseException: - trio_socket.close() - raise - - return SocketStream(trio_socket) - - -async def connect_unix(path: str) -> UNIXSocketStream: - trio_socket = trio.socket.socket(socket.AF_UNIX) - try: - await trio_socket.connect(path) - except BaseException: - trio_socket.close() - raise - - return UNIXSocketStream(trio_socket) - - -async def create_udp_socket( - family: socket.AddressFamily, - local_address: IPSockAddrType | None, - remote_address: IPSockAddrType | None, - reuse_port: bool, -) -> UDPSocket | ConnectedUDPSocket: - trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM) - - if reuse_port: - trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - - if local_address: - await trio_socket.bind(local_address) - - if remote_address: - await trio_socket.connect(remote_address) - return ConnectedUDPSocket(trio_socket) - else: - return UDPSocket(trio_socket) - - -getaddrinfo = trio.socket.getaddrinfo -getnameinfo = trio.socket.getnameinfo - - -async def wait_socket_readable(sock: socket.socket) -> None: - try: - await wait_readable(sock) - except trio.ClosedResourceError as exc: - raise 
ClosedResourceError().with_traceback(exc.__traceback__) from None - except trio.BusyResourceError: - raise BusyResourceError("reading from") from None - - -async def wait_socket_writable(sock: socket.socket) -> None: - try: - await wait_writable(sock) - except trio.ClosedResourceError as exc: - raise ClosedResourceError().with_traceback(exc.__traceback__) from None - except trio.BusyResourceError: - raise BusyResourceError("writing to") from None - - -# -# Synchronization -# - - -class Event(BaseEvent): - def __new__(cls) -> Event: - return object.__new__(cls) - - def __init__(self) -> None: - self.__original = trio.Event() - - def is_set(self) -> bool: - return self.__original.is_set() - - async def wait(self) -> None: - return await self.__original.wait() - - def statistics(self) -> EventStatistics: - orig_statistics = self.__original.statistics() - return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting) - - def set(self) -> DeprecatedAwaitable: - self.__original.set() - return DeprecatedAwaitable(self.set) - - -class CapacityLimiter(BaseCapacityLimiter): - def __new__(cls, *args: object, **kwargs: object) -> CapacityLimiter: - return object.__new__(cls) - - def __init__( - self, *args: Any, original: trio.CapacityLimiter | None = None - ) -> None: - self.__original = original or trio.CapacityLimiter(*args) - - async def __aenter__(self) -> None: - return await self.__original.__aenter__() - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: - await self.__original.__aexit__(exc_type, exc_val, exc_tb) - - @property - def total_tokens(self) -> float: - return self.__original.total_tokens - - @total_tokens.setter - def total_tokens(self, value: float) -> None: - self.__original.total_tokens = value - - @property - def borrowed_tokens(self) -> int: - return self.__original.borrowed_tokens - - @property - def available_tokens(self) -> float: - return self.__original.available_tokens - - def acquire_nowait(self) -> DeprecatedAwaitable: - self.__original.acquire_nowait() - return DeprecatedAwaitable(self.acquire_nowait) - - def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable: - self.__original.acquire_on_behalf_of_nowait(borrower) - return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait) - - async def acquire(self) -> None: - await self.__original.acquire() - - async def acquire_on_behalf_of(self, borrower: object) -> None: - await self.__original.acquire_on_behalf_of(borrower) - - def release(self) -> None: - return self.__original.release() - - def release_on_behalf_of(self, borrower: object) -> None: - return self.__original.release_on_behalf_of(borrower) - - def statistics(self) -> CapacityLimiterStatistics: - orig = self.__original.statistics() - return CapacityLimiterStatistics( - borrowed_tokens=orig.borrowed_tokens, - total_tokens=orig.total_tokens, - borrowers=orig.borrowers, - tasks_waiting=orig.tasks_waiting, - ) - - -_capacity_limiter_wrapper: RunVar = RunVar("_capacity_limiter_wrapper") - - -def current_default_thread_limiter() -> CapacityLimiter: - try: - return _capacity_limiter_wrapper.get() - except LookupError: - limiter = CapacityLimiter( - original=trio.to_thread.current_default_thread_limiter() - ) - _capacity_limiter_wrapper.set(limiter) - return limiter - - -# -# Signal handling -# - - -class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]): - _iterator: AsyncIterator[int] - - def __init__(self, signals: 
tuple[Signals, ...]): - self._signals = signals - - def __enter__(self) -> _SignalReceiver: - self._cm = trio.open_signal_receiver(*self._signals) - self._iterator = self._cm.__enter__() - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - return self._cm.__exit__(exc_type, exc_val, exc_tb) - - def __aiter__(self) -> _SignalReceiver: - return self - - async def __anext__(self) -> Signals: - signum = await self._iterator.__anext__() - return Signals(signum) - - -def open_signal_receiver(*signals: Signals) -> _SignalReceiver: - return _SignalReceiver(signals) - - -# -# Testing and debugging -# - - -def get_current_task() -> TaskInfo: - task = trio_lowlevel.current_task() - - parent_id = None - if task.parent_nursery and task.parent_nursery.parent_task: - parent_id = id(task.parent_nursery.parent_task) - - return TaskInfo(id(task), parent_id, task.name, task.coro) - - -def get_running_tasks() -> list[TaskInfo]: - root_task = trio_lowlevel.current_root_task() - task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)] - nurseries = root_task.child_nurseries - while nurseries: - new_nurseries: list[trio.Nursery] = [] - for nursery in nurseries: - for task in nursery.child_tasks: - task_infos.append( - TaskInfo(id(task), id(nursery.parent_task), task.name, task.coro) - ) - new_nurseries.extend(task.child_nurseries) - - nurseries = new_nurseries - - return task_infos - - -def wait_all_tasks_blocked() -> Awaitable[None]: - import trio.testing - - return trio.testing.wait_all_tasks_blocked() - - -class TestRunner(abc.TestRunner): - def __init__(self, **options: Any) -> None: - from collections import deque - from queue import Queue - - self._call_queue: Queue[Callable[..., object]] = Queue() - self._result_queue: deque[Outcome] = deque() - self._stop_event: trio.Event | None = None - self._nursery: trio.Nursery | None = None - self._options = options - - async def _trio_main(self) -> None: - self._stop_event = trio.Event() - async with trio.open_nursery() as self._nursery: - await self._stop_event.wait() - - async def _call_func( - self, func: Callable[..., Awaitable[object]], args: tuple, kwargs: dict - ) -> None: - try: - retval = await func(*args, **kwargs) - except BaseException as exc: - self._result_queue.append(Error(exc)) - else: - self._result_queue.append(Value(retval)) - - def _main_task_finished(self, outcome: object) -> None: - self._nursery = None - - def _get_nursery(self) -> trio.Nursery: - if self._nursery is None: - trio.lowlevel.start_guest_run( - self._trio_main, - run_sync_soon_threadsafe=self._call_queue.put, - done_callback=self._main_task_finished, - **self._options, - ) - while self._nursery is None: - self._call_queue.get()() - - return self._nursery - - def _call( - self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object - ) -> T_Retval: - self._get_nursery().start_soon(self._call_func, func, args, kwargs) - while not self._result_queue: - self._call_queue.get()() - - outcome = self._result_queue.pop() - return outcome.unwrap() - - def close(self) -> None: - if self._stop_event: - self._stop_event.set() - while self._nursery is not None: - self._call_queue.get()() - - def run_asyncgen_fixture( - self, - fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], - kwargs: dict[str, Any], - ) -> Iterable[T_Retval]: - async def fixture_runner(*, task_status: TaskStatus[T_Retval]) -> None: - agen = fixture_func(**kwargs) 
- retval = await agen.asend(None) - task_status.started(retval) - await teardown_event.wait() - try: - await agen.asend(None) - except StopAsyncIteration: - pass - else: - await agen.aclose() - raise RuntimeError("Async generator fixture did not stop") - - teardown_event = trio.Event() - fixture_value = self._call(lambda: self._get_nursery().start(fixture_runner)) - yield fixture_value - teardown_event.set() - - def run_fixture( - self, - fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]], - kwargs: dict[str, Any], - ) -> T_Retval: - return self._call(fixture_func, **kwargs) - - def run_test( - self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any] - ) -> None: - self._call(test_func, **kwargs) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/4xm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/4xm.c deleted file mode 100644 index fab3fb5b77f826badfd021b60a22116b8326a7a4..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/4xm.c +++ /dev/null @@ -1,1040 +0,0 @@ -/* - * 4XM codec - * Copyright (c) 2003 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * 4XM codec. 
- */ - -#include - -#include "libavutil/avassert.h" -#include "libavutil/frame.h" -#include "libavutil/imgutils.h" -#include "libavutil/intreadwrite.h" -#include "libavutil/mem_internal.h" -#include "libavutil/thread.h" -#include "avcodec.h" -#include "blockdsp.h" -#include "bswapdsp.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "decode.h" -#include "get_bits.h" - - -#define BLOCK_TYPE_VLC_BITS 5 -#define ACDC_VLC_BITS 9 - -#define CFRAME_BUFFER_COUNT 100 - -static const uint8_t block_type_tab[2][4][8][2] = { - { - { // { 8, 4, 2 } x { 8, 4, 2} - { 0, 1 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 30, 5 }, { 31, 5 }, { 0, 0 } - }, { // { 8, 4 } x 1 - { 0, 1 }, { 0, 0 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 15, 4 }, { 0, 0 } - }, { // 1 x { 8, 4 } - { 0, 1 }, { 2, 2 }, { 0, 0 }, { 6, 3 }, { 14, 4 }, { 15, 4 }, { 0, 0 } - }, { // 1 x 2, 2 x 1 - { 0, 1 }, { 0, 0 }, { 0, 0 }, { 2, 2 }, { 6, 3 }, { 14, 4 }, { 15, 4 } - } - }, { - { // { 8, 4, 2 } x { 8, 4, 2} - { 1, 2 }, { 4, 3 }, { 5, 3 }, { 0, 2 }, { 6, 3 }, { 7, 3 }, { 0, 0 } - }, {// { 8, 4 } x 1 - { 1, 2 }, { 0, 0 }, { 2, 2 }, { 0, 2 }, { 6, 3 }, { 7, 3 }, { 0, 0 } - }, {// 1 x { 8, 4 } - { 1, 2 }, { 2, 2 }, { 0, 0 }, { 0, 2 }, { 6, 3 }, { 7, 3 }, { 0, 0 } - }, {// 1 x 2, 2 x 1 - { 1, 2 }, { 0, 0 }, { 0, 0 }, { 0, 2 }, { 2, 2 }, { 6, 3 }, { 7, 3 } - } - } -}; - -static const uint8_t size2index[4][4] = { - { -1, 3, 1, 1 }, - { 3, 0, 0, 0 }, - { 2, 0, 0, 0 }, - { 2, 0, 0, 0 }, -}; - -static const int8_t mv[256][2] = { - { 0, 0 }, { 0, -1 }, { -1, 0 }, { 1, 0 }, { 0, 1 }, { -1, -1 }, { 1, -1 }, { -1, 1 }, - { 1, 1 }, { 0, -2 }, { -2, 0 }, { 2, 0 }, { 0, 2 }, { -1, -2 }, { 1, -2 }, { -2, -1 }, - { 2, -1 }, { -2, 1 }, { 2, 1 }, { -1, 2 }, { 1, 2 }, { -2, -2 }, { 2, -2 }, { -2, 2 }, - { 2, 2 }, { 0, -3 }, { -3, 0 }, { 3, 0 }, { 0, 3 }, { -1, -3 }, { 1, -3 }, { -3, -1 }, - { 3, -1 }, { -3, 1 }, { 3, 1 }, { -1, 3 }, { 1, 3 }, { -2, -3 }, { 2, -3 }, { -3, -2 }, - { 3, -2 }, { -3, 2 }, { 3, 2 }, { -2, 3 }, { 2, 3 }, { 0, -4 }, { -4, 0 }, { 4, 0 }, - { 0, 4 }, { -1, -4 }, { 1, -4 }, { -4, -1 }, { 4, -1 }, { 4, 1 }, { -1, 4 }, { 1, 4 }, - { -3, -3 }, { -3, 3 }, { 3, 3 }, { -2, -4 }, { -4, -2 }, { 4, -2 }, { -4, 2 }, { -2, 4 }, - { 2, 4 }, { -3, -4 }, { 3, -4 }, { 4, -3 }, { -5, 0 }, { -4, 3 }, { -3, 4 }, { 3, 4 }, - { -1, -5 }, { -5, -1 }, { -5, 1 }, { -1, 5 }, { -2, -5 }, { 2, -5 }, { 5, -2 }, { 5, 2 }, - { -4, -4 }, { -4, 4 }, { -3, -5 }, { -5, -3 }, { -5, 3 }, { 3, 5 }, { -6, 0 }, { 0, 6 }, - { -6, -1 }, { -6, 1 }, { 1, 6 }, { 2, -6 }, { -6, 2 }, { 2, 6 }, { -5, -4 }, { 5, 4 }, - { 4, 5 }, { -6, -3 }, { 6, 3 }, { -7, 0 }, { -1, -7 }, { 5, -5 }, { -7, 1 }, { -1, 7 }, - { 4, -6 }, { 6, 4 }, { -2, -7 }, { -7, 2 }, { -3, -7 }, { 7, -3 }, { 3, 7 }, { 6, -5 }, - { 0, -8 }, { -1, -8 }, { -7, -4 }, { -8, 1 }, { 4, 7 }, { 2, -8 }, { -2, 8 }, { 6, 6 }, - { -8, 3 }, { 5, -7 }, { -5, 7 }, { 8, -4 }, { 0, -9 }, { -9, -1 }, { 1, 9 }, { 7, -6 }, - { -7, 6 }, { -5, -8 }, { -5, 8 }, { -9, 3 }, { 9, -4 }, { 7, -7 }, { 8, -6 }, { 6, 8 }, - { 10, 1 }, { -10, 2 }, { 9, -5 }, { 10, -3 }, { -8, -7 }, { -10, -4 }, { 6, -9 }, { -11, 0 }, - { 11, 1 }, { -11, -2 }, { -2, 11 }, { 7, -9 }, { -7, 9 }, { 10, 6 }, { -4, 11 }, { 8, -9 }, - { 8, 9 }, { 5, 11 }, { 7, -10 }, { 12, -3 }, { 11, 6 }, { -9, -9 }, { 8, 10 }, { 5, 12 }, - { -11, 7 }, { 13, 2 }, { 6, -12 }, { 10, 9 }, { -11, 8 }, { -7, 12 }, { 0, 14 }, { 14, -2 }, - { -9, 11 }, { -6, 13 }, { -14, -4 }, { -5, -14 }, { 5, 14 }, { -15, -1 }, { -14, -6 }, { 3, -15 }, - { 11, -11 }, { -7, 14 }, { -5, 15 }, { 8, -14 }, { 
15, 6 }, { 3, 16 }, { 7, -15 }, { -16, 5 }, - { 0, 17 }, { -16, -6 }, { -10, 14 }, { -16, 7 }, { 12, 13 }, { -16, 8 }, { -17, 6 }, { -18, 3 }, - { -7, 17 }, { 15, 11 }, { 16, 10 }, { 2, -19 }, { 3, -19 }, { -11, -16 }, { -18, 8 }, { -19, -6 }, - { 2, -20 }, { -17, -11 }, { -10, -18 }, { 8, 19 }, { -21, -1 }, { -20, 7 }, { -4, 21 }, { 21, 5 }, - { 15, 16 }, { 2, -22 }, { -10, -20 }, { -22, 5 }, { 20, -11 }, { -7, -22 }, { -12, 20 }, { 23, -5 }, - { 13, -20 }, { 24, -2 }, { -15, 19 }, { -11, 22 }, { 16, 19 }, { 23, -10 }, { -18, -18 }, { -9, -24 }, - { 24, -10 }, { -3, 26 }, { -23, 13 }, { -18, -20 }, { 17, 21 }, { -4, 27 }, { 27, 6 }, { 1, -28 }, - { -11, 26 }, { -17, -23 }, { 7, 28 }, { 11, -27 }, { 29, 5 }, { -23, -19 }, { -28, -11 }, { -21, 22 }, - { -30, 7 }, { -17, 26 }, { -27, 16 }, { 13, 29 }, { 19, -26 }, { 10, -31 }, { -14, -30 }, { 20, -27 }, - { -29, 18 }, { -16, -31 }, { -28, -22 }, { 21, -30 }, { -25, 28 }, { 26, -29 }, { 25, -32 }, { -32, -32 } -}; - -/* This is simply the scaled down elementwise product of the standard JPEG - * quantizer table and the AAN premul table. */ -static const uint8_t dequant_table[64] = { - 16, 15, 13, 19, 24, 31, 28, 17, - 17, 23, 25, 31, 36, 63, 45, 21, - 18, 24, 27, 37, 52, 59, 49, 20, - 16, 28, 34, 40, 60, 80, 51, 20, - 18, 31, 48, 66, 68, 86, 56, 21, - 19, 38, 56, 59, 64, 64, 48, 20, - 27, 48, 55, 55, 56, 51, 35, 15, - 20, 35, 34, 32, 31, 22, 15, 8, -}; - -static VLC block_type_vlc[2][4]; - - -typedef struct CFrameBuffer { - unsigned int allocated_size; - unsigned int size; - int id; - uint8_t *data; -} CFrameBuffer; - -typedef struct FourXContext { - AVCodecContext *avctx; - BlockDSPContext bdsp; - BswapDSPContext bbdsp; - uint16_t *frame_buffer; - uint16_t *last_frame_buffer; - GetBitContext pre_gb; ///< ac/dc prefix - GetBitContext gb; - GetByteContext g; - GetByteContext g2; - int mv[256]; - VLC pre_vlc; - int last_dc; - DECLARE_ALIGNED(32, int16_t, block)[6][64]; - void *bitstream_buffer; - unsigned int bitstream_buffer_size; - int version; - CFrameBuffer cfrm[CFRAME_BUFFER_COUNT]; -} FourXContext; - - -#define FIX_1_082392200 70936 -#define FIX_1_414213562 92682 -#define FIX_1_847759065 121095 -#define FIX_2_613125930 171254 - -#define MULTIPLY(var, const) ((int)((var) * (unsigned)(const)) >> 16) - -static void idct(int16_t block[64]) -{ - int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int tmp10, tmp11, tmp12, tmp13; - int z5, z10, z11, z12, z13; - int i; - int temp[64]; - - for (i = 0; i < 8; i++) { - tmp10 = block[8 * 0 + i] + block[8 * 4 + i]; - tmp11 = block[8 * 0 + i] - block[8 * 4 + i]; - - tmp13 = block[8 * 2 + i] + block[8 * 6 + i]; - tmp12 = MULTIPLY(block[8 * 2 + i] - block[8 * 6 + i], FIX_1_414213562) - tmp13; - - tmp0 = tmp10 + tmp13; - tmp3 = tmp10 - tmp13; - tmp1 = tmp11 + tmp12; - tmp2 = tmp11 - tmp12; - - z13 = block[8 * 5 + i] + block[8 * 3 + i]; - z10 = block[8 * 5 + i] - block[8 * 3 + i]; - z11 = block[8 * 1 + i] + block[8 * 7 + i]; - z12 = block[8 * 1 + i] - block[8 * 7 + i]; - - tmp7 = z11 + z13; - tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); - - z5 = MULTIPLY(z10 + z12, FIX_1_847759065); - tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; - tmp12 = MULTIPLY(z10, -FIX_2_613125930) + z5; - - tmp6 = tmp12 - tmp7; - tmp5 = tmp11 - tmp6; - tmp4 = tmp10 + tmp5; - - temp[8 * 0 + i] = tmp0 + tmp7; - temp[8 * 7 + i] = tmp0 - tmp7; - temp[8 * 1 + i] = tmp1 + tmp6; - temp[8 * 6 + i] = tmp1 - tmp6; - temp[8 * 2 + i] = tmp2 + tmp5; - temp[8 * 5 + i] = tmp2 - tmp5; - temp[8 * 4 + i] = tmp3 + tmp4; - temp[8 * 3 + i] = tmp3 - 
tmp4; - } - - for (i = 0; i < 8 * 8; i += 8) { - tmp10 = temp[0 + i] + temp[4 + i]; - tmp11 = temp[0 + i] - temp[4 + i]; - - tmp13 = temp[2 + i] + temp[6 + i]; - tmp12 = MULTIPLY(temp[2 + i] - temp[6 + i], FIX_1_414213562) - tmp13; - - tmp0 = tmp10 + tmp13; - tmp3 = tmp10 - tmp13; - tmp1 = tmp11 + tmp12; - tmp2 = tmp11 - tmp12; - - z13 = temp[5 + i] + temp[3 + i]; - z10 = temp[5 + i] - temp[3 + i]; - z11 = temp[1 + i] + temp[7 + i]; - z12 = temp[1 + i] - temp[7 + i]; - - tmp7 = z11 + z13; - tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); - - z5 = MULTIPLY(z10 + z12, FIX_1_847759065); - tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; - tmp12 = MULTIPLY(z10, -FIX_2_613125930) + z5; - - tmp6 = tmp12 - tmp7; - tmp5 = tmp11 - tmp6; - tmp4 = tmp10 + tmp5; - - block[0 + i] = (tmp0 + tmp7) >> 6; - block[7 + i] = (tmp0 - tmp7) >> 6; - block[1 + i] = (tmp1 + tmp6) >> 6; - block[6 + i] = (tmp1 - tmp6) >> 6; - block[2 + i] = (tmp2 + tmp5) >> 6; - block[5 + i] = (tmp2 - tmp5) >> 6; - block[4 + i] = (tmp3 + tmp4) >> 6; - block[3 + i] = (tmp3 - tmp4) >> 6; - } -} - -static av_cold void init_vlcs(void) -{ - static VLCElem table[2][4][32]; - int i, j; - - for (i = 0; i < 2; i++) { - for (j = 0; j < 4; j++) { - block_type_vlc[i][j].table = table[i][j]; - block_type_vlc[i][j].table_allocated = 32; - init_vlc(&block_type_vlc[i][j], BLOCK_TYPE_VLC_BITS, 7, - &block_type_tab[i][j][0][1], 2, 1, - &block_type_tab[i][j][0][0], 2, 1, - INIT_VLC_USE_NEW_STATIC); - } - } -} - -static void init_mv(FourXContext *f, int linesize) -{ - int i; - - for (i = 0; i < 256; i++) { - if (f->version > 1) - f->mv[i] = mv[i][0] + mv[i][1] * linesize / 2; - else - f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * linesize / 2; - } -} - -#if HAVE_BIGENDIAN -#define LE_CENTRIC_MUL(dst, src, scale, dc) \ - { \ - unsigned tmpval = AV_RN32(src); \ - tmpval = (tmpval << 16) | (tmpval >> 16); \ - tmpval = tmpval * (scale) + (dc); \ - tmpval = (tmpval << 16) | (tmpval >> 16); \ - AV_WN32A(dst, tmpval); \ - } -#else -#define LE_CENTRIC_MUL(dst, src, scale, dc) \ - { \ - unsigned tmpval = AV_RN32(src) * (scale) + (dc); \ - AV_WN32A(dst, tmpval); \ - } -#endif - -static inline void mcdc(uint16_t *dst, const uint16_t *src, int log2w, - int h, int stride, int scale, unsigned dc) -{ - int i; - dc *= 0x10001; - - switch (log2w) { - case 0: - for (i = 0; i < h; i++) { - dst[0] = scale * src[0] + dc; - if (scale) - src += stride; - dst += stride; - } - break; - case 1: - for (i = 0; i < h; i++) { - LE_CENTRIC_MUL(dst, src, scale, dc); - if (scale) - src += stride; - dst += stride; - } - break; - case 2: - for (i = 0; i < h; i++) { - LE_CENTRIC_MUL(dst, src, scale, dc); - LE_CENTRIC_MUL(dst + 2, src + 2, scale, dc); - if (scale) - src += stride; - dst += stride; - } - break; - case 3: - for (i = 0; i < h; i++) { - LE_CENTRIC_MUL(dst, src, scale, dc); - LE_CENTRIC_MUL(dst + 2, src + 2, scale, dc); - LE_CENTRIC_MUL(dst + 4, src + 4, scale, dc); - LE_CENTRIC_MUL(dst + 6, src + 6, scale, dc); - if (scale) - src += stride; - dst += stride; - } - break; - default: - av_assert0(0); - } -} - -static int decode_p_block(FourXContext *f, uint16_t *dst, const uint16_t *src, - int log2w, int log2h, int stride) -{ - int index, h, code, ret, scale = 1; - uint16_t *start, *end; - unsigned dc = 0; - - av_assert0(log2w >= 0 && log2h >= 0); - - index = size2index[log2h][log2w]; - av_assert0(index >= 0); - - if (get_bits_left(&f->gb) < 1) - return AVERROR_INVALIDDATA; - h = 1 << log2h; - code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table, - 
BLOCK_TYPE_VLC_BITS, 1); - av_assert0(code >= 0 && code <= 6); - - start = f->last_frame_buffer; - end = start + stride * (f->avctx->height - h + 1) - (1 << log2w); - - if (code == 1) { - log2h--; - if ((ret = decode_p_block(f, dst, src, log2w, log2h, stride)) < 0) - return ret; - return decode_p_block(f, dst + (stride << log2h), - src + (stride << log2h), - log2w, log2h, stride); - } else if (code == 2) { - log2w--; - if ((ret = decode_p_block(f, dst , src, log2w, log2h, stride)) < 0) - return ret; - return decode_p_block(f, dst + (1 << log2w), - src + (1 << log2w), - log2w, log2h, stride); - } else if (code == 6) { - if (bytestream2_get_bytes_left(&f->g2) < 4) { - av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); - return AVERROR_INVALIDDATA; - } - if (log2w) { - dst[0] = bytestream2_get_le16u(&f->g2); - dst[1] = bytestream2_get_le16u(&f->g2); - } else { - dst[0] = bytestream2_get_le16u(&f->g2); - dst[stride] = bytestream2_get_le16u(&f->g2); - } - return 0; - } - - if ((code&3)==0 && bytestream2_get_bytes_left(&f->g) < 1) { - av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n"); - return AVERROR_INVALIDDATA; - } - - if (code == 0) { - src += f->mv[bytestream2_get_byte(&f->g)]; - } else if (code == 3 && f->version >= 2) { - return 0; - } else if (code == 4) { - src += f->mv[bytestream2_get_byte(&f->g)]; - if (bytestream2_get_bytes_left(&f->g2) < 2){ - av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); - return AVERROR_INVALIDDATA; - } - dc = bytestream2_get_le16(&f->g2); - } else if (code == 5) { - if (bytestream2_get_bytes_left(&f->g2) < 2){ - av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); - return AVERROR_INVALIDDATA; - } - av_assert0(start <= src && src <= end); - scale = 0; - dc = bytestream2_get_le16(&f->g2); - } - - if (start > src || src > end) { - av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); - return AVERROR_INVALIDDATA; - } - - mcdc(dst, src, log2w, h, stride, scale, dc); - - return 0; -} - -static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length) -{ - int x, y; - const int width = f->avctx->width; - const int height = f->avctx->height; - uint16_t *dst = f->frame_buffer; - uint16_t *src; - unsigned int bitstream_size, bytestream_size, wordstream_size, extra, - bytestream_offset, wordstream_offset; - int ret; - - src = f->last_frame_buffer; - - if (f->version > 1) { - extra = 20; - if (length < extra) - return AVERROR_INVALIDDATA; - bitstream_size = AV_RL32(buf + 8); - wordstream_size = AV_RL32(buf + 12); - bytestream_size = AV_RL32(buf + 16); - } else { - extra = 0; - bitstream_size = AV_RL16(buf - 4); - wordstream_size = AV_RL16(buf - 2); - bytestream_size = FFMAX(length - bitstream_size - wordstream_size, 0); - } - - if (bitstream_size > length || bitstream_size >= INT_MAX/8 || - bytestream_size > length - bitstream_size || - wordstream_size > length - bytestream_size - bitstream_size || - extra > length - bytestream_size - bitstream_size - wordstream_size) { - av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size, - bitstream_size+ bytestream_size+ wordstream_size - length); - return AVERROR_INVALIDDATA; - } - - av_fast_padded_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, - bitstream_size); - if (!f->bitstream_buffer) - return AVERROR(ENOMEM); - f->bbdsp.bswap_buf(f->bitstream_buffer, (const uint32_t *) (buf + extra), - bitstream_size / 4); - init_get_bits(&f->gb, f->bitstream_buffer, 8 * bitstream_size); - - wordstream_offset = extra + bitstream_size; - 
bytestream_offset = extra + bitstream_size + wordstream_size; - bytestream2_init(&f->g2, buf + wordstream_offset, - length - wordstream_offset); - bytestream2_init(&f->g, buf + bytestream_offset, - length - bytestream_offset); - - init_mv(f, width * 2); - - for (y = 0; y < height; y += 8) { - for (x = 0; x < width; x += 8) - if ((ret = decode_p_block(f, dst + x, src + x, 3, 3, width)) < 0) - return ret; - src += 8 * width; - dst += 8 * width; - } - - return 0; -} - -/** - * decode block and dequantize. - * Note this is almost identical to MJPEG. - */ -static int decode_i_block(FourXContext *f, int16_t *block) -{ - int code, i, j, level, val; - - if (get_bits_left(&f->pre_gb) < 2) { - av_log(f->avctx, AV_LOG_ERROR, "%d bits left before decode_i_block()\n", get_bits_left(&f->pre_gb)); - return AVERROR_INVALIDDATA; - } - - /* DC coef */ - val = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3); - if (val >> 4) { - av_log(f->avctx, AV_LOG_ERROR, "error dc run != 0\n"); - return AVERROR_INVALIDDATA; - } - - if (val) - val = get_xbits(&f->gb, val); - - val = val * dequant_table[0] + f->last_dc; - f->last_dc = block[0] = val; - /* AC coefs */ - i = 1; - for (;;) { - code = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3); - - /* EOB */ - if (code == 0) - break; - if (code == 0xf0) { - i += 16; - if (i >= 64) { - av_log(f->avctx, AV_LOG_ERROR, "run %d overflow\n", i); - return 0; - } - } else { - if (code & 0xf) { - level = get_xbits(&f->gb, code & 0xf); - } else { - av_log(f->avctx, AV_LOG_ERROR, "0 coeff\n"); - return AVERROR_INVALIDDATA; - } - i += code >> 4; - if (i >= 64) { - av_log(f->avctx, AV_LOG_ERROR, "run %d overflow\n", i); - return 0; - } - - j = ff_zigzag_direct[i]; - block[j] = level * dequant_table[j]; - i++; - if (i >= 64) - break; - } - } - - return 0; -} - -static inline void idct_put(FourXContext *f, int x, int y) -{ - int16_t (*block)[64] = f->block; - int stride = f->avctx->width; - int i; - uint16_t *dst = f->frame_buffer + y * stride + x; - - for (i = 0; i < 4; i++) { - block[i][0] += 0x80 * 8 * 8; - idct(block[i]); - } - - if (!(f->avctx->flags & AV_CODEC_FLAG_GRAY)) { - for (i = 4; i < 6; i++) - idct(block[i]); - } - - /* Note transform is: - * y = ( 1b + 4g + 2r) / 14 - * cb = ( 3b - 2g - 1r) / 14 - * cr = (-1b - 4g + 5r) / 14 */ - for (y = 0; y < 8; y++) { - for (x = 0; x < 8; x++) { - int16_t *temp = block[(x >> 2) + 2 * (y >> 2)] + - 2 * (x & 3) + 2 * 8 * (y & 3); // FIXME optimize - int cb = block[4][x + 8 * y]; - int cr = block[5][x + 8 * y]; - int cg = (cb + cr) >> 1; - int y; - - cb += cb; - - y = temp[0]; - dst[0] = ((y + cb) >> 3) + (((y - cg) & 0xFC) << 3) + (((y + cr) & 0xF8) << 8); - y = temp[1]; - dst[1] = ((y + cb) >> 3) + (((y - cg) & 0xFC) << 3) + (((y + cr) & 0xF8) << 8); - y = temp[8]; - dst[stride] = ((y + cb) >> 3) + (((y - cg) & 0xFC) << 3) + (((y + cr) & 0xF8) << 8); - y = temp[9]; - dst[1 + stride] = ((y + cb) >> 3) + (((y - cg) & 0xFC) << 3) + (((y + cr) & 0xF8) << 8); - dst += 2; - } - dst += 2 * stride - 2 * 8; - } -} - -static int decode_i_mb(FourXContext *f) -{ - int ret; - int i; - - f->bdsp.clear_blocks(f->block[0]); - - for (i = 0; i < 6; i++) - if ((ret = decode_i_block(f, f->block[i])) < 0) - return ret; - - return 0; -} - -static const uint8_t *read_huffman_tables(FourXContext *f, - const uint8_t * const buf, - int buf_size) -{ - int frequency[512] = { 0 }; - uint8_t flag[512]; - int up[512]; - uint8_t len_tab[257]; - int bits_tab[257]; - int start, end; - const uint8_t *ptr = buf; - const uint8_t *ptr_end = buf + 
buf_size; - int j; - - memset(up, -1, sizeof(up)); - - start = *ptr++; - end = *ptr++; - for (;;) { - int i; - - if (ptr_end - ptr < FFMAX(end - start + 1, 0) + 1) { - av_log(f->avctx, AV_LOG_ERROR, "invalid data in read_huffman_tables\n"); - return NULL; - } - - for (i = start; i <= end; i++) - frequency[i] = *ptr++; - start = *ptr++; - if (start == 0) - break; - - end = *ptr++; - } - frequency[256] = 1; - - while ((ptr - buf) & 3) - ptr++; // 4byte align - - if (ptr > ptr_end) { - av_log(f->avctx, AV_LOG_ERROR, "ptr overflow in read_huffman_tables\n"); - return NULL; - } - - for (j = 257; j < 512; j++) { - int min_freq[2] = { 256 * 256, 256 * 256 }; - int smallest[2] = { 0, 0 }; - int i; - for (i = 0; i < j; i++) { - if (frequency[i] == 0) - continue; - if (frequency[i] < min_freq[1]) { - if (frequency[i] < min_freq[0]) { - min_freq[1] = min_freq[0]; - smallest[1] = smallest[0]; - min_freq[0] = frequency[i]; - smallest[0] = i; - } else { - min_freq[1] = frequency[i]; - smallest[1] = i; - } - } - } - if (min_freq[1] == 256 * 256) - break; - - frequency[j] = min_freq[0] + min_freq[1]; - flag[smallest[0]] = 0; - flag[smallest[1]] = 1; - up[smallest[0]] = - up[smallest[1]] = j; - frequency[smallest[0]] = frequency[smallest[1]] = 0; - } - - for (j = 0; j < 257; j++) { - int node, len = 0, bits = 0; - - for (node = j; up[node] != -1; node = up[node]) { - bits += flag[node] << len; - len++; - if (len > 31) - // can this happen at all ? - av_log(f->avctx, AV_LOG_ERROR, - "vlc length overflow\n"); - } - - bits_tab[j] = bits; - len_tab[j] = len; - } - - ff_free_vlc(&f->pre_vlc); - if (init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257, len_tab, 1, 1, - bits_tab, 4, 4, 0)) - return NULL; - - return ptr; -} - -static int mix(int c0, int c1) -{ - int blue = 2 * (c0 & 0x001F) + (c1 & 0x001F); - int green = (2 * (c0 & 0x03E0) + (c1 & 0x03E0)) >> 5; - int red = 2 * (c0 >> 10) + (c1 >> 10); - return red / 3 * 1024 + green / 3 * 32 + blue / 3; -} - -static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length) -{ - int x, y, x2, y2; - const int width = f->avctx->width; - const int height = f->avctx->height; - const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4); - uint16_t *dst = f->frame_buffer; - const uint8_t *buf_end = buf + length; - GetByteContext g3; - - if (length < mbs * 8) { - av_log(f->avctx, AV_LOG_ERROR, "packet size too small\n"); - return AVERROR_INVALIDDATA; - } - bytestream2_init(&g3, buf, length); - - for (y = 0; y < height; y += 16) { - for (x = 0; x < width; x += 16) { - unsigned int color[4] = { 0 }, bits; - if (buf_end - buf < 8) - return AVERROR_INVALIDDATA; - // warning following is purely guessed ... 
- color[0] = bytestream2_get_le16u(&g3); - color[1] = bytestream2_get_le16u(&g3); - - if (color[0] & 0x8000) - av_log(f->avctx, AV_LOG_ERROR, "unk bit 1\n"); - if (color[1] & 0x8000) - av_log(f->avctx, AV_LOG_ERROR, "unk bit 2\n"); - - color[2] = mix(color[0], color[1]); - color[3] = mix(color[1], color[0]); - - bits = bytestream2_get_le32u(&g3); - for (y2 = 0; y2 < 16; y2++) { - for (x2 = 0; x2 < 16; x2++) { - int index = 2 * (x2 >> 2) + 8 * (y2 >> 2); - dst[y2 * width + x2] = color[(bits >> index) & 3]; - } - } - dst += 16; - } - dst += 16 * width - x; - } - - return 0; -} - -static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length) -{ - int x, y, ret; - const int width = f->avctx->width; - const int height = f->avctx->height; - const unsigned int bitstream_size = AV_RL32(buf); - unsigned int prestream_size; - const uint8_t *prestream; - - if (bitstream_size > (1 << 26)) - return AVERROR_INVALIDDATA; - - if (length < bitstream_size + 12) { - av_log(f->avctx, AV_LOG_ERROR, "packet size too small\n"); - return AVERROR_INVALIDDATA; - } - - prestream_size = 4 * AV_RL32(buf + bitstream_size + 4); - prestream = buf + bitstream_size + 12; - - if (prestream_size + bitstream_size + 12 != length - || prestream_size > (1 << 26)) { - av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", - prestream_size, bitstream_size, length); - return AVERROR_INVALIDDATA; - } - - prestream = read_huffman_tables(f, prestream, prestream_size); - if (!prestream) { - av_log(f->avctx, AV_LOG_ERROR, "Error reading Huffman tables.\n"); - return AVERROR_INVALIDDATA; - } - - av_assert0(prestream <= buf + length); - - init_get_bits(&f->gb, buf + 4, 8 * bitstream_size); - - prestream_size = length + buf - prestream; - - av_fast_padded_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, - prestream_size); - if (!f->bitstream_buffer) - return AVERROR(ENOMEM); - f->bbdsp.bswap_buf(f->bitstream_buffer, (const uint32_t *) prestream, - prestream_size / 4); - init_get_bits(&f->pre_gb, f->bitstream_buffer, 8 * prestream_size); - - f->last_dc = 0 * 128 * 8 * 8; - - for (y = 0; y < height; y += 16) { - for (x = 0; x < width; x += 16) { - if ((ret = decode_i_mb(f)) < 0) - return ret; - - idct_put(f, x, y); - } - } - - if (get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3) != 256) - av_log(f->avctx, AV_LOG_ERROR, "end mismatch\n"); - - return 0; -} - -static int decode_frame(AVCodecContext *avctx, AVFrame *picture, - int *got_frame, AVPacket *avpkt) -{ - const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; - FourXContext *const f = avctx->priv_data; - int i, frame_4cc, frame_size, ret; - - if (buf_size < 20) - return AVERROR_INVALIDDATA; - - av_assert0(avctx->width % 16 == 0 && avctx->height % 16 == 0); - - if (buf_size < AV_RL32(buf + 4) + 8) { - av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %"PRIu32"\n", - buf_size, AV_RL32(buf + 4)); - return AVERROR_INVALIDDATA; - } - - frame_4cc = AV_RL32(buf); - - if (frame_4cc == AV_RL32("cfrm")) { - int free_index = -1; - int id, whole_size; - const int data_size = buf_size - 20; - CFrameBuffer *cfrm; - - if (f->version <= 1) { - av_log(f->avctx, AV_LOG_ERROR, "cfrm in version %d\n", f->version); - return AVERROR_INVALIDDATA; - } - - id = AV_RL32(buf + 12); - whole_size = AV_RL32(buf + 16); - - if (data_size < 0 || whole_size < 0) { - av_log(f->avctx, AV_LOG_ERROR, "sizes invalid\n"); - return AVERROR_INVALIDDATA; - } - - for (i = 0; i < CFRAME_BUFFER_COUNT; i++) - if (f->cfrm[i].id && f->cfrm[i].id < avctx->frame_num) - av_log(f->avctx, 
AV_LOG_ERROR, "lost c frame %d\n", - f->cfrm[i].id); - - for (i = 0; i < CFRAME_BUFFER_COUNT; i++) { - if (f->cfrm[i].id == id) - break; - if (f->cfrm[i].size == 0) - free_index = i; - } - - if (i >= CFRAME_BUFFER_COUNT) { - i = free_index; - f->cfrm[i].id = id; - } - cfrm = &f->cfrm[i]; - - if (data_size > UINT_MAX - cfrm->size - AV_INPUT_BUFFER_PADDING_SIZE) - return AVERROR_INVALIDDATA; - - cfrm->data = av_fast_realloc(cfrm->data, &cfrm->allocated_size, - cfrm->size + data_size + AV_INPUT_BUFFER_PADDING_SIZE); - // explicit check needed as memcpy below might not catch a NULL - if (!cfrm->data) { - av_log(f->avctx, AV_LOG_ERROR, "realloc failure\n"); - return AVERROR(ENOMEM); - } - - memcpy(cfrm->data + cfrm->size, buf + 20, data_size); - cfrm->size += data_size; - - if (cfrm->size >= whole_size) { - buf = cfrm->data; - frame_size = cfrm->size; - - if (id != avctx->frame_num) - av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %"PRId64"\n", - id, avctx->frame_num); - - if (f->version <= 1) - return AVERROR_INVALIDDATA; - - cfrm->size = cfrm->id = 0; - frame_4cc = AV_RL32("pfrm"); - } else - return buf_size; - } else { - buf = buf + 12; - frame_size = buf_size - 12; - } - - if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) - return ret; - - if (frame_4cc == AV_RL32("ifr2")) { - picture->pict_type = AV_PICTURE_TYPE_I; - if ((ret = decode_i2_frame(f, buf - 4, frame_size + 4)) < 0) { - av_log(f->avctx, AV_LOG_ERROR, "decode i2 frame failed\n"); - return ret; - } - } else if (frame_4cc == AV_RL32("ifrm")) { - picture->pict_type = AV_PICTURE_TYPE_I; - if ((ret = decode_i_frame(f, buf, frame_size)) < 0) { - av_log(f->avctx, AV_LOG_ERROR, "decode i frame failed\n"); - return ret; - } - } else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) { - picture->pict_type = AV_PICTURE_TYPE_P; - if ((ret = decode_p_frame(f, buf, frame_size)) < 0) { - av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n"); - return ret; - } - } else if (frame_4cc == AV_RL32("snd_")) { - av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", - buf_size); - return AVERROR_INVALIDDATA; - } else { - av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", - buf_size); - return AVERROR_INVALIDDATA; - } - - picture->key_frame = picture->pict_type == AV_PICTURE_TYPE_I; - - av_image_copy_plane(picture->data[0], picture->linesize[0], - (const uint8_t*)f->frame_buffer, avctx->width * 2, - avctx->width * 2, avctx->height); - FFSWAP(uint16_t *, f->frame_buffer, f->last_frame_buffer); - - *got_frame = 1; - - return buf_size; -} - -static av_cold int decode_end(AVCodecContext *avctx) -{ - FourXContext * const f = avctx->priv_data; - int i; - - av_freep(&f->frame_buffer); - av_freep(&f->last_frame_buffer); - av_freep(&f->bitstream_buffer); - f->bitstream_buffer_size = 0; - for (i = 0; i < CFRAME_BUFFER_COUNT; i++) { - av_freep(&f->cfrm[i].data); - f->cfrm[i].allocated_size = 0; - } - ff_free_vlc(&f->pre_vlc); - - return 0; -} - -static av_cold int decode_init(AVCodecContext *avctx) -{ - static AVOnce init_static_once = AV_ONCE_INIT; - FourXContext * const f = avctx->priv_data; - int ret; - - if (avctx->extradata_size != 4 || !avctx->extradata) { - av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n"); - return AVERROR_INVALIDDATA; - } - if((avctx->width % 16) || (avctx->height % 16)) { - av_log(avctx, AV_LOG_ERROR, "unsupported width/height\n"); - return AVERROR_INVALIDDATA; - } - - ret = av_image_check_size(avctx->width, avctx->height, 0, avctx); - if (ret < 0) - return ret; - - 
f->frame_buffer = av_mallocz(avctx->width * avctx->height * 2); - f->last_frame_buffer = av_mallocz(avctx->width * avctx->height * 2); - if (!f->frame_buffer || !f->last_frame_buffer) - return AVERROR(ENOMEM); - - f->version = AV_RL32(avctx->extradata) >> 16; - ff_blockdsp_init(&f->bdsp); - ff_bswapdsp_init(&f->bbdsp); - f->avctx = avctx; - - if (f->version > 2) - avctx->pix_fmt = AV_PIX_FMT_RGB565; - else - avctx->pix_fmt = AV_PIX_FMT_BGR555; - - ff_thread_once(&init_static_once, init_vlcs); - - return 0; -} - -const FFCodec ff_fourxm_decoder = { - .p.name = "4xm", - CODEC_LONG_NAME("4X Movie"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_4XM, - .priv_data_size = sizeof(FourXContext), - .init = decode_init, - .close = decode_end, - FF_CODEC_DECODE_CB(decode_frame), - .p.capabilities = AV_CODEC_CAP_DR1, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; diff --git a/spaces/competitions/CryCeleb2023/Dockerfile b/spaces/competitions/CryCeleb2023/Dockerfile deleted file mode 100644 index 0afc086eedf9fcd5a42adf6b9682cdb15d73a410..0000000000000000000000000000000000000000 --- a/spaces/competitions/CryCeleb2023/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM huggingface/competitions:latest -CMD competitions run \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Bed Wars A Blockman GO Adventure for IOS Users.md b/spaces/congsaPfin/Manga-OCR/logs/Bed Wars A Blockman GO Adventure for IOS Users.md deleted file mode 100644 index 133f736505b0db5794d2c69c44996be9ca673627..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Bed Wars A Blockman GO Adventure for IOS Users.md +++ /dev/null @@ -1,170 +0,0 @@ - -

Bed Wars APK IOS: How to Play the Popular Team-Based PVP Game on Your iPhone or iPad

-

If you are a fan of team-based PVP games, you might have heard of Bed Wars, a game that has attracted millions of players on Android and PC platforms. But what if you want to play Bed Wars on your iPhone or iPad? Is there a way to download and install Bed Wars APK IOS on your iOS device? In this article, we will answer these questions and more. We will explain what Bed Wars is, why it is popular, how to download and install Bed Wars APK IOS, and what are some alternatives for Bed Wars APK IOS. Let's get started!

-



-

What is Bed Wars?

-

Bed Wars is a game developed by Blockman GO Studio, a company that specializes in creating sandbox games with pixelated graphics and various modes. Bed Wars is one of their most popular games, with over 50 million downloads on Google Play Store alone.

-

The rules of the game

-

The basic premise of Bed Wars is simple: you are divided into teams of four players each, and each team has a base with a bed. Your goal is to protect your bed from being destroyed by other teams, while trying to destroy their beds and eliminate them from the game. The last team standing wins the game.

-

To achieve this goal, you need to collect resources from your island and the center island. You can use these resources to buy blocks, weapons, tools, traps, and other items from the shop. You can also upgrade your resource generators and your team perks with diamonds. You can build bridges with blocks to connect to other islands and attack your enemies. But be careful, because once your bed is destroyed, you cannot respawn anymore.

-

The modes of the game

-

Bed Wars has three modes: Solo, Duo, and Quad. In Solo mode, you play alone against seven other players. In Duo mode, you play with a partner against three other teams of two players each. In Quad mode, you play with three teammates against three other teams of four players each. Each mode has different maps that are randomly selected.

-

The items of the game

-

Bed Wars has a variety of items that you can buy from the shop with different resources. Here are some examples:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Item | Resource | Use |
| --- | --- | --- |
| Wool | Iron | Build bridges and walls |
| Wooden sword | Iron | Fight enemies |
| Pickaxe | Gold | Break blocks faster |
| TNT | Gold | Explode enemies' beds and blocks |
| Bow | Diamonds | Shoot arrows at enemies from a distance |
| Invisibility potion | Diamonds | Become invisible for a short time |
-

There are many more items that you can discover and use in Bed Wars. You can also customize your character's appearance and chat with other players in the lobby.

-

Why is Bed Wars popular?

-

Bed Wars is a game that has gained a lot of popularity among players of all ages and backgrounds. There are several reasons for this, which we cover below: the benefits of playing it, the challenges it throws at you, and the tips and tricks that help you improve.

-


-

The benefits of playing Bed Wars

-

Playing Bed Wars can have many benefits for your mental and physical health, such as:

-
    -
  • Improving your strategic thinking and problem-solving skills
  • -
  • Enhancing your creativity and imagination
  • -
  • Boosting your teamwork and communication skills
  • -
  • Reducing your stress and anxiety levels
  • -
  • Having fun and making new friends
  • -
-

The challenges of playing Bed Wars

-

Playing Bed Wars can also be challenging and exciting, as you have to face different situations and opponents every time. Some of the challenges of playing Bed Wars are:

-
    -
  • Managing your resources and time efficiently
  • -
  • Defending your bed and base from multiple attacks
  • -
  • Attacking other teams' beds and bases without exposing yourself
  • -
  • Adapting to different maps and modes
  • -
  • Dealing with unexpected events and surprises
  • -
-

The tips and tricks for playing Bed Wars

-

If you want to improve your skills and performance in Bed Wars, you can follow some tips and tricks, such as:

-
    -
  • Choose a team color that blends with the map
  • -
  • Cover your bed with multiple layers of different blocks
  • -
  • Upgrade your resource generators and team perks as soon as possible
  • -
  • Buy a fireball and use it to destroy bridges and knock back enemies
  • -
  • Use ender pearls to teleport to other islands or escape from danger
  • -
-

How to download and install Bed Wars APK IOS?

-

If you are an iOS user, you might be wondering how to download and install Bed Wars APK IOS on your iPhone or iPad. Unfortunately, there is no official version of Bed Wars for iOS devices yet. However, there are some ways to play Bed Wars on your iOS device, such as:

-

The requirements for Bed Wars APK IOS

-

To play Bed Wars on your iOS device, you will need the following requirements:

-
    -
  • An iOS device with iOS 9.0 or later
  • -
  • A stable internet connection
  • -
  • A third-party app installer such as TutuApp or AppValley
  • -
  • A VPN app such as NordVPN or ExpressVPN (optional)
  • -
-

The steps for downloading and installing Bed Wars APK IOS

-

To download and install Bed Wars APK IOS on your iOS device, you can follow these steps:

-
    -
  1. Open your Safari browser and go to the website of the app installer you want to use, such as TutuApp or AppValley.
  2. Tap on the download button and follow the instructions to install the app installer on your device.
  3. Go to your device settings and trust the app installer's profile.
  4. Open the app installer and search for Bed Wars APK IOS.
  5. Tap on the download button and follow the instructions to install Bed Wars APK IOS on your device.
  6. If you want to avoid any potential issues or bans, you can use a VPN app to change your location and IP address; a quick file-integrity check, like the sketch after this list, also helps you spot a corrupted or tampered download.
  7. Enjoy playing Bed Wars on your iOS device!
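If you also have a computer handy, one extra precaution is to compare the downloaded file against a checksum published by the source, when the download page provides one. The sketch below is a minimal Python example of that check; the file name and expected hash are placeholders, and it only helps if a SHA-256 value is actually published for the file.

```python
import hashlib
import sys

# Placeholder values: replace with the file you downloaded and the checksum
# published by the download page (if it publishes one at all).
FILE_PATH = "BedWars.ipa"
EXPECTED_SHA256 = "put-the-published-checksum-here"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large files do not need to fit in memory.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(FILE_PATH)
    print("SHA-256:", actual)
    if actual.lower() != EXPECTED_SHA256.lower():
        sys.exit("Checksum mismatch - do not install this file.")
    print("Checksum matches the published value.")
```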
-

The alternatives for Bed Wars APK IOS

-

If you don't want to use a third-party app installer or a VPN app, you can also try some alternatives for Bed Wars APK IOS, such as:

-
    -
  • Minecraft: Minecraft is a sandbox game that allows you to create and explore infinite worlds. You can also play multiplayer modes such as Skywars, Eggwars, or Hunger Games, which are similar to Bed Wars.
  • -
  • Roblox: Roblox is a platform that lets you play and create games of various genres. You can find many games that are inspired by Bed Wars, such as Egg Hunt, Treasure Hunt, or Island Royale.
  • -
  • Fortnite: Fortnite is a battle royale game that pits 100 players against each other in a shrinking map. You can also build structures, collect weapons, and team up with other players.
  • -
-

Conclusion

-

In conclusion, Bed Wars is a popular team-based PVP game that challenges you to protect your bed and destroy your enemies' beds and bases. You can play Bed Wars on your Android or PC device, or you can try to download and install Bed Wars APK IOS on your iPhone or iPad with some methods. You can also enjoy some alternatives for Bed Wars APK IOS, such as Minecraft, Roblox, or Fortnite. We hope this article has helped you learn more about Bed Wars and how to play it on your iOS device. Have fun and good luck!

-

FAQs

-

Here are some frequently asked questions about Bed Wars APK IOS:

-

Q: Is Bed Wars APK IOS safe to download and install?

-

A: Bed Wars APK IOS is not an official version of the game, so it may not be safe to download and install. You may encounter some risks such as viruses, malware, or bans. We recommend you to use a trusted app installer and a VPN app to avoid any potential issues.

-

Q: Is Bed Wars APK IOS free to play?

-

A: Yes, Bed Wars APK IOS is free to play, just like the original game. However, you may need to pay for some in-app purchases or subscriptions for the app installer or the VPN app.

-

Q: Can I play Bed Wars APK IOS with my friends?

-

A: Yes, you can play Bed Wars APK IOS with your friends, as long as they are also using the same app installer and VPN app. You can join the same lobby and team up with them.

-

Q: Can I update Bed Wars APK IOS to the latest version?

-

A: Yes, you can update Bed Wars APK IOS to the latest version, as long as the app installer provides the update. You can check the app installer's website or app for any updates.

-

Q: Can I transfer my progress from Bed Wars APK IOS to the original game?

-

A: No, you cannot transfer your progress from Bed Wars APK IOS to the original game, as they are not compatible. You will have to start from scratch if you switch to the original game.

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Bus Simulator Indonesia APK Experience the Culture and Traffic of Indonesia.md b/spaces/congsaPfin/Manga-OCR/logs/Bus Simulator Indonesia APK Experience the Culture and Traffic of Indonesia.md deleted file mode 100644 index 41f6f7182cb8aa63656483d2ed884338b5e17ce0..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Bus Simulator Indonesia APK Experience the Culture and Traffic of Indonesia.md +++ /dev/null @@ -1,100 +0,0 @@ -
-

Bus Simulator Indonesia APK Uptodown: A Fun and Realistic Game for Android Users

-

If you are a fan of simulation games, you might have heard of Bus Simulator Indonesia, a popular game that lets you drive buses in various cities and regions of Indonesia. But did you know that you can download the Bus Simulator Indonesia APK for Android from Uptodown, a trusted platform for downloading apps and games? In this article, we will tell you everything you need to know about Bus Simulator Indonesia APK Uptodown, including what it is, how to download and install it, and why you should play it.

-

What is Bus Simulator Indonesia?

-

Bus Simulator Indonesia is a game developed by Maleo, an Indonesian game studio. It is a realistic and fun simulation game that allows you to experience what it is like to be a bus driver in Indonesia. You can choose from different types of buses, customize them with your own liveries, and drive them on various routes across the country. You can also enjoy the authentic Indonesian scenery, culture, and landmarks as you drive.

-



-

Features of Bus Simulator Indonesia

-

Bus Simulator Indonesia has many features that make it an enjoyable and immersive game. Here are some of them:

-

- Customizable buses and liveries

-

You can choose from a wide range of buses, from minibuses to double-deckers, and customize them with your own designs and colors. You can also download and use liveries created by other players, or share your own creations with the community.

-

- Realistic traffic and weather conditions

-

You can experience the realistic traffic and weather conditions of Indonesia, such as traffic jams, rain, fog, night, and day cycles. You also have to follow the traffic rules and regulations, such as speed limits, signals, signs, tolls, etc. You can also interact with other vehicles and pedestrians on the road.

-

- Various game modes and challenges

-

You can play Bus Simulator Indonesia in different game modes, such as free mode, career mode, or multiplayer mode. You can also take on various challenges, such as driving on steep roads, avoiding obstacles, or completing missions. You can also earn money and rewards by completing tasks and achievements.

-

How to download and install Bus Simulator Indonesia APK from Uptodown?

-

If you want to play Bus Simulator Indonesia on your Android device, you can download the Bus Simulator Indonesia APK from Uptodown. Uptodown is a reliable platform that offers free and safe downloads of apps and games for Android users. Here are the steps to download and install Bus Simulator Indonesia APK from Uptodown:

-

Steps to download and install Bus Simulator Indonesia APK

-

- Visit the Uptodown website and search for Bus Simulator Indonesia

-

Go to https://bus-simulator-indonesia.en.uptodown.com/android on your browser and search for Bus Simulator Indonesia. You will see the game page with the description, screenshots, ratings, reviews, and download button.

-

- Download the latest version of the game APK file

-

Click on the download button and wait for the game APK file to be downloaded on your device. The file size is about 200 MB, so make sure you have enough storage space and a stable internet connection.

-


-

- Enable unknown sources on your Android device settings

-

Before you can install the game APK file, you need to enable unknown sources in your Android device settings. This will allow you to install apps and games from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on; on newer Android versions (8.0 and later) this is a per-app permission called "Install unknown apps" that you grant to your browser or file manager instead. You may see a warning message; read it and only proceed if you trust where the file came from.

-

- Install the game APK file and launch it

-

Once you have enabled unknown sources, you can install the game APK file by tapping on it and following the instructions. After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You may need to grant some permissions to the game, such as access to your storage, location, microphone, etc.
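If you prefer to handle this step from a computer, the short Python sketch below performs the same install over USB using adb from Android's platform-tools. This is only an illustration: it assumes adb is installed and on your PATH, USB debugging is enabled on the phone, and the APK file name is a placeholder for whatever file you actually downloaded.

```python
import subprocess

# Placeholder file name: replace with the APK you downloaded from Uptodown.
APK_PATH = "bus-simulator-indonesia.apk"

def run(cmd: list[str]) -> None:
    print("$", " ".join(cmd))
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    # List connected devices first so a missing driver or cable is obvious.
    run(["adb", "devices"])
    # -r reinstalls/updates if an older version is already on the device.
    run(["adb", "install", "-r", APK_PATH])
```

If the device is connected properly, adb reports the install result on the command line, and the game then appears in your app drawer just as it would after a manual install.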

-

Why should you play Bus Simulator Indonesia APK from Uptodown?

-

There are many reasons why you should play Bus Simulator Indonesia APK from Uptodown. Here are some of them:

-

Benefits of playing Bus Simulator Indonesia APK from Uptodown

-

- Free and safe download

-

Uptodown offers free and safe downloads of apps and games for Android users. You don't need to register or sign up to download Bus Simulator Indonesia APK from Uptodown. You also don't need to worry about viruses, malware, or spyware, as Uptodown scans all the files before uploading them.

-

- No ads or in-app purchases

-

Another benefit of playing Bus Simulator Indonesia APK from Uptodown is that you don't have to deal with annoying ads or in-app purchases. You can enjoy the game without any interruptions or distractions. You also don't have to spend any money to unlock features or items in the game.

-

- Updated and optimized version of the game

-

Uptodown also provides updated and optimized versions of apps and games for Android users. You can download the latest version of Bus Simulator Indonesia APK from Uptodown, which has improved graphics, performance, and stability. You can also find older versions of the game if you prefer them.

-

Conclusion

-

Bus Simulator Indonesia is a fun and realistic simulation game that lets you drive buses in various cities and regions of Indonesia. You can download the Bus Simulator Indonesia APK from Uptodown, a reliable platform that offers free and safe downloads of apps and games for Android users. You can also enjoy the benefits of playing Bus Simulator Indonesia APK from Uptodown, such as no ads, no in-app purchases, and updated versions of the game. If you are looking for a simulation game that will keep you entertained and engaged, you should try Bus Simulator Indonesia APK from Uptodown.

-

FAQs

-

Here are some frequently asked questions about Bus Simulator Indonesia APK from Uptodown:

-
    -
  • Q: Is Bus Simulator Indonesia APK from Uptodown compatible with my device?
  • -
  • A: Bus Simulator Indonesia APK from Uptodown is compatible with most Android devices that have Android 4.2 or higher. However, some devices may not support some features or functions of the game.
  • -
  • Q: How can I update Bus Simulator Indonesia APK from Uptodown?
  • -
  • A: You can update Bus Simulator Indonesia APK from Uptodown by visiting the Uptodown website and downloading the latest version of the game APK file. You can also enable automatic updates on your device settings to get notified when a new version is available.
  • -
  • Q: How can I uninstall Bus Simulator Indonesia APK from Uptodown?
  • -
  • A: You can uninstall Bus Simulator Indonesia APK from Uptodown by going to Settings > Apps > Bus Simulator Indonesia and tapping on Uninstall. You can also delete the game APK file from your device storage.
  • -
  • Q: How can I contact the developer of Bus Simulator Indonesia?
  • -
  • A: You can contact the developer of Bus Simulator Indonesia by visiting their official website at https://www.maleo.id/ or their Facebook page at https://www.facebook.com/maletogames/. You can also send them an email at support@maleo.id.
  • -
  • Q: How can I get more information about Bus Simulator Indonesia?
  • -
  • A: You can get more information about Bus Simulator Indonesia by visiting the Uptodown website and reading the game description, screenshots, ratings, reviews, and comments. You can also watch videos and tutorials of the game on YouTube or other platforms.
  • -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Daca Cerul Ar Cadea - Iuly Neamtu si Betty Salam (Originala 2023) - Asculta Online pe SoundCloud.md b/spaces/congsaPfin/Manga-OCR/logs/Daca Cerul Ar Cadea - Iuly Neamtu si Betty Salam (Originala 2023) - Asculta Online pe SoundCloud.md deleted file mode 100644 index 3aef16b5b477acda2f2752b40034286c3c9f0b52..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Daca Cerul Ar Cadea - Iuly Neamtu si Betty Salam (Originala 2023) - Asculta Online pe SoundCloud.md +++ /dev/null @@ -1,78 +0,0 @@ - -

Daca Cerul Ar Cadea: A Song Review

-

If you are looking for a catchy and romantic song to listen to, you might want to check out "Daca Cerul Ar Cadea" by Iuly Neamtu and Betty Salam. This is a song in the genre of manele, which is a type of music that originated in Romania and combines elements of folk, pop, and oriental music. The song was released on March 1, 2023, and has over 2 million views on YouTube. The lyrics of the song express the love and devotion of the singers for each other, and their willingness to face any challenge or danger as long as they are together. The title of the song means "If the sky would fall" in Romanian, and it is a metaphor for the extreme situations that they would endure for their love. In this article, we will explore more about this song, its genre, its themes, its popularity, and its reviews.

-

What is manele?

-

Manele is a genre of music that originated in Romania in the late 1980s and early 1990s. It is influenced by various musical styles, such as Turkish, Arabic, Greek, Balkan, Roma, and Romanian folk music. Manele is characterized by its use of synthesizers, keyboards, accordions, violins, guitars, drums, and vocals. The lyrics of manele often deal with themes such as love, money, power, social status, partying, or personal experiences. Manele is considered a controversial genre in Romania, as it is often associated with low culture, poor taste, vulgarity, or criminality. However, manele is also very popular among many Romanians, especially among young people or marginalized groups. Some of the most famous manele artists include Florin Salam, Nicolae Guta, Adrian Minune, Vali Vijelie, Liviu Guta, Denisa Raducu, Sorinel Pustiu, Tzanca Uraganu, Jador, Costel Biju, Dani Mocanu, Susanu, Cristi Dules, Leo de la Kuweit.

-



-

What are the themes and messages of "Daca Cerul Ar Cadea"?

-

"Daca Cerul Ar Cadea" is a love song that expresses the strong feelings and commitment of Iuly Neamtu and Betty Salam for each other. The singers declare that they would give up everything for their love, even their own bodies or lives. They also say that they are not afraid of anything as long as they are together. They use hyperboles such as "if the sky would fall", "if you would leave me", "I don't want money or anything without you", or "I would do anything to make you happy

How popular is "Daca Cerul Ar Cadea"?

-

"Daca Cerul Ar Cadea" is one of the most popular songs in the manele genre in 2023. It has over 2 million views on YouTube, over 1 million streams on SoundCloud, and over 500,000 downloads on Mp3Noi.eu. The song has also been featured on various playlists, radio stations, and podcasts that showcase the best of manele. The song has received positive feedback from many listeners, who praised the singers' voices, the catchy melody, the romantic lyrics, and the quality of the production. Some of the comments on YouTube include:

-
    -
  • "This song is so beautiful and touching, I can feel their love in every word. They are such a cute couple, I wish them all the best." - Maria Popescu
  • -
  • "I love this song, it makes me want to dance and hug my partner. It's so catchy and uplifting, I can't stop listening to it." - Ionut Ionescu
  • -
  • "This is one of the best songs in manele history, it has everything: emotion, passion, energy, rhythm, harmony. Iuly and Betty are amazing singers, they have such powerful and expressive voices. They deserve all the success and recognition." - Florin Petrescu
  • -
-

Conclusion

-

In conclusion, "Daca Cerul Ar Cadea" is a song that showcases the beauty and diversity of manele music. It is a song that expresses the love and devotion of Iuly Neamtu and Betty Salam for each other, and their willingness to face any challenge or danger as long as they are together. The song has a catchy and romantic melody, a rich and varied instrumentation, and a heartfelt and poetic lyrics. The song is very popular among manele fans and has received positive reviews from many listeners. If you are looking for a song that will make you feel happy and inspired, you should definitely listen to "Daca Cerul Ar Cadea" or explore more about manele music.

-

FAQs

-
    -
  1. What does "Daca Cerul Ar Cadea" mean in English?

    "Daca Cerul Ar Cadea" means "If the sky would fall" in English. It is a metaphor for the extreme situations that the singers would endure for their love.

  2. Who are Iuly Neamtu and Betty Salam?

    Iuly Neamtu and Betty Salam are two Romanian singers who specialize in manele music. They are also a couple in real life. They have collaborated on several songs, such as "Esti Frumusel", "Am eu grija de tine", or "As vrea cerul de sub nori".

  3. What is manele music?

    Manele is a genre of music that originated in Romania in the late 1980s and early 1990s. It is influenced by various musical styles, such as Turkish, Arabic, Greek, Balkan, Roma, and Romanian folk music. Manele is characterized by its use of synthesizers, keyboards, accordions, violins, guitars, drums, and vocals. The lyrics of manele often deal with themes such as love, money, power, social status, partying, or personal experiences.

  4. Where can I listen to or download "Daca Cerul Ar Cadea"?

    You can listen to or download "Daca Cerul Ar Cadea" on various platforms, such as SoundCloud, Audiomack, Mp3Noi.eu, or YouTube. You can also find the lyrics of the song on Lyrics Translate or Versuri.ro.

  5. What are some other popular songs in manele?

    Some other popular songs in manele include:

    - - - - - -
    "Saint Tropez" by Florin Salam"Amma Manele" by Jador
    "De ce ma minti" by Nicolae Guta"Baga bani" by Puya feat. Don Baxter
    "Asu si Bobby - Ma iubeste" by Asu si Bobby"Dragoste de inchiriat" by Akcent
    "Tu esti baiat de baiat" by Dani Mocanu"Fara t

    "Fara tine" by Adrian Minune

    "Amor gitano" by Alessio
    -

    You can find more songs in manele on YouTube, Spotify, Apple Music, or other streaming services.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Temple Run 2 APK and Enjoy More Powerups Achievements and Characters.md b/spaces/congsaPfin/Manga-OCR/logs/Download Temple Run 2 APK and Enjoy More Powerups Achievements and Characters.md deleted file mode 100644 index 11f288cc2def42d97b96d107a3623296d099bdfc..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Temple Run 2 APK and Enjoy More Powerups Achievements and Characters.md +++ /dev/null @@ -1,219 +0,0 @@ - -

    Download APK Game Temple Run 2: A Guide for Android Users

    -

    Are you a fan of endless running games? Do you love the thrill of escaping from a giant monkey while dodging obstacles and collecting treasures? If yes, then you might want to download APK game Temple Run 2, one of the most popular and addictive games on the Google Play Store. But what if you can't access the Play Store or you want to enjoy some extra features that are not available in the official version? In this article, we will show you how to download and install Temple Run 2 APK on your Android device, as well as the benefits and risks of doing so. We will also share some tips and tricks for playing Temple Run 2 like a pro. Let's get started!

    -

    What is Temple Run 2?

    -

    Temple Run 2 is a sequel to the original Temple Run, which was released in 2011 by Imangi Studios. It is an infinite runner game where you control an adventurer who has stolen a cursed idol from a temple and must run away from the evil demon monkeys that are chasing him. Along the way, you have to avoid various obstacles such as cliffs, zip lines, mines, and forests, while collecting coins, gems, and power-ups. You can also unlock different characters and locations, each with their own special abilities and themes. Temple Run 2 has over a zillion downloads on the Play Store and has received positive reviews from critics and players alike. It is also free to play, but you can purchase some in-game items with real money if you wish.

    -



    -

    Features of Temple Run 2

    -

    Some of the features that make Temple Run 2 stand out from other running games are:

    -
      -
    • Beautiful new graphics: The game has improved its visuals and animations, making it more realistic and immersive. The environments are also more diverse and organic, with stunning details and effects.
    • -
    • New obstacles: The game has added more challenges and surprises to keep you on your toes. You will encounter fire, waterfalls, rotating saws, swinging ropes, mine carts, and more.
    • -
    • More power-ups: The game has introduced new power-ups that can help you escape or boost your performance. You can use shields, magnets, boosters, head starts, coin bonuses, and more.
    • -
    • More achievements: The game has increased its number of achievements that you can unlock by completing certain tasks or reaching certain milestones. You can also compare your achievements with your friends or other players around the world.
    • -
    • Special powers for each character: The game has given each character a unique power that can be activated by filling up a meter. For example, Guy Dangerous can summon a shield that protects him from obstacles, Scarlett Fox can dash forward at high speed, Barry Bones can spawn a coin trail behind him, and so on.
    • -
    • Bigger monkey: The game has made the monkey that chases you bigger and scarier than ever. It can also jump over obstacles or swipe at you if you get too close.
    • -
    -

    How to download and install Temple Run 2 APK on Android

    -

    If you want to download APK game Temple Run 2 on your Android device, you will need to follow these steps:

    -
      -
    1. Go to a trusted website that offers Temple Run 2 APK files, such as APKCombo or APKPure.
    2. Search for Temple Run 2 and choose the latest version of the game. You can also check the ratings, reviews, and screenshots of the game before downloading it.
    3. Download the Temple Run 2 APK file to your device. You may need to enable the option to install apps from unknown sources in your device settings. This will allow you to install apps that are not from the Play Store.
    4. Once the download is complete, locate the Temple Run 2 APK file in your file manager and tap on it to install it; the sketch after this list shows one way to check the file's signature first. You may need to grant some permissions to the app during the installation process.
    5. After the installation is done, you can launch the game and enjoy playing Temple Run 2 on your Android device.
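As an optional extra check before step 4, you can look at who signed the APK using apksigner, which ships with the Android SDK build-tools. The sketch below is illustrative only; the tool path and file name are placeholders, and the output is only meaningful if you compare it against a copy of the game you already trust.

```python
import subprocess

# Placeholders: point these at your build-tools install and downloaded file.
APKSIGNER = "apksigner"  # e.g. ~/Android/Sdk/build-tools/<version>/apksigner
APK_PATH = "temple-run-2.apk"

if __name__ == "__main__":
    # Prints the signing certificate's subject and digests. A signer that
    # differs from a copy you already trust is a strong warning sign that
    # the file was repackaged.
    subprocess.run([APKSIGNER, "verify", "--print-certs", APK_PATH], check=True)
```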
    -

    Why download Temple Run 2 APK?

    -

    You might be wondering why you should download APK game Temple Run 2 instead of getting it from the Play Store. Well, there are some advantages and disadvantages of doing so, and we will explain them below.

    -

    Benefits of downloading Temple Run 2 APK

    -

    Some of the benefits of downloading Temple Run 2 APK are:

    -
      -
    • Access to the latest version: Sometimes, the Play Store may not update the game to the latest version due to various reasons, such as compatibility issues, regional restrictions, or technical errors. By downloading Temple Run 2 APK, you can get access to the newest features and bug fixes of the game as soon as they are released by the developers.
    • -
    • Access to modded versions: Some websites may offer modded versions of Temple Run 2 APK, which are modified versions of the game that have some extra features or advantages that are not available in the official version. For example, you may find a modded version that has unlimited coins and gems, unlocked characters and power-ups, or no ads. However, you should be careful when downloading modded versions, as they may also contain malware or viruses that can harm your device or steal your data.
    • No need for Google account: If you don't have a Google account or you don't want to use it for some reason, you can still download and play Temple Run 2 APK without signing in to the Play Store. This can save you some time and hassle, as well as protect your privacy and security.

    Risks of downloading Temple Run 2 APK

    -

    Some of the risks of downloading Temple Run 2 APK are:

    • Potential malware and viruses: As mentioned earlier, some websites may offer fake or malicious versions of Temple Run 2 APK that can infect your device with malware or viruses that can damage your system, delete your files, or steal your personal information. You should always download Temple Run 2 APK from trusted and reputable sources, and scan them with a reliable antivirus software before installing them.
    • Potential legal issues: Downloading Temple Run 2 APK may violate some terms and conditions of the Play Store or the game developers. This may result in some legal consequences, such as getting banned from the game, losing your progress, or facing a lawsuit. You should always respect the intellectual property rights of the game creators and follow their rules and regulations.
    • Potential compatibility issues: Downloading Temple Run 2 APK may not work properly on your device if it is not compatible with your hardware or software specifications. This may cause some problems, such as crashing, freezing, lagging, or glitches. You should always check the requirements and compatibility of the game before downloading it.

    How to avoid malware and viruses when downloading Temple Run 2 APK

    -

    To avoid malware and viruses when downloading Temple Run 2 APK, you should follow these tips:

    • Use a secure browser: You should use a secure and updated browser that has features such as anti-phishing, anti-malware, and pop-up blocker. This will help you avoid malicious websites and links that may try to trick you into downloading harmful files.
    • Use a VPN service: You should use a VPN service that can encrypt your data and hide your IP address. This will help you bypass any regional restrictions or censorship that may prevent you from accessing certain websites or games. It will also protect your privacy and security from hackers and trackers.
    • Use a trusted website: You should use a trusted website that has a good reputation and reviews from other users. You can also check the domain name, URL, and SSL certificate of the website to verify its authenticity and security. You can also use tools such as [VirusTotal] or [URLVoid] to scan the website for any malware or viruses.
    • Use a reliable antivirus software: You should use a reliable antivirus software that can detect and remove any malware or viruses that may infect your device. You should also update your antivirus software regularly and perform a full scan of your device before and after downloading Temple Run 2 APK.

    Tips and tricks for playing Temple Run 2

    -

    Now that you have downloaded and installed Temple Run 2 APK on your Android device, you might want to learn some tips and tricks for playing the game better and having more fun. Here are some of them:

    -

    How to unlock characters and power-ups

    -

    One of the most exciting aspects of Temple Run 2 is unlocking new characters and power-ups that can enhance your gameplay. You can unlock characters and power-ups by spending coins or gems, which are the in-game currencies. You can earn coins by running, collecting them on the way, or watching ads. You can earn gems by completing objectives, opening chests, or buying them with real money. You can also get free coins and gems by logging in daily, completing global challenges, or joining special events. Here are some of the characters and power-ups that you can unlock in Temple Run 2:

    | Character | Cost | Power |
    | --- | --- | --- |
    | Guy Dangerous | Free | Shield Duration |
    | Scarlett Fox | 5,000 coins | Boost Distance |
    | Barry Bones | 15,000 coins | Coin Bonus |
    | Karma Lee | 25,000 coins | Score Multiplier |
    | Montana Smith | 40,000 coins | Coin Value |
    | Zack Wonder | 60,000 coins | Gem Bonus |
    | Francisco Montoya | 100 gems | Magnet Duration |
    | Santa Claus | 500 gems | Save Me |

    | Power-up | Cost | Effect |
    | --- | --- | --- |
    | Shield | 250 coins | Protects you from obstacles for a short time |
    | Magnet | 500 coins | Attracts coins to you for a short time |
    | Booster | 750 coins | Makes you run faster and jump higher for a short time |
    | Head Start | 1,000 coins or 1 gem | Gives you a boost at the start of the run |
    | Mega Head Start | 2,500 coins or 5 gems | Gives you a bigger boost at the start of the run |
    | Coin Bonus | 5,000 coins or 10 gems | Gives you extra coins at the end of the run |
    | Gem Bonus | 10,000 coins or 20 gems | Gives you extra gems at the end of the run |
    | Score Bonus | 15,000 coins or 30 gems | Gives you extra points at the end of the run |
    | Coin Magnet Upgrade | Varies depending on level | Increases the duration of the magnet power-up |
    | Shield Duration Upgrade | Varies depending on level | Increases the duration of the shield power-up |
    | Booster Distance Upgrade | Varies depending on level | Increases the distance covered by the booster power-up |

    How to complete global challenges and daily quests

    -

    Another way to earn coins and gems in Temple Run 2 is to complete global challenges and daily quests. Global challenges are events that last for a limited time and require you to achieve a certain goal, such as running a certain distance, collecting a certain number of coins, or using a certain power-up. You can join global challenges by tapping on the globe icon on the main menu. You can also see your progress and ranking among other players. Completing global challenges will reward you with coins, gems, or chests that contain various items. Daily quests are tasks that change every day and require you to do something specific, such as running in a certain location, escaping from a certain monkey, or collecting a certain artifact. You can see your daily quests by tapping on the calendar icon on the main menu. Completing daily quests will reward you with coins or gems.

    -

    How to use gems and coins wisely

    -

    Gems and coins are the two currencies in Temple Run 2 that you can use to buy or upgrade various things in the game. However, they are not easy to come by, so you should use them wisely and sparingly. Here are some tips on how to use gems and coins wisely:

    • Don't waste gems on saving yourself: One of the most tempting ways to use gems is to save yourself from dying when you hit an obstacle or fall off a cliff. However, this is not a good idea, as it will cost you more and more gems each time you do it, and it will not help you improve your skills or score. Instead, you should accept your fate and start over, or watch an ad to revive yourself for free.
    • Don't buy power-ups before the run: Another tempting way to use coins is to buy power-ups before the run, such as head starts, mega head starts, or coin bonuses. However, this is also not a good idea, as it will cost you a lot of coins and it will not guarantee you a better performance or score. Instead, you should save your coins for unlocking or upgrading characters and power-ups that will last longer and have more impact.
    • Don't buy chests with coins: Another tempting way to use coins is to buy chests that contain random items, such as gems, coins, artifacts, or outfits. However, this is also not a good idea, as it will cost you a lot of coins and it will not guarantee you anything valuable or useful. Instead, you should earn chests by completing global challenges or objectives, or watch ads to get free chests.
    • Don't buy gems with real money: The last tempting way to use gems is to buy them with real money. However, this is also not a good idea, as it will cost you a lot of money and it will not make the game more fun or satisfying. Instead, you should earn gems by playing the game regularly, completing daily quests, opening chests, or watching ads.

    How to improve your running skills and score higher

    -

    The ultimate goal of Temple Run 2 is to run as far as possible and score as high as possible. To do this, you need to improve your running skills and avoid making mistakes that can end your run prematurely. Here are some tips on how to improve your running skills and score higher:

    • Swipe accurately: The most basic skill in Temple Run 2 is swiping accurately on the screen to turn left or right, jump over or slide under obstacles, or tilt left or right to move sideways. You need to swipe accurately and timely to avoid crashing into obstacles or falling off edges. You also need to swipe in the direction of the arrows that appear on the screen when you encounter forks or curves.
    • Use power-ups strategically: The most useful skill in Temple Run 2 is using power-ups strategically to boost your performance and score. You need to know when and how to use each power-up effectively. For example, you should use the shield when you encounter a difficult obstacle or a large gap, you should use the magnet when you see a lot of coins ahead of you, you should use the booster when you have a clear path or when you want to escape from the monkey faster.
    • Collect artifacts and outfits: The most fun skill in Temple Run 2 is collecting artifacts and outfits that can make your game more interesting and rewarding. You need to collect artifacts by finding them randomly on the way or by opening chests. You can also collect outfits by spending gems or by completing certain objectives. Artifacts and outfits can give you bonus coins, gems, or points, as well as change the appearance of your character or the game environment.
    • Practice and learn: The most important skill in Temple Run 2 is practicing and learning from your mistakes. You need to play the game regularly and try different strategies and techniques to see what works best for you. You also need to learn from your mistakes and avoid repeating them in the future. You can also watch videos or read guides from other players who have mastered the game and learn from their tips and tricks.

    Conclusion

    -

    In conclusion, Temple Run 2 is a fun and addictive game that you can download and play on your Android device. However, if you want to enjoy some extra features or benefits that are not available in the official version, you can download APK game Temple Run 2 from a trusted website and install it on your device. However, you should also be aware of the risks and challenges of doing so, and take precautions to avoid malware, viruses, legal issues, or compatibility problems. You should also use your gems and coins wisely and improve your running skills and score by following some tips and tricks that we have shared in this article. We hope that this guide has helped you download APK game Temple Run 2 and have more fun playing it.

    -

    Summary of the main points

    • Temple Run 2 is a popular and addictive infinite runner game where you have to escape from a giant monkey while avoiding obstacles and collecting treasures.
    • You can download APK game Temple Run 2 from a trusted website and install it on your Android device to enjoy some extra features or benefits that are not available in the official version.
    • You should also be careful of the risks and challenges of downloading Temple Run 2 APK, such as malware, viruses, legal issues, or compatibility problems.
    • You should use your gems and coins wisely and improve your running skills and score by following some tips and tricks that we have shared in this article.

    Call to action

    -

    If you are ready to download APK game Temple Run 2 and start running for your life, then click on the link below and follow the instructions. Don't forget to share your feedback and experience with us in the comments section. Happy running!

    -

    Download Temple Run 2 APK here

    -

    FAQs

    -

    Here are some frequently asked questions about downloading APK game Temple Run 2:

    1. Is Temple Run 2 APK safe to download?
      Temple Run 2 APK is safe to download if you get it from a trusted and reputable website that offers genuine and virus-free files. However, you should always scan the file with a reliable antivirus software before installing it on your device.
    2. Is Temple Run 2 APK legal to download?
      Temple Run 2 APK may not be legal to download in some countries or regions where it violates the terms and conditions of the Play Store or the game developers. You should always check the laws and regulations of your location before downloading Temple Run 2 APK.
    3. Is Temple Run 2 APK compatible with my device?
      Temple Run 2 APK may not be compatible with your device if it does not meet the minimum requirements or specifications of the game. You should always check the compatibility of the game before downloading it.
    4. How do I update Temple Run 2 APK?
      To update Temple Run 2 APK, you will need to download the latest version of the game from the same website where you got it from. You will also need to uninstall the previous version of the game before installing the new one.
    5. How do I uninstall Temple Run 2 APK?
      To uninstall Temple Run 2 APK, you will need to go to your device settings, find the app manager, select Temple Run 2, and tap on uninstall. You will also need to delete any leftover files or folders related to the game from your device storage.


    Truck Simulator Ultimate: A Realistic and Fun Trucking Game

    -

    If you have ever dreamed of driving a truck across different countries, delivering goods, and managing your own logistics company, then you should try Truck Simulator Ultimate, a simulation game developed by Zuuks Games. This game lets you experience the thrill and challenge of being a trucker in a realistic and immersive way. You can choose from a variety of official Mercedes-Benz licensed trucks, customize them with different accessories, and drive them through over 100 cities around the world. You can also hire drivers, expand your fleet, and compete with other players in multiplayer modes. In this article, we will tell you more about the features of Truck Simulator Ultimate, how to download and play it on your PC, and some tips and tricks to help you succeed in the game.


    Features of Truck Simulator Ultimate

    -

    Truck Simulator Ultimate is not just a driving game, but also a tycoon game where you can create and run your own trucking company. Here are some of the features that make this game stand out:

    -

    Official Mercedes-Benz licensed trucks

    -

    You can choose from a range of high-quality trucks that are officially licensed by Mercedes-Benz, one of the leading manufacturers of commercial vehicles. Each truck has its own specifications, performance, and design. You can also customize your trucks with different colors, lights, horns, and other accessories to make them look more unique.

    -

    Realistic recreations of over 100 cities around the world

    -

    You can travel across different continents and countries, from the United States and China, to Germany and South Korea. You will see realistic recreations of over 100 cities, with their landmarks, roads, traffic, weather, and culture. You will also encounter different challenges and scenarios depending on the location, such as tolls, checkpoints, accidents, roadworks, etc.

    -

    Multiplayer modes with friends or other players

    -

    You can play with your friends or other players in various multiplayer modes. You can work together to deliver joint cargo from city to city, or compete in races to see who is the fastest trucker. You can also chat with other players, join or create clans, and participate in events and tournaments.

    -

    Customizable trucks and offices

    -

    You can not only customize your trucks, but also your offices. You can set up your offices in different countries and design them according to your preferences. You can also upgrade your offices with better facilities and equipment to improve your productivity and reputation.


    How to Download and Play Truck Simulator Ultimate on PC

    -

    While Truck Simulator Ultimate is available for Android and iOS devices, you might want to play it on your PC for a better gaming experience. Playing on PC will give you several benefits, such as:

    • A bigger screen size that will let you enjoy the graphics and details of the game more.
    • A smoother performance that will prevent lagging or crashing issues.
    • A more comfortable control scheme that will let you use your keyboard and mouse instead of touch screen.
    • A more stable internet connection that will prevent disconnection or data loss issues.

    To play Truck Simulator Ultimate on your PC, you will need an emulator software that will allow you to run Android apps on your computer. One of the best emulators you can use is BlueStacks, which is fast, secure, and easy to use. Here are the steps to download and install BlueStacks and Truck Simulator Ultimate on your PC:

    1. Go to the official website of BlueStacks and download the latest version of the software for your PC.
    2. Run the installer file and follow the instructions to complete the installation process.
    3. Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.
    4. Go to the Google Play Store app on BlueStacks and search for Truck Simulator Ultimate. Alternatively, you can also download the APK file of the game from a trusted source and drag and drop it to the BlueStacks home screen.
    5. Click on the install button and wait for the game to be installed on your PC.
    6. Once the installation is done, you can launch the game from the BlueStacks home screen or app drawer.

    Tips and Tricks for Playing Truck Simulator Ultimate

    -

    Now that you have downloaded and installed Truck Simulator Ultimate on your PC, you are ready to start your trucking adventure. However, before you hit the road, here are some tips and tricks that will help you play better and enjoy the game more:

    -

    How to manage your company and employees

    -

    One of the main goals of Truck Simulator Ultimate is to grow your trucking company and become a successful tycoon. To do this, you will need to hire drivers, assign them jobs, pay them salaries, and monitor their performance. You can also train your drivers to improve their skills and efficiency. You can access the company management menu from the office screen, where you can see your income, expenses, reputation, and ranking. You can also see your employees' profiles, ratings, salaries, and statuses. You should try to balance your budget and keep your employees happy and motivated.

    -

    How to complete jobs and earn money

    -

    To earn money in Truck Simulator Ultimate, you will need to complete jobs that involve delivering cargo from one city to another. You can find jobs from different sources, such as contracts, cargo market, quick jobs, or multiplayer modes. You can also create your own jobs by choosing your cargo type, destination, and price. You should try to complete jobs as fast and as safely as possible, without damaging your cargo or violating traffic rules. You should also pay attention to the fuel level, fatigue level, and maintenance status of your truck. You can refuel, rest, or repair your truck at gas stations, hotels, or service centers along the way.

    -

    How to upgrade your trucks and accessories

    -

    To improve your performance and reputation in Truck Simulator Ultimate, you will need to upgrade your trucks and accessories. You can buy new trucks or sell old ones at dealerships located in different cities. You can also customize your trucks with different parts and accessories at garages located in different cities. You can change the engine, transmission, chassis, wheels, paint job, interior design, lights, horns, etc. of your trucks. You can also buy or sell trailers at trailer dealerships located in different cities. You should try to upgrade your trucks and accessories according to your needs and preferences.

    -

    Conclusion

    -

    Truck Simulator Ultimate is a realistic and fun trucking game that lets you experience the thrill and challenge of being a trucker in a realistic and immersive way. You can choose from a variety of official Mercedes-Benz licensed trucks, customize them with different accessories, and drive them through over 100 cities around the world. You can also hire drivers, expand your fleet, and compete with other players in multiplayer modes. You can also play the game on your PC with BlueStacks, which will give you a better gaming experience. In this article, we have told you more about the features of Truck Simulator Ultimate, how to download and play it on your PC, and some tips and tricks to help you succeed in the game. If you are a fan of simulation games, you should definitely give Truck Simulator Ultimate a try. You can download it for free from the Google Play Store or the App Store, or follow the steps above to play it on your PC with BlueStacks. Have fun and enjoy the ride!

    -

    FAQs

    -

    Here are some of the frequently asked questions about Truck Simulator Ultimate:

    -

    Q1: Is Truck Simulator Ultimate free to play?

    -

    A1: Yes, Truck Simulator Ultimate is free to play. However, it also contains in-app purchases that can enhance your gameplay or unlock more features. You can disable in-app purchases in your device settings if you don't want to use them.

    -

    Q2: How can I play Truck Simulator Ultimate offline?

    -

    A2: You can play Truck Simulator Ultimate offline by turning off your internet connection before launching the game. However, you will not be able to access some features that require an internet connection, such as multiplayer modes, events, tournaments, etc.

    -

    Q3: What are the system requirements for playing Truck Simulator Ultimate on PC?

    -

    A3: To play Truck Simulator Ultimate on PC with BlueStacks, you will need a Windows 7 or higher operating system, an Intel or AMD processor, at least 4 GB of RAM, and at least 5 GB of free disk space. You will also need a stable internet connection and a Google account.

    -

    Q4: How can I contact the developers of Truck Simulator Ultimate?

    -

    A4: You can contact the developers of Truck Simulator Ultimate by sending an email to info@zuuks.com or by visiting their website at https://www.zuuks.com/. You can also follow them on Facebook, Twitter, Instagram, or YouTube for the latest news and updates about the game.

    -

    Q5: What are some other truck simulator games that I can play?

    -

    A5: If you like truck simulator games, you might also enjoy some of these games:

    • Euro Truck Simulator 2: A simulation game that lets you drive trucks across Europe, delivering cargo and exploring different countries and cultures.
    • American Truck Simulator: A simulation game that lets you drive trucks across the United States, delivering cargo and experiencing the American dream.
    • World Truck Driving Simulator: A simulation game that lets you drive trucks across different countries and continents, delivering cargo and facing various challenges and scenarios.


    VidMate 2017 APK Download for Android: How to Install and Use

    -

    If you are looking for a way to download videos and music from various online platforms, such as YouTube, Facebook, Instagram, TikTok, and more, then you might want to try VidMate. VidMate is a popular video downloader app that allows you to enjoy your favorite content offline. In this article, we will show you how to download and install VidMate 2017 APK for Android, as well as how to use it to download videos and music. We will also compare VidMate 2017 APK with other video downloaders and answer some frequently asked questions.


    What is VidMate?

    -

    VidMate is a free video downloader app that lets you download videos and music from various online platforms, such as YouTube, Facebook, Instagram, TikTok, and more. You can choose from different formats and resolutions, such as MP4, MP3, HD, etc. You can also watch live TV channels, access popular websites, and discover trending videos with VidMate.

    -

    Features of VidMate

    • Download videos and music from over 1000 online platforms
    • Choose from different formats and resolutions
    • Watch live TV channels
    • Access popular websites
    • Discover trending videos
    • Manage your downloads
    • Share your downloads with others

    Benefits of VidMate

    • Save your mobile data by downloading videos and music offline
    • Enjoy your favorite content anytime and anywhere
    • Watch live TV channels without any subscription fees
    • Access popular websites without any restrictions
    • Discover new and interesting videos every day
    • Manage your downloads easily
    • Share your downloads with your friends and family

    How to Download and Install VidMate 2017 APK for Android

    -

    To download and install VidMate 2017 APK for Android, you need to follow these steps:

    -

    Step 1: Enable Unknown Sources

    -

    Since VidMate is not available on the Google Play Store, you need to enable unknown sources on your Android device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

    -

    Step 2: Download VidMate 2017 APK File

    -

    Next, you need to download the VidMate 2017 APK file from a trusted source. You can use this link to download the file. Alternatively, you can scan the QR code below with your phone camera to download the file.

    [Image: QR code for VidMate 2017 APK download]

    Step 3: Install VidMate 2017 APK File

    -

    Once you have downloaded the VidMate 2017 APK file, you need to install it on your Android device. To do this, locate the file in your file manager and tap on it. You will see a prompt asking you to confirm the installation. Tap on Install and wait for the installation to complete.


    How to Use VidMate 2017 APK for Android

    -

    After installing VidMate 2017 APK on your Android device, you can use it to download videos and music from various online platforms. To do this, follow these steps:

    -

    Step 1: Launch VidMate App

    -

    Open the VidMate app on your Android device. You will see the home screen with different tabs, such as Trending, Video, Music, TV, etc. You can swipe left or right to navigate through the tabs.

    -

    Step 2: Search for Videos or Music

    -

    To search for videos or music, you can use the search bar at the top of the screen. You can enter keywords, titles, artists, genres, etc. You can also use the voice search feature by tapping on the microphone icon. You will see a list of results matching your query. You can tap on any result to view more details, such as duration, size, format, resolution, etc.

    -

    Step 3: Download Videos or Music

    -

    To download videos or music, you can tap on the red download button at the bottom of the screen. You will see a pop-up window asking you to choose the format and resolution of your download. You can select from different options, such as MP4, MP3, HD, etc. You can also change the download location and name of your file. After selecting your preferences, tap on Download and wait for the download to finish. You can check the progress of your download in the notification bar or in the Download tab of the app.

    -

    Comparison Table of VidMate 2017 APK and Other Video Downloaders

    -

    To help you understand how VidMate 2017 APK compares with other video downloaders, we have created a comparison table below. We have compared VidMate 2017 APK with three other popular video downloaders: TubeMate, Snaptube, and Videoder.

    [Comparison table: VidMate 2017 APK vs. TubeMate, Snaptube, and Videoder]

    Conclusion

    -

    VidMate 2017 APK is a free video downloader app that lets you download videos and music from various online platforms. It has many features and benefits that make it stand out from other video downloaders. It is easy to download and install VidMate 2017 APK for Android and use it to enjoy your favorite content offline. We hope this article has helped you learn more about VidMate 2017 APK and how to use it. If you have any questions or feedback, please feel free to leave a comment below.

    -

    FAQs

    • Is VidMate 2017 APK safe to use?
      VidMate 2017 APK is safe to use as long as you download it from a trusted source. However, you should always be careful when downloading apps from unknown sources and scan them for viruses or malware before installing them.

    • Is VidMate 2017 APK legal to use?
      VidMate 2017 APK is legal to use as long as you respect the intellectual property rights of the content owners and do not use it for commercial purposes or distribute it without permission. However, you should always check the terms and conditions of the online platforms you are downloading from and comply with them.

    • How can I update VidMate 2017 APK?
      You can update VidMate 2017 APK by downloading the latest version from a trusted source and installing it over the existing one. Alternatively, you can check for updates within the app by going to Settings > Update and tapping on Check Update.

    • How can I uninstall VidMate 2017 APK?
      You can uninstall VidMate 2017 APK by going to Settings > Apps > VidMate and tapping on Uninstall. You can also delete the VidMate 2017 APK file from your file manager if you still have it.

    • What are some alternatives to VidMate 2017 APK?
      Some alternatives to VidMate 2017 APK are TubeMate, Snaptube, Videoder, etc. However, they may not have all the features and benefits that VidMate 2017 APK offers. You can compare them using the comparison table above and decide which one suits your needs best.


    Download Internet Cafe Simulator 1 PC Bagas31: A Guide for Gamers

    -

    If you are looking for a realistic and immersive simulation game that lets you run your own internet cafe business, then you might want to check out Internet Cafe Simulator. This game allows you to set up and manage a comprehensive workplace, interact with various people and activities in the city, and even engage in illegal work if you want. But where can you download this game for free? In this article, we will show you how to download Internet Cafe Simulator 1 PC Bagas31, one of the most popular and reliable websites for downloading software and games. We will also give you some tips and tricks on how to play the game and have fun.


    What is Internet Cafe Simulator?

    -

    Internet Cafe Simulator is an internet cafe business simulation game developed by Cheesecake Dev and released in 2019. The game lets you build the best internet cafe in the world by choosing from various options such as furniture, devices, games, software, food, drinks, and more. You can also customize your environment and decorate it according to your taste. You have to pay the rent of your apartment and shop, satisfy your customers, upgrade your computers, buy game licenses, and deal with competitors. You can also explore the city and interact with different people and events. You can choose to be a decent person or a criminal who does illegal work for money. But be careful, as there are consequences for your actions.

    -

    Why download it from Bagas31?

    -

    Bagas31 is a website that provides free download links for various software and games. It is one of the most visited and trusted websites in Indonesia for downloading software and games. There are many reasons why you should download Internet Cafe Simulator 1 PC Bagas31, such as:

    • You can get the full version of the game for free, without paying any fees or subscriptions.
    • You can get the latest updates and patches for the game, ensuring that it runs smoothly and without bugs.
    • You can get access to additional features and mods that enhance your gaming experience.
    • You can get support from the Bagas31 community, which consists of thousands of users who share their opinions, reviews, tips, and solutions.
    • You can get a safe and secure download link, without any viruses or malware.

    How to download Internet Cafe Simulator 1 PC Bagas31?

    -

    Downloading Internet Cafe Simulator 1 PC Bagas31 is easy and simple. Just follow these steps:

    1. Go to Bagas31 website and search for "Internet Cafe Simulator".
    2. Select the link that says "Internet Cafe Simulator Full Version".
    3. Click on the "Download" button and wait for the file to be downloaded.
    4. Extract the file using WinRAR or any other extraction tool.
    5. Run the setup file and follow the instructions to install the game.
    6. Copy the crack file from the crack folder and paste it into the game directory.
    7. Run the game as administrator and enjoy!

    Tips and tricks for playing Internet Cafe Simulator

    -

    Now that you have downloaded and installed Internet Cafe Simulator 1 PC Bagas31, you are ready to play the game. Here are some tips and tricks that will help you run your internet cafe business and have fun:

    • Plan your budget carefully and invest in the right equipment and software. You need to balance your income and expenses, and make sure you have enough money to pay the rent, bills, taxes, and salaries.
    • Keep your customers happy and loyal. You need to provide them with fast and reliable internet service, comfortable and clean seats, various games and programs, and food and drinks. You can also offer discounts, promotions, and loyalty cards to attract more customers.
    • Expand your business and upgrade your shop. You can buy new furniture, devices, games, software, and decorations to improve your internet cafe. You can also hire staff, such as cleaners, technicians, and security guards, to help you with the daily tasks.
    • Explore the city and interact with different people and events. You can meet new friends, enemies, lovers, and rivals in the city. You can also join clubs, gangs, or organizations that offer you different opportunities and challenges. You can also participate in various activities, such as gambling, racing, fighting, or hacking.
    • Choose your own path and style. You can decide whether you want to be a good or a bad person in the game. You can follow the law or break it. You can be honest or dishonest. You can be friendly or hostile. You can be generous or greedy. You can be peaceful or violent. The choice is yours.

    Conclusion

    -

    Internet Cafe Simulator is a game that lets you experience the thrill and challenge of running your own internet cafe business. You can download it for free from Bagas31, one of the best websites for downloading software and games. You can also enjoy the game's realistic and immersive features, such as customizing your shop, interacting with the city, and choosing your own path. If you are a fan of simulation games, then you should definitely try Internet Cafe Simulator 1 PC Bagas31.

    -

    So what are you waiting for? Download Internet Cafe Simulator 1 PC Bagas31 now and start your own internet cafe adventure!

    -

    FAQs

    -

    Here are some frequently asked questions and answers about Internet Cafe Simulator and Bagas31:

    1. Q: What are the system requirements for Internet Cafe Simulator?
       A: The minimum system requirements for Internet Cafe Simulator are: Windows 7/8/10 64-bit operating system; Intel Core i3 3rd generation processor; 4 GB RAM; NVIDIA GeForce GTX 660 graphics card; 5 GB available disk space; DirectX 11 compatible sound card.
    2. Q: Is Internet Cafe Simulator multiplayer?
       A: No, Internet Cafe Simulator is a single-player game that does not support online multiplayer mode.
    3. Q: Is Bagas31 safe and legal?
       A: Yes, Bagas31 is a safe and legal website that provides free download links for various software and games. It does not contain any viruses or malware that can harm your computer or device. However, you should always scan any downloaded files with an antivirus program before opening them.
    4. Q: How can I contact Bagas31?
       A: You can contact Bagas31 by visiting their official website Bagas31 and clicking on the "Contact Us" link at the bottom of the page. You can also follow them on their social media accounts on Facebook, Twitter, Instagram, YouTube, and Telegram.
    5. Q: How can I support Bagas31?
       A: You can support Bagas31 by visiting their official website Bagas31 and clicking on the "Donate" button at the top of the page. You can also share their website link with your friends and family who are looking for free software and games.


    What is an APK file and why you should care

    -

    If you are an Android user, you may have heard of APK files, but do you know what they are and why they are useful? In this article, we will explain everything you need to know about APK files, how to download and install them on your device, how to update and uninstall them, and how to create and share your own APK files.

    -

    An APK file is a package file that contains all the components of an Android app, such as the code, resources, assets, certificates, and manifest. It is similar to an executable file (.exe) on Windows or a ZIP file (.zip) on any platform. When you download an app from the Google Play Store, you are actually downloading an APK file that is then installed on your device.
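    Because an APK is packaged as a standard ZIP archive, you can peek inside one without any Android-specific tooling. The snippet below is a minimal sketch using Python's built-in zipfile module; the file name app.apk is only a placeholder for whatever APK you have on disk.

    ```python
    import zipfile

    # An APK is a ZIP archive, so the standard zipfile module can read it.
    # "app.apk" is a placeholder path for an APK you have already downloaded.
    with zipfile.ZipFile("app.apk") as apk:
        for name in apk.namelist():
            # Typical entries include AndroidManifest.xml, classes.dex,
            # resources.arsc, the res/ folder, and META-INF/ signature files.
            print(name)
    ```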


    However, not all apps are available on the Google Play Store, either because they are not approved by Google, they are region-restricted, they are in beta testing, or they are exclusive to other platforms. In these cases, you may want to download and install the app from another source, such as the developer's website or a third-party app store. This is where APK files come in handy.

    -

    By downloading and installing APK files directly on your device, you can access apps that are not available on the Google Play Store, get the latest updates before they are officially released, customize your apps with mods or hacks, back up your apps and data, and more. However, you also need to be careful about where you get your APK files from, as some sources may contain malware or viruses that can harm your device or steal your personal information.

    How to download and install APK files on your Android device

    -

    Now that you know what APK files are and why they are useful, you may be wondering how to download and install them on your Android device. Here are the steps you need to follow:

    -

    Find a reliable source of APK files

    -

    The first step is to find a reliable source of APK files that you want to download and install. There are many websites and apps that offer APK downloads, but not all of them are trustworthy or safe. Some may contain malware or viruses, some may have outdated or fake versions, and some may have hidden charges or ads.

    -

    Therefore, you should always do some research before downloading any APK file from an unknown source. You should check the reviews, ratings, comments, and feedback from other users, as well as the reputation and credibility of the developer or the website. You should also scan the APK file with an antivirus app before installing it.
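    Besides an antivirus scan, a quick integrity check helps confirm that the file you received is the one the site intended to publish. Here is a minimal sketch in Python, assuming the download page lists a SHA-256 checksum to compare against (not every site does); the file name is a placeholder.

    ```python
    import hashlib

    # Compute the SHA-256 of a downloaded APK so it can be compared with a
    # checksum published by the download site, when one is available.
    def sha256_of(path, chunk_size=1 << 20):
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    print(sha256_of("app.apk"))  # placeholder file name
    ```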

    -

    Some examples of trusted websites and apps that offer APK downloads are:

    • WhatsApp: This is the official website of WhatsApp, where you can download the latest version of the popular messaging app. You can also join the beta program to get early access to new features and updates.
    • CapCut: This is the official website of CapCut, a powerful video editing app that lets you create amazing videos with ease. You can download the app for free and enjoy its features without any watermark or ads.

    These are just some examples, but there are many more websites and apps that offer APK downloads. However, you should always be careful and cautious when downloading from any source other than the Google Play Store.


    Enable unknown sources on your device

    -

    The second step is to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the Google Play Store. However, if you want to install APK files, you need to allow your device to install apps from unknown sources.

    -

    The way to enable unknown sources may vary depending on your device model and Android version, but here are some general steps you can follow:

    1. Go to your device settings and look for security or privacy options.
    2. Find the option that says "Unknown sources" or "Install unknown apps" and toggle it on.
    3. You may see a warning message that says installing apps from unknown sources may harm your device or data. Tap on "OK" or "Allow" to proceed.

    If you need more help, you can check out these guides for different Android versions:

    -

    Download and open the APK file

    -

    The third step is to download and open the APK file from the source. This is usually a simple and straightforward process, but here are some tips and tricks you can follow:

    • Make sure you have enough storage space on your device before downloading the APK file. Some APK files can be quite large, especially if they contain high-quality graphics or media. You can check your available storage space by going to your device settings and looking for storage or memory options.
    • Use a fast and stable internet connection to download the APK file. If possible, use Wi-Fi instead of mobile data, as this will save you data charges and ensure a faster download speed. You can also pause and resume the download if you encounter any interruptions or errors.
    • Locate the downloaded APK file on your device. Depending on your device settings and browser preferences, the APK file may be saved in different locations, such as the downloads folder, the notifications panel, or the browser history. You can also use a file manager app to find the APK file on your device.
    • Tap on the APK file to open it and start the installation process. You may see a pop-up message that asks you to confirm the installation or grant permissions to the app. Tap on "Install" or "Allow" to proceed. You may also see a progress bar that shows you how long the installation will take.

    Once the installation is complete, you can launch the app from your app drawer or home screen. You can also create a shortcut or a widget for the app if you want to access it more easily.
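    If you would rather push the APK from a computer instead of tapping through the installer on the phone, adb (from the Android platform-tools) can sideload the same file. Below is a minimal sketch using Python's subprocess module, assuming adb is on your PATH, the device is connected over USB, and USB debugging is enabled.

    ```python
    import subprocess

    # Sideload an APK from a computer with adb (Android platform-tools).
    # The -r flag reinstalls/updates the app while keeping its existing data.
    # "app.apk" is a placeholder for the file you downloaded.
    subprocess.run(["adb", "install", "-r", "app.apk"], check=True)
    ```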

    How to update and uninstall APK files on your Android device

    -

    After you have downloaded and installed APK files on your device, you may want to keep them updated and uninstall them if you no longer need them. Here are the steps you need to follow:

    -

    Check for updates manually or automatically

    -

    Unlike apps from the Google Play Store, APK files do not update automatically on your device. You need to check for updates manually or use a third-party app that notifies you of updates.


    To check for updates manually, you need to visit the source website or app where you downloaded the APK file and see if there is a newer version available. If there is, you can download and install it over the existing app, or uninstall the old app first and then install the new one.


    To check for updates automatically, you can use a third-party app that scans your device for installed APK files and compares them with the latest versions from various sources. If there are any updates available, the app will notify you and let you download and install them with one tap. Some examples of such apps are:

    • APKUpdater: This is an open-source app that lets you check for updates from multiple sources, such as APKMirror, Google Play, Uptodown, and more. You can also schedule automatic updates and exclude apps from being checked.
    • Aptoide: This is an alternative app store that lets you download and update APK files from various developers and categories. You can also create your own store and share your apps with others.
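
    If you have adb available, you can also check which version of an app is currently installed before hunting for an update. This is only a sketch: the package name is a placeholder, and the exact dumpsys output can vary between Android versions.

    ```python
    import re
    import subprocess

    package = "com.example.app"  # hypothetical package name

    # Ask the connected device for the package's details.
    out = subprocess.run(
        ["adb", "shell", "dumpsys", "package", package],
        capture_output=True, text=True, check=False,
    ).stdout

    match = re.search(r"versionName=(\S+)", out)
    print(match.group(1) if match else "package not found")
    ```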

    Uninstall unwanted or outdated APK files


    If you want to remove an app that you installed from an APK, you can use the app manager in your device settings; if you only want to delete the downloaded APK installer file, a file manager app will do. Note that deleting the APK file by itself does not uninstall the app.


    To use the app manager in your device settings, you need to go to your device settings and look for apps or applications options. Then, find the app you want to uninstall and tap on it. You will see an option that says "Uninstall" or "Remove". Tap on it and confirm the action.


    To use a file manager app that lets you delete the APK file, you need to download and install a file manager app that can access your device storage and locate the APK file. Then, tap and hold on the APK file and select "Delete" or "Move to trash". Some examples of such apps are:

    • ES File Explorer: This is a popular file manager app that lets you manage your files, apps, cloud storage, network shares, and more. You can also use it to compress, encrypt, backup, and restore your files.
    • File Manager: This is a simple and easy-to-use file manager app that lets you browse, copy, move, delete, rename, and share your files. You can also use it to clean up your junk files and free up space.
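
    From a computer, the same job can be done with adb, as long as you know the app's package name (the name below is a placeholder):

    ```python
    import subprocess

    package = "com.example.app"  # hypothetical package name

    # Removes the app and its data from the connected device.
    subprocess.run(["adb", "uninstall", package], check=False)
    ```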

    How to create and share your own APK files


    If you are feeling creative and adventurous, you may want to create and share your own APK files with others. This can be a fun and rewarding way to showcase your skills, hobbies, or interests, or to help others with useful apps or tools. Here are the steps you need to follow:

    Use an online tool or an app to create APK files

    The first step is to use an online tool or an app that lets you create APK files from websites, videos, images, or other files. There are many tools and apps that offer this service, but not all of them are reliable or easy to use. Some may have limitations on the size, quality, or functionality of the APK files, and some may require registration or payment.


    Therefore, you should always do some research before using any tool or app to create APK files. You should check the reviews, ratings, comments, and feedback from other users, as well as the features and options of the tool or app. You should also test the tool or app on a small or simple file before creating a large or complex one.


    Some examples of online tools and apps that let you create APK files are:

    • Website 2 APK Builder: This is an online tool that lets you convert any website into an APK file. You can customize the app name, icon, splash screen, permissions, orientation, and more. You can also use it to create webview apps, progressive web apps, or HTML5 games.
    • APK Editor: This is an app that lets you edit any APK file on your device. You can change the app name, icon, version, package name, permissions, resources, assets, and more. You can also use it to clone, sign, or optimize APK files.

    Share your APK files with others via email, cloud, or Bluetooth


    The second step is to share your created APK files with others via email, cloud services, or Bluetooth. This is usually a simple and straightforward process, but here are some tips and tricks you can follow:

    • Make sure you have permission from the original source of the file before sharing it with others. Some websites or apps may have terms and conditions that prohibit you from creating or distributing APK files from their content. You should also respect the intellectual property rights of the creators and owners of the file.
    • Compress the APK file if it is too large to share via email or cloud services. Some email providers or cloud services may have limits on the size of the attachments or uploads. You can use a file compression app to reduce the size of the APK file without affecting its quality or functionality. Some examples of such apps are RAR and WinZip.
    • Scan the APK file for viruses before sending it to others. Even if you created the APK file from a trusted source, it may still contain malware or viruses that can harm your device or data. You should always scan the APK file with an antivirus app before sending it to others. Some examples of such apps are Avast Antivirus and Kaspersky Mobile Antivirus.
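
    Whichever way you send the file, it can also help to record a checksum of the APK before sharing it, so the recipient can confirm the file arrived unmodified. A minimal sketch using Python's standard hashlib module (the file name is a placeholder):

    ```python
    import hashlib
    from pathlib import Path

    apk = Path("my-app.apk")  # hypothetical file name

    digest = hashlib.sha256(apk.read_bytes()).hexdigest()
    print(f"SHA-256: {digest}")  # share this value alongside the APK
    ```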

    Conclusion


    In this article, we have explained what an APK file is, how to download and install it on your Android device, how to update and uninstall it, and how to create and share your own APK files. We hope you have learned something new and useful from this article.


    APK files are a great way to access apps that are not available on the Google Play Store, get the latest updates before they are officially released, customize your apps with mods or hacks, backup your apps and data, and more. However, you also need to be careful about where you get your APK files from, how you install them on your device, and how you share them with others.


    If you have any questions or feedback about this article or about APK files in general, please feel free to leave a comment below. We would love to hear from you!

    FAQs

    • What does APK stand for?
      APK stands for Android Package Kit, which is the file format used by Android to distribute and install apps. APK files contain all the components of an app, such as the code, resources, assets, certificates, and manifest.
    • How do I open an APK file on my PC?
      You cannot open an APK file directly on your PC, as it is designed for Android devices. However, you can use an Android emulator or a virtual machine to run Android on your PC and then open the APK file. Some examples of Android emulators are BlueStacks, LDPlayer, and NoxPlayer. Some examples of virtual machines are VirtualBox, VMware Workstation Player, and the Android Studio emulator.
    • How do I extract or edit an APK file?
      You can extract or edit an APK file using a file compression app or a file editor app. A file compression app lets you unzip the APK file and access its contents, such as the code, resources, assets, certificates, and manifest. A file editor app lets you modify those contents, such as the app name, icon, version, package name, permissions, and resources. Some examples of file compression apps are RAR and WinZip. Some examples of file editor apps are APK Editor and APK Editor Pro. (See the short example after this list for one way to inspect an APK's contents programmatically.)
    • Is it safe to download and install APK files?
      It depends on where you get your APK files from. If you download and install APK files from trusted and reputable sources, such as the developer's website or a well-known third-party app store, it is usually safe to do so. However, APK files from unknown or shady sources may contain malware or viruses that can harm your device or data. Therefore, you should always do some research before downloading any APK file from an unknown source, and scan the file with an antivirus app before installing it.
    • What are the benefits of downloading and installing APK files?
      There are many benefits of downloading and installing APK files on your Android device. Some of them are:

      • You can access apps that are not available on the Google Play Store, either because they are not approved by Google, they are region-restricted, they are in beta testing, or they are exclusive to other platforms.
      • You can get the latest updates before they are officially released on the Google Play Store, which may include new features, bug fixes, or performance improvements.
      • You can customize your apps with mods or hacks that may enhance your user experience, such as removing ads, unlocking premium features, adding cheats, or changing themes.
      • You can back up your apps and data in case you lose your device or need to reset it, and restore them from the backup if needed.
      • You can create and share your own APK files with others who may be interested in your skills, hobbies, or interests.

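    As mentioned in the FAQ above, an APK is just a ZIP archive with a specific layout, so you can peek inside one without any special tooling. A minimal sketch using Python's standard zipfile module (the file name is a placeholder; note that AndroidManifest.xml inside an APK is stored in a binary format, so it is listable but not directly readable as text):

    ```python
    import zipfile
    from pathlib import Path

    apk = Path("my-app.apk")  # hypothetical file name

    with zipfile.ZipFile(apk) as zf:
        for name in zf.namelist()[:20]:  # first 20 entries is enough for a look
            print(name)                  # e.g. AndroidManifest.xml, classes.dex, res/...
    ```
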
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/The etiquette of saying allah rehmet elesin to a grieving person.md b/spaces/congsaPfin/Manga-OCR/logs/The etiquette of saying allah rehmet elesin to a grieving person.md deleted file mode 100644 index 1856400c7e41adac343a3844a2ec679f9f1a707b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/The etiquette of saying allah rehmet elesin to a grieving person.md +++ /dev/null @@ -1,126 +0,0 @@ - -

      What does "allah rehmet elesin" mean and how to respond to it?


      If you have ever heard someone say "allah rehmet elesin" or seen it written on a condolence card, you may have wondered what it means and how to reply to it. In this article, we will explain the meaning, usage, and pronunciation of this phrase, as well as some common ways to respond to it in Turkish and other languages.

      Introduction

      "Allah rehmet elesin" is a Turkish phrase that means "may God have mercy on him/her". It is a common expression of sympathy and condolence for someone who has passed away or lost a loved one. It is similar to saying "rest in peace" or "may he/she rest in peace" in English.


      What is "allah rehmet elesin"?


      The phrase consists of three words: allah, rehmet, and elesin. Allah is the Arabic word for God, which is also used by Turkish Muslims. Rehmet is the Turkish spelling of rahmah, which means mercy, compassion, or forgiveness in Arabic. Elesin is the third-person singular optative form of the verb elemek, which means to have or possess in Turkish. So literally, the phrase means "may he/she have God's mercy".

      When and why do people say it?

      People say "allah rehmet elesin" when they hear about someone's death or when they visit a funeral or a grave. It is a way of expressing sorrow and respect for the deceased and their family. It is also a way of praying for the deceased's soul and asking God to forgive their sins and grant them paradise.

      How to pronounce it correctly?

      The phrase is pronounced as /ɑlɑː rɛhmɛt ɛlɛsin/ in Turkish phonetic alphabet. Here are some tips on how to pronounce each word:


      • Allah: The first syllable is stressed and has a long /ɑː/ sound. The second syllable has a short /ɑ/ sound. The /l/ sound is pronounced with the tip of the tongue touching the upper teeth.
      • Rehmet: The first syllable is stressed and has a short /ɛ/ sound. The second syllable has a short /m/ sound followed by a silent /e/. The final syllable has a short /t/ sound.
      • Elesin: The first syllable is unstressed and has a short /ɛ/ sound. The second syllable is stressed and has a long /e/ sound. The final syllable has a short /n/ sound.

      You can also listen to an audio pronunciation of the phrase online to check yourself.

      How to respond to "allah rehmet elesin"?

      There are different ways to respond to "allah rehmet elesin" depending on your language and religion. Here are some common responses in Turkish and other languages:

      Common responses in Turkish

      If you are also a Turkish speaker and a Muslim, you can reply with one of these phrases:

      Amin

      This is the simplest and most common response. It means "amen" or "so be it" in Arabic. It shows that you agree with the prayer and wish the same for the deceased.

      Allah sizlere de rahmet eylesin

      This means "may God have mercy on you too" in Turkish. It is a polite and respectful way of expressing your sympathy and gratitude to the bereaved family. It implies that you are also praying for them and their well-being.

      Allah sabır versin

      This means "may God give you patience" in Turkish. It is a way of comforting and supporting the grieving family. It implies that you are acknowledging their pain and sorrow and wishing them strength and endurance.

      Common responses in other languages

      If you are not a Turkish speaker or a Muslim, you can still reply with some phrases that convey your condolences and respect in your own language. Here are some examples:

      English

      You can say "I'm sorry for your loss" or "my condolences" in English. These are simple and sincere ways of expressing your sympathy and compassion. You can also add "may he/she rest in peace" or "may his/her soul be at peace" if you want to acknowledge their faith and pray for the deceased.

      Arabic

      You can say "inna lillahi wa inna ilayhi raji'un" or "إِنَّا لِلَّٰهِ وَإِنَّا إِلَيْهِ رَاجِعُونَ" in Arabic. This means "we belong to God and to Him we shall return" in Arabic. It is a verse from the Quran that Muslims recite when they hear about someone's death or face any hardship. It shows that you accept God's will and trust His wisdom.

      Persian

      You can say "khoda rahmatash konad" or "خدا رحمتش کند" in Persian. This means "may God have mercy on him/her" in Persian. It is a common expression of condolence and prayer for the deceased. You can also add "sabre jameel baashid" or "صبر جمیل باشید" which means "be patient and beautiful" in Persian. It is a way of encouraging and consoling the bereaved family.

      Conclusion

      "Allah rehmet elesin" is a phrase that you may encounter in Turkish culture and religion. It means "may God have mercy on him/her" and it is used to express sympathy and condolence for someone who has passed away or lost a loved one. There are different ways to respond to it depending on your language and religion, but the most important thing is to show your respect and compassion for the deceased and their family.

      Summary of the main points

      • "Allah rehmet elesin" means "may God have mercy on him/her" in Turkish.
      • It is a common expression of sympathy and condolence for someone who has died or lost a loved one.
      • It is pronounced as /ɑlɑː rɛhmɛt ɛlɛsin/ in Turkish phonetic alphabet.
      • Some common responses in Turkish are "amin", "allah sizlere de rahmet eylesin", and "allah sabır versin".
      • Some common responses in other languages are "I'm sorry for your loss", "inna lillahi wa inna ilayhi raji'un", and "khoda rahmatash konad".

      Call to action


      If you want to learn more about Turkish culture and language, you can visit our website. We offer online courses, podcasts, videos, and articles that will help you master Turkish in no time. You can also subscribe to our newsletter to get the latest updates and tips on learning Turkish. Don't miss this opportunity to enrich your knowledge and skills with our expert teachers and resources.

      Frequently Asked Questions

      1. What is the difference between "allah rehmet elesin" and "allah rahmet eylesin"?
         They are both correct ways of saying "may God have mercy on him/her" in Turkish, but they have slightly different spellings and pronunciations. The first one uses the word rehmet, which is the Turkish spelling of rahmah, while the second one uses the word rahmet, which is the original Arabic spelling of rahmah. The first one also uses the verb elesin, which is the optative form of the verb elemek, while the second one uses the verb eylesin, which is the optative form of the verb eylemek. Both verbs mean to have or possess in Turkish, but elesin is more commonly used in this context. The pronunciation of the first one is /ɑlɑː rɛhmɛt ɛlɛsin/, while the pronunciation of the second one is /ɑlɑː rɑhmɛt ɛjlɛsin/.
      2. What are some other ways to express condolences in Turkish?
         Some other ways to express condolences in Turkish are:
         • "Başınız sağolsun": This means "may your head be healthy" in Turkish. It is a traditional and formal way of expressing sympathy and respect for the bereaved family. It implies that you wish them good health and recovery from their grief.
         • "Mekanı cennet olsun": This means "may his/her place be paradise" in Turkish. It is a religious and hopeful way of praying for the deceased and their afterlife. It implies that you believe that they will enter heaven and enjoy eternal bliss.
         • "Acınızı paylaşıyorum": This means "I share your pain" in Turkish. It is a personal and empathetic way of showing your support and solidarity for the grieving family. It implies that you understand their feelings and emotions and that you are there for them.
      3. How do you say "rest in peace" in Turkish?
         The most common way to say "rest in peace" in Turkish is "huzur içinde yatsın". It literally means "may he/she lie in peace". It is a respectful and peaceful way of wishing the deceased a calm and serene rest. You can also say "nur içinde yatsın", which means "may he/she lie in light". It is a more spiritual and radiant way of wishing the deceased a bright and blissful rest.
      4. How do you write a condolence card in Turkish?
         If you want to write a condolence card in Turkish, you can follow these steps:
         1. Start with a salutation, such as "Sevgili ..." (Dear ...) or "Sayın ..." (Mr./Mrs./Ms.). Use the name or title of the person or family you are addressing.
         2. Express your condolences, such as "Allah rehmet elesin", "Başınız sağolsun", or "Acınızı paylaşıyorum". You can also add some personal words of sympathy and comfort.
         3. End with a closing, such as "Saygılarımla" (With my respects), "Sevgilerimle" (With my love), or "Dua ediyorum" (I am praying). Use your name or signature.
      5. What are some cultural tips to remember when attending a funeral or visiting a grave in Turkey?
         Some cultural tips to remember when attending a funeral or visiting a grave in Turkey are:
         • Dress modestly and conservatively. Avoid wearing bright colors, flashy accessories, or revealing clothes. Wear black or dark colors as a sign of mourning.
         • Bring some flowers, preferably white ones, as a gesture of respect and tribute. You can also donate some money to a charity or mosque in the name of the deceased.
         • Greet the bereaved family with a handshake or a hug, depending on your relationship and familiarity. Say some words of condolence, such as "Allah rehmet elesin" or "Başınız sağolsun". Avoid making jokes, laughing, or talking about trivial matters.
         • Follow the customs and rituals of the funeral service, such as reciting some verses from the Quran, praying for the deceased, or joining the burial procession. Respect the religious beliefs and practices of the family and the community.
         • Visit the grave regularly, especially on religious holidays, anniversaries, or special occasions. Pray for the deceased, clean the tombstone, light a candle, or leave some flowers.

        \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Caddie 9 Dongle Crackl.md b/spaces/contluForse/HuggingGPT/assets/Caddie 9 Dongle Crackl.md deleted file mode 100644 index b599dcd647414b8933dec596fe1c11774b776f8a..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Caddie 9 Dongle Crackl.md +++ /dev/null @@ -1,10 +0,0 @@ - -

        Break the Caddie dongle out of its plastic case, connect it to a USB port, and run the Caddie Setup Wizard to configure your dongle settings and activate your license.

        Note: If you have an older operating system, it may not recognise the USB interface; in that case, run the setup wizard in Safe Mode (Ctrl+Alt+Delete) to configure the dongle.


        Set up the software on your computer using your network credentials, then launch the Sentinel admin page.

        On the left, you should be presented with a list of active license details for your computer and/or linked Caddie dongle.


        If you are prompted for a serial number, then the serial number for your Caddie dongle is not saved on the Sentinel admin control center page. If you are connected to the Internet when you try to run Caddie, you should be able to browse to http://localhost/ or another IP address on your network. If you are unable to browse, please follow the link on the PDF below for information on reporting a software issue.


        Complete the license validation check on the Sentinel admin page. If this throws an exception, you might have a problem with your main dongle. If Caddie does not launch, you can try uninstalling the software and then installing it again.


        You can fix the Caddie License Error 0001 on your Caddie dongle. This is done by resetting the password to the manufacturer's unique serial number.

        Note: The manufacturer's serial number is displayed on the Sentinel admin page as part of a long string of characters. If you have a Caddie dongle that is not receiving any service updates, this sequence of characters should still be displayed.

        \ No newline at end of file diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/layers/patch_embed.py b/spaces/cooelf/Multimodal-CoT/timm/models/layers/patch_embed.py deleted file mode 100644 index 42997fb89f10d518028e064c46387f694dce9026..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/layers/patch_embed.py +++ /dev/null @@ -1,39 +0,0 @@ -""" Image to Patch Embedding using Conv2d - -A convolution based approach to patchifying a 2D image w/ embedding projection. - -Based on the impl in https://github.com/google-research/vision_transformer - -Hacked together by / Copyright 2020 Ross Wightman -""" - -from torch import nn as nn - -from .helpers import to_2tuple - - -class PatchEmbed(nn.Module): - """ 2D Image to Patch Embedding - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - self.img_size = img_size - self.patch_size = patch_size - self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) - self.num_patches = self.grid_size[0] * self.grid_size[1] - self.flatten = flatten - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() - - def forward(self, x): - B, C, H, W = x.shape - assert H == self.img_size[0] and W == self.img_size[1], \ - f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." - x = self.proj(x) - if self.flatten: - x = x.flatten(2).transpose(1, 2) # BCHW -> BNC - x = self.norm(x) - return x diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/resnest.py b/spaces/cooelf/Multimodal-CoT/timm/models/resnest.py deleted file mode 100644 index 31eebd8092a75e949a7592833f00f05c0a5a9be7..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/resnest.py +++ /dev/null @@ -1,237 +0,0 @@ -""" ResNeSt Models - -Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 - -Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang - -Modified for torchscript compat, and consistency with timm by Ross Wightman -""" -import torch -from torch import nn - -from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from .helpers import build_model_with_cfg -from .layers import SplitAttn -from .registry import register_model -from .resnet import ResNet - - -def _cfg(url='', **kwargs): - return { - 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'conv1.0', 'classifier': 'fc', - **kwargs - } - -default_cfgs = { - 'resnest14d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth'), - 'resnest26d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth'), - 'resnest50d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth'), - 'resnest101e': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth', - input_size=(3, 256, 256), pool_size=(8, 8)), - 'resnest200e': _cfg( - 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth', - input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), - 'resnest269e': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth', - input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), - 'resnest50d_4s2x40d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth', - interpolation='bicubic'), - 'resnest50d_1s4x24d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth', - interpolation='bicubic') -} - - -class ResNestBottleneck(nn.Module): - """ResNet Bottleneck - """ - # pylint: disable=unused-argument - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, - radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, - reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, - attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): - super(ResNestBottleneck, self).__init__() - assert reduce_first == 1 # not supported - assert attn_layer is None # not supported - assert aa_layer is None # TODO not yet supported - assert drop_path is None # TODO not yet supported - - group_width = int(planes * (base_width / 64.)) * cardinality - first_dilation = first_dilation or dilation - if avd and (stride > 1 or is_first): - avd_stride = stride - stride = 1 - else: - avd_stride = 0 - self.radix = radix - self.drop_block = drop_block - - self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) - self.bn1 = norm_layer(group_width) - self.act1 = act_layer(inplace=True) - self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None - - if self.radix >= 1: - self.conv2 = SplitAttn( - group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, - dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_block=drop_block) - self.bn2 = nn.Identity() - self.act2 = nn.Identity() - else: - self.conv2 = nn.Conv2d( - group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, - dilation=first_dilation, groups=cardinality, bias=False) - self.bn2 = norm_layer(group_width) - self.act2 = act_layer(inplace=True) - self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None - - self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) - self.bn3 = norm_layer(planes*4) - self.act3 = act_layer(inplace=True) - self.downsample = downsample - - def zero_init_last_bn(self): - nn.init.zeros_(self.bn3.weight) - - def forward(self, x): - shortcut = x - - out = self.conv1(x) - out = self.bn1(out) - if self.drop_block is not None: - out = self.drop_block(out) - out = self.act1(out) - - if self.avd_first is not None: - out = self.avd_first(out) - - out = self.conv2(out) - out = self.bn2(out) - if self.drop_block is not None: - out = self.drop_block(out) - out = self.act2(out) - - if self.avd_last is not None: - out = self.avd_last(out) - - out = self.conv3(out) - out = self.bn3(out) - if self.drop_block is not None: - out = self.drop_block(out) - - if self.downsample is not None: - shortcut = self.downsample(x) - - out += shortcut - out = self.act3(out) - return out - - 
-def _create_resnest(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - ResNet, variant, pretrained, - default_cfg=default_cfgs[variant], - **kwargs) - - -@register_model -def resnest14d(pretrained=False, **kwargs): - """ ResNeSt-14d model. Weights ported from GluonCV. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[1, 1, 1, 1], - stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest14d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest26d(pretrained=False, **kwargs): - """ ResNeSt-26d model. Weights ported from GluonCV. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[2, 2, 2, 2], - stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest26d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest50d(pretrained=False, **kwargs): - """ ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 6, 3], - stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest101e(pretrained=False, **kwargs): - """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 23, 3], - stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest101e', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest200e(pretrained=False, **kwargs): - """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 24, 36, 3], - stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest200e', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest269e(pretrained=False, **kwargs): - """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 - Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. 
- """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 30, 48, 8], - stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, - block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) - return _create_resnest('resnest269e', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest50d_4s2x40d(pretrained=False, **kwargs): - """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 6, 3], - stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, - block_args=dict(radix=4, avd=True, avd_first=True), **kwargs) - return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **model_kwargs) - - -@register_model -def resnest50d_1s4x24d(pretrained=False, **kwargs): - """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md - """ - model_kwargs = dict( - block=ResNestBottleneck, layers=[3, 4, 6, 3], - stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, - block_args=dict(radix=1, avd=True, avd_first=True), **kwargs) - return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **model_kwargs) diff --git a/spaces/cooelf/Multimodal-CoT/timm/scheduler/cosine_lr.py b/spaces/cooelf/Multimodal-CoT/timm/scheduler/cosine_lr.py deleted file mode 100644 index 1532f092b5cc8c0af5125967cfb84b32ce03ca4a..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/scheduler/cosine_lr.py +++ /dev/null @@ -1,116 +0,0 @@ -""" Cosine Scheduler - -Cosine LR schedule with warmup, cycle/restarts, noise. - -Hacked together by / Copyright 2020 Ross Wightman -""" -import logging -import math -import numpy as np -import torch - -from .scheduler import Scheduler - - -_logger = logging.getLogger(__name__) - - -class CosineLRScheduler(Scheduler): - """ - Cosine decay with restarts. - This is described in the paper https://arxiv.org/abs/1608.03983. 
- - Inspiration from - https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py - """ - - def __init__(self, - optimizer: torch.optim.Optimizer, - t_initial: int, - t_mul: float = 1., - lr_min: float = 0., - decay_rate: float = 1., - warmup_t=0, - warmup_lr_init=0, - warmup_prefix=False, - cycle_limit=0, - t_in_epochs=True, - noise_range_t=None, - noise_pct=0.67, - noise_std=1.0, - noise_seed=42, - initialize=True) -> None: - super().__init__( - optimizer, param_group_field="lr", - noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, - initialize=initialize) - - assert t_initial > 0 - assert lr_min >= 0 - if t_initial == 1 and t_mul == 1 and decay_rate == 1: - _logger.warning("Cosine annealing scheduler will have no effect on the learning " - "rate since t_initial = t_mul = eta_mul = 1.") - self.t_initial = t_initial - self.t_mul = t_mul - self.lr_min = lr_min - self.decay_rate = decay_rate - self.cycle_limit = cycle_limit - self.warmup_t = warmup_t - self.warmup_lr_init = warmup_lr_init - self.warmup_prefix = warmup_prefix - self.t_in_epochs = t_in_epochs - if self.warmup_t: - self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] - super().update_groups(self.warmup_lr_init) - else: - self.warmup_steps = [1 for _ in self.base_values] - - def _get_lr(self, t): - if t < self.warmup_t: - lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] - else: - if self.warmup_prefix: - t = t - self.warmup_t - - if self.t_mul != 1: - i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul)) - t_i = self.t_mul ** i * self.t_initial - t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial - else: - i = t // self.t_initial - t_i = self.t_initial - t_curr = t - (self.t_initial * i) - - gamma = self.decay_rate ** i - lr_min = self.lr_min * gamma - lr_max_values = [v * gamma for v in self.base_values] - - if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit): - lrs = [ - lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values - ] - else: - lrs = [self.lr_min for _ in self.base_values] - - return lrs - - def get_epoch_values(self, epoch: int): - if self.t_in_epochs: - return self._get_lr(epoch) - else: - return None - - def get_update_values(self, num_updates: int): - if not self.t_in_epochs: - return self._get_lr(num_updates) - else: - return None - - def get_cycle_length(self, cycles=0): - if not cycles: - cycles = self.cycle_limit - cycles = max(1, cycles) - if self.t_mul == 1.0: - return self.t_initial * cycles - else: - return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul))) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/pycocotools/mask.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/pycocotools/mask.py deleted file mode 100644 index 85a5643aadd5c3c5f02609aa918c38d6da14a929..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/pycocotools/mask.py +++ /dev/null @@ -1,107 +0,0 @@ -__author__ = 'tsungyi' - -# import annotator.oneformer.pycocotools._mask as _mask - -# Interface for manipulating masks stored in RLE format. -# -# RLE is a simple yet efficient format for storing binary masks. 
RLE -# first divides a vector (or vectorized image) into a series of piecewise -# constant regions and then for each piece simply stores the length of -# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would -# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1] -# (note that the odd counts are always the numbers of zeros). Instead of -# storing the counts directly, additional compression is achieved with a -# variable bitrate representation based on a common scheme called LEB128. -# -# Compression is greatest given large piecewise constant regions. -# Specifically, the size of the RLE is proportional to the number of -# *boundaries* in M (or for an image the number of boundaries in the y -# direction). Assuming fairly simple shapes, the RLE representation is -# O(sqrt(n)) where n is number of pixels in the object. Hence space usage -# is substantially lower, especially for large simple objects (large n). -# -# Many common operations on masks can be computed directly using the RLE -# (without need for decoding). This includes computations such as area, -# union, intersection, etc. All of these operations are linear in the -# size of the RLE, in other words they are O(sqrt(n)) where n is the area -# of the object. Computing these operations on the original mask is O(n). -# Thus, using the RLE can result in substantial computational savings. -# -# The following API functions are defined: -# encode - Encode binary masks using RLE. -# decode - Decode binary masks encoded via RLE. -# merge - Compute union or intersection of encoded masks. -# iou - Compute intersection over union between masks. -# area - Compute area of encoded masks. -# toBbox - Get bounding boxes surrounding encoded masks. -# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask. -# -# Usage: -# Rs = encode( masks ) -# masks = decode( Rs ) -# R = merge( Rs, intersect=false ) -# o = iou( dt, gt, iscrowd ) -# a = area( Rs ) -# bbs = toBbox( Rs ) -# Rs = frPyObjects( [pyObjects], h, w ) -# -# In the API the following formats are used: -# Rs - [dict] Run-length encoding of binary masks -# R - dict Run-length encoding of binary mask -# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order) -# iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore -# bbs - [nx4] Bounding box(es) stored as [x y w h] -# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list) -# dt,gt - May be either bounding boxes or encoded masks -# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel). -# -# Finally, a note about the intersection over union (iou) computation. -# The standard iou of a ground truth (gt) and detected (dt) object is -# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt)) -# For "crowd" regions, we use a modified criteria. If a gt object is -# marked as "iscrowd", we allow a dt to match any subregion of the gt. -# Choosing gt' in the crowd gt that best matches the dt can be done using -# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing -# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt) -# For crowd gt regions we use this modified criteria above for the iou. -# -# To compile run "python setup.py build_ext --inplace" -# Please do not contact us for help with compiling. -# -# Microsoft COCO Toolbox. version 2.0 -# Data, paper, and tutorials available at: http://mscoco.org/ -# Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 
-# Licensed under the Simplified BSD License [see coco/license.txt] - -# iou = _mask.iou -# merge = _mask.merge -# frPyObjects = _mask.frPyObjects - -def encode(bimask): - pass - # if len(bimask.shape) == 3: - # return _mask.encode(bimask) - # elif len(bimask.shape) == 2: - # h, w = bimask.shape - # return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0] - -def decode(rleObjs): - pass - # if type(rleObjs) == list: - # return _mask.decode(rleObjs) - # else: - # return _mask.decode([rleObjs])[:,:,0] - -def area(rleObjs): - pass - # if type(rleObjs) == list: - # return _mask.area(rleObjs) - # else: - # return _mask.area([rleObjs])[0] - -def toBbox(rleObjs): - pass - # if type(rleObjs) == list: - # return _mask.toBbox(rleObjs) - # else: - # return _mask.toBbox([rleObjs])[0] \ No newline at end of file diff --git a/spaces/danterivers/music-generation-samples/tests/data/test_audio_dataset.py b/spaces/danterivers/music-generation-samples/tests/data/test_audio_dataset.py deleted file mode 100644 index b69c9c397830738b73d6c229009f84b867cda801..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/tests/data/test_audio_dataset.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from functools import partial -from itertools import product -import json -import math -import os -import random -import typing as tp - -import pytest -import torch -from torch.utils.data import DataLoader - -from audiocraft.data.audio_dataset import ( - AudioDataset, - AudioMeta, - _get_audio_meta, - load_audio_meta, - save_audio_meta -) -from audiocraft.data.zip import PathInZip - -from ..common_utils import TempDirMixin, get_white_noise, save_wav - - -class TestAudioMeta(TempDirMixin): - - def test_get_audio_meta(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. 
- for sample_rate, ch in product(sample_rates, channels): - n_frames = int(duration * sample_rate) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path('sample.wav') - save_wav(path, wav, sample_rate) - m = _get_audio_meta(path, minimal=True) - assert m.path == path, 'path does not match' - assert m.sample_rate == sample_rate, 'sample rate does not match' - assert m.duration == duration, 'duration does not match' - assert m.amplitude is None - assert m.info_path is None - - def test_save_audio_meta(self): - audio_meta = [ - AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')), - AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json')) - ] - empty_audio_meta = [] - for idx, meta in enumerate([audio_meta, empty_audio_meta]): - path = self.get_temp_path(f'data_{idx}_save.jsonl') - save_audio_meta(path, meta) - with open(path, 'r') as f: - lines = f.readlines() - read_meta = [AudioMeta.from_dict(json.loads(line)) for line in lines] - assert len(read_meta) == len(meta) - for m, read_m in zip(meta, read_meta): - assert m == read_m - - def test_load_audio_meta(self): - try: - import dora - except ImportError: - dora = None # type: ignore - - audio_meta = [ - AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')), - AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json')) - ] - empty_meta = [] - for idx, meta in enumerate([audio_meta, empty_meta]): - path = self.get_temp_path(f'data_{idx}_load.jsonl') - with open(path, 'w') as f: - for m in meta: - json_str = json.dumps(m.to_dict()) + '\n' - f.write(json_str) - read_meta = load_audio_meta(path) - assert len(read_meta) == len(meta) - for m, read_m in zip(meta, read_meta): - if dora: - m.path = dora.git_save.to_absolute_path(m.path) - assert m == read_m, f'original={m}, read={read_m}' - - -class TestAudioDataset(TempDirMixin): - - def _create_audio_files(self, - root_name: str, - num_examples: int, - durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.), - sample_rate: int = 16_000, - channels: int = 1): - root_dir = self.get_temp_dir(root_name) - for i in range(num_examples): - if isinstance(durations, float): - duration = durations - elif isinstance(durations, tuple) and len(durations) == 1: - duration = durations[0] - elif isinstance(durations, tuple) and len(durations) == 2: - duration = random.uniform(durations[0], durations[1]) - else: - assert False - n_frames = int(duration * sample_rate) - wav = get_white_noise(channels, n_frames) - path = os.path.join(root_dir, f'example_{i}.wav') - save_wav(path, wav, sample_rate) - return root_dir - - def _create_audio_dataset(self, - root_name: str, - total_num_examples: int, - durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.), - sample_rate: int = 16_000, - channels: int = 1, - segment_duration: tp.Optional[float] = None, - num_examples: int = 10, - shuffle: bool = True, - return_info: bool = False): - root_dir = self._create_audio_files(root_name, total_num_examples, durations, sample_rate, channels) - dataset = AudioDataset.from_path(root_dir, - minimal_meta=True, - segment_duration=segment_duration, - num_samples=num_examples, - sample_rate=sample_rate, - channels=channels, - shuffle=shuffle, - return_info=return_info) - return dataset - - def test_dataset_full(self): - total_examples = 10 - min_duration, max_duration = 1., 4. 
- sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), - sample_rate=sample_rate, channels=channels, segment_duration=None) - assert len(dataset) == total_examples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] <= int(max_duration * sample_rate) - assert sample.shape[1] >= int(min_duration * sample_rate) - - def test_dataset_segment(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - - def test_dataset_equal_audio_and_segment_durations(self): - total_examples = 1 - num_samples = 2 - audio_duration = 1. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - # the random seek_time adds variability on audio read - sample_1 = dataset[0] - sample_2 = dataset[1] - assert not torch.allclose(sample_1, sample_2) - - def test_dataset_samples(self): - total_examples = 1 - num_samples = 2 - audio_duration = 1. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - - create_dataset = partial( - self._create_audio_dataset, - 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, - ) - - dataset = create_dataset(shuffle=True) - # when shuffle = True, we have different inputs for the same index across epoch - sample_1 = dataset[0] - sample_2 = dataset[0] - assert not torch.allclose(sample_1, sample_2) - - dataset_noshuffle = create_dataset(shuffle=False) - # when shuffle = False, we have same inputs for the same index across epoch - sample_1 = dataset_noshuffle[0] - sample_2 = dataset_noshuffle[0] - assert torch.allclose(sample_1, sample_2) - - def test_dataset_return_info(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. 
- sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample, segment_info = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - assert segment_info.sample_rate == sample_rate - assert segment_info.total_frames == int(segment_duration * sample_rate) - assert segment_info.n_frames <= int(segment_duration * sample_rate) - assert segment_info.seek_time >= 0 - - def test_dataset_return_info_no_segment_duration(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = None - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - assert len(dataset) == total_examples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample, segment_info = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == segment_info.total_frames - assert segment_info.sample_rate == sample_rate - assert segment_info.n_frames <= segment_info.total_frames - - def test_dataset_collate_fn(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=False) - batch_size = 4 - dataloader = DataLoader( - dataset, - batch_size=batch_size, - num_workers=0 - ) - for idx, batch in enumerate(dataloader): - assert batch.shape[0] == batch_size - - @pytest.mark.parametrize("segment_duration", [1.0, None]) - def test_dataset_with_meta_collate_fn(self, segment_duration): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - batch_size = 4 - dataloader = DataLoader( - dataset, - batch_size=batch_size, - collate_fn=dataset.collater, - num_workers=0 - ) - for idx, batch in enumerate(dataloader): - wav, infos = batch - assert wav.shape[0] == batch_size - assert len(infos) == batch_size - - @pytest.mark.parametrize("segment_duration,sample_on_weight,sample_on_duration,a_hist,b_hist,c_hist", [ - [1, True, True, 0.5, 0.5, 0.0], - [1, False, True, 0.25, 0.5, 0.25], - [1, True, False, 0.666, 0.333, 0.0], - [1, False, False, 0.333, 0.333, 0.333], - [None, False, False, 0.333, 0.333, 0.333]]) - def test_sample_with_weight(self, segment_duration, sample_on_weight, sample_on_duration, a_hist, b_hist, c_hist): - random.seed(1234) - rng = torch.Generator() - rng.manual_seed(1234) - - def _get_histogram(dataset, repetitions=20_000): - counts = {file_meta.path: 0. 
for file_meta in meta} - for _ in range(repetitions): - file_meta = dataset.sample_file(rng) - counts[file_meta.path] += 1 - return {name: count / repetitions for name, count in counts.items()} - - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - dataset = AudioDataset( - meta, segment_duration=segment_duration, sample_on_weight=sample_on_weight, - sample_on_duration=sample_on_duration) - hist = _get_histogram(dataset) - assert math.isclose(hist['a'], a_hist, abs_tol=0.01) - assert math.isclose(hist['b'], b_hist, abs_tol=0.01) - assert math.isclose(hist['c'], c_hist, abs_tol=0.01) - - def test_meta_duration_filter_all(self): - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - try: - AudioDataset(meta, segment_duration=11, min_segment_ratio=1) - assert False - except AssertionError: - assert True - - def test_meta_duration_filter_long(self): - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - dataset = AudioDataset(meta, segment_duration=None, min_segment_ratio=1, max_audio_duration=7) - assert len(dataset) == 2 diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_t.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_t.py deleted file mode 100644 index 7f94677522e4b8b8a4e55c079f618e6046b045b8..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_t.py +++ /dev/null @@ -1,47 +0,0 @@ -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable -import sys -import array - - -class table__c_v_t(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - values = array.array("h") - values.frombytes(data) - if sys.byteorder != "big": - values.byteswap() - self.values = values - - def compile(self, ttFont): - values = self.values[:] - if sys.byteorder != "big": - values.byteswap() - return values.tobytes() - - def toXML(self, writer, ttFont): - for i in range(len(self.values)): - value = self.values[i] - writer.simpletag("cv", value=value, index=i) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "values"): - self.values = array.array("h") - if name == "cv": - index = safeEval(attrs["index"]) - value = safeEval(attrs["value"]) - for i in range(1 + index - len(self.values)): - self.values.append(0) - self.values[index] = value - - def __len__(self): - return len(self.values) - - def __getitem__(self, index): - return self.values[index] - - def __setitem__(self, index, value): - self.values[index] = value - - def __delitem__(self, index): - del self.values[index] diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-80499d89.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-80499d89.js deleted file mode 100644 index 0c3827fb658d68902cc009b07dbe96ab90d00f4e..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-80499d89.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as v,e as C,s as k,m as H,F as I,g as f,K as S,h as K,G as w,w as r,u as d,k as M,H as j,C as z,at as J,a1 as T,D as L,a4 as N,a9 as D,ab as E,ac as q,ad as A,E as O}from"./index-39fce9e2.js";import{T as P}from"./StaticTabs-26fecbee.js";import{S as Q}from"./StaticColumn-ab6a4f96.js";function R(a){let e;const n=a[8].default,t=D(n,a,a[9],null);return{c(){t&&t.c()},m(s,l){t&&t.m(s,l),e=!0},p(s,l){t&&t.p&&(!e||l&512)&&E(t,n,s,s[9],e?A(n,s[9],l,null):q(s[9]),null)},i(s){e||(r(t,s),e=!0)},o(s){d(t,s),e=!1},d(s){t&&t.d(s)}}}function U(a){let e,n,t,s;return n=new Q({props:{$$slots:{default:[R]},$$scope:{ctx:a}}}),{c(){e=H("div"),I(n.$$.fragment),f(e,"id",a[0]),f(e,"class",t="tabitem "+a[1].join(" ")+" svelte-19hvt5v"),S(e,"display",a[3]===a[2]?"block":"none")},m(l,c){K(l,e,c),w(n,e,null),s=!0},p(l,[c]){const _={};c&512&&(_.$$scope={dirty:c,ctx:l}),n.$set(_),(!s||c&1)&&f(e,"id",l[0]),(!s||c&2&&t!==(t="tabitem "+l[1].join(" ")+" svelte-19hvt5v"))&&f(e,"class",t),c&12&&S(e,"display",l[3]===l[2]?"block":"none")},i(l){s||(r(n.$$.fragment,l),s=!0)},o(l){d(n.$$.fragment,l),s=!1},d(l){l&&M(e),j(n)}}}function V(a,e,n){let t,s,{$$slots:l={},$$scope:c}=e,{elem_id:_=""}=e,{elem_classes:u=[]}=e,{name:o}=e,{id:i={}}=e;const B=z(),{register_tab:F,unregister_tab:G,selected_tab:b,selected_tab_index:g}=J(P);T(a,b,m=>n(3,s=m)),T(a,g,m=>n(7,t=m));let h=F({name:o,id:i});return L(()=>()=>G({name:o,id:i})),a.$$set=m=>{"elem_id"in m&&n(0,_=m.elem_id),"elem_classes"in m&&n(1,u=m.elem_classes),"name"in m&&n(6,o=m.name),"id"in m&&n(2,i=m.id),"$$scope"in m&&n(9,c=m.$$scope)},a.$$.update=()=>{a.$$.dirty&192&&t===h&&N().then(()=>B("select",{value:o,index:h}))},[_,u,i,s,b,g,o,t,l,c]}class W extends v{constructor(e){super(),C(this,e,V,U,k,{elem_id:0,elem_classes:1,name:6,id:2})}}function X(a){let e;const 
n=a[4].default,t=D(n,a,a[6],null);return{c(){t&&t.c()},m(s,l){t&&t.m(s,l),e=!0},p(s,l){t&&t.p&&(!e||l&64)&&E(t,n,s,s[6],e?A(n,s[6],l,null):q(s[6]),null)},i(s){e||(r(t,s),e=!0)},o(s){d(t,s),e=!1},d(s){t&&t.d(s)}}}function Y(a){let e,n;return e=new W({props:{elem_id:a[0],elem_classes:a[1],name:a[2],id:a[3],$$slots:{default:[X]},$$scope:{ctx:a}}}),e.$on("select",a[5]),{c(){I(e.$$.fragment)},m(t,s){w(e,t,s),n=!0},p(t,[s]){const l={};s&1&&(l.elem_id=t[0]),s&2&&(l.elem_classes=t[1]),s&4&&(l.name=t[2]),s&8&&(l.id=t[3]),s&64&&(l.$$scope={dirty:s,ctx:t}),e.$set(l)},i(t){n||(r(e.$$.fragment,t),n=!0)},o(t){d(e.$$.fragment,t),n=!1},d(t){j(e,t)}}}function Z(a,e,n){let{$$slots:t={},$$scope:s}=e,{elem_id:l=""}=e,{elem_classes:c=[]}=e,{label:_}=e,{id:u}=e;function o(i){O.call(this,a,i)}return a.$$set=i=>{"elem_id"in i&&n(0,l=i.elem_id),"elem_classes"in i&&n(1,c=i.elem_classes),"label"in i&&n(2,_=i.label),"id"in i&&n(3,u=i.id),"$$scope"in i&&n(6,s=i.$$scope)},[l,c,_,u,t,o,s]}class y extends v{constructor(e){super(),C(this,e,Z,Y,k,{elem_id:0,elem_classes:1,label:2,id:3})}}const ee=y,te=["static"];export{ee as Component,te as modes}; -//# sourceMappingURL=index-80499d89.js.map diff --git a/spaces/diacanFperku/AutoGPT/Basic And Clinical Pharmacology Katzung Test Bank.md b/spaces/diacanFperku/AutoGPT/Basic And Clinical Pharmacology Katzung Test Bank.md deleted file mode 100644 index ad844d354200654398982c9394e59f19e86d9c17..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Basic And Clinical Pharmacology Katzung Test Bank.md +++ /dev/null @@ -1,108 +0,0 @@ - -

        Basic And Clinical Pharmacology Katzung Test Bank: A Guide for Students and Practitioners

        - -

        Pharmacology is the study of how drugs affect the body and how the body affects drugs. It is a vital subject for anyone who wants to understand the mechanisms of action, therapeutic uses, adverse effects, and interactions of drugs. Pharmacology is also essential for preparing for exams and board certifications in various health professions.

        -

        Basic And Clinical Pharmacology Katzung Test Bank


        Download File ===== https://gohhs.com/2uFSRB



        - -

        However, pharmacology can be challenging to learn and master, especially with the vast amount of information and the complexity of the concepts involved. That is why many students and practitioners rely on Basic And Clinical Pharmacology Katzung Test Bank as a valuable resource for studying and reviewing pharmacology.

        - -

        What is Basic And Clinical Pharmacology Katzung Test Bank?

        - -

        Basic And Clinical Pharmacology Katzung Test Bank is a collection of multiple-choice questions and answers that accompany the textbook Basic And Clinical Pharmacology by Bertram Katzung, Anthony Trevor, and colleagues. The textbook is one of the most widely used and authoritative pharmacology books in the world, covering all aspects of pharmacology from basic principles to clinical applications.

        - -

        The test bank provides over 800 questions that cover all chapters and sections of the textbook, as well as additional questions on special topics such as pharmacogenomics, biotechnology, and drug development. The questions are designed to test your knowledge, comprehension, application, analysis, synthesis, and evaluation of pharmacology. The answers are accompanied by detailed explanations and references to help you understand the rationale behind each choice.

        - -

        How can Basic And Clinical Pharmacology Katzung Test Bank help you?

        - -

        Basic And Clinical Pharmacology Katzung Test Bank can help you in many ways, such as:

        - -
          -
        • Reinforcing your learning and retention of pharmacology concepts and facts.
        • -
        • Identifying your strengths and weaknesses in pharmacology.
        • -
        • Improving your critical thinking and problem-solving skills in pharmacology.
        • -
        • Preparing for exams and board reviews in pharmacology.
        • -
        • Staying updated with the latest developments and trends in pharmacology.
        • -
        - -

        Basic And Clinical Pharmacology Katzung Test Bank can be used as a standalone study tool or as a complement to the textbook. You can use it to review each chapter after reading it, to test yourself before an exam, or to practice for a board certification. You can also use it to refresh your knowledge or to learn new topics in pharmacology.

        - -

        Where can you get Basic And Clinical Pharmacology Katzung Test Bank?

        - -

        Basic And Clinical Pharmacology Katzung Test Bank is available online from various sources, such as:

        -

        - -
          -
        • The official website of the publisher McGraw Hill Medical.
        • -
        • The online platform AccessPharmacy that features trusted pharmacy content from McGraw Hill.
        • -
        • The online marketplace Stuvia that offers study materials from students and professionals.
        • -
        • The online document sharing platform Issuu that allows you to read and download documents.
        • -
        - -

        You can access these sources by searching for Basic And Clinical Pharmacology Katzung Test Bank in your preferred web browser. You may need to create an account or pay a fee to access some of them. You may also find other sources that offer Basic And Clinical Pharmacology Katzung Test Bank, but make sure they are reliable and trustworthy before using them.

        - -

        How to use Basic And Clinical Pharmacology Katzung Test Bank effectively?

        - -

        Basic And Clinical Pharmacology Katzung Test Bank is not a substitute for reading and understanding the textbook. It is a supplement that can help you reinforce and apply what you have learned from the textbook. To use it effectively, you should follow these tips:

        - -
          -
        • Read the textbook chapter before attempting the test bank questions. This will help you familiarize yourself with the concepts and terms covered in the chapter.
        • -
        • Answer the test bank questions without looking at the textbook or the answers. This will help you assess your own knowledge and identify your gaps.
        • -
        • Check your answers and read the explanations carefully. This will help you correct your mistakes and learn from them.
        • -
        • Review the textbook sections that correspond to the questions you missed or were unsure about. This will help you reinforce your understanding and fill in your gaps.
        • -
        • Repeat the process until you are confident that you have mastered the chapter.
        • -
        - -

        Basic And Clinical Pharmacology Katzung Test Bank can also be used as a self-assessment tool to monitor your progress and readiness for exams and board reviews. You can use it to:

        - -
          -
        • Test yourself on specific topics or chapters that you have studied.
        • -
        • Test yourself on a random selection of questions from different chapters to simulate a comprehensive exam.
        • -
        • Test yourself on questions that focus on special topics such as pharmacogenomics, biotechnology, and drug development.
        • -
        • Compare your performance with other students or practitioners who have used the test bank.
        • -
        • Identify your areas of strength and weakness in pharmacology.
        • -
        - -

        What are the benefits of Basic And Clinical Pharmacology Katzung Test Bank?

        - -

        Basic And Clinical Pharmacology Katzung Test Bank has many benefits for students and practitioners of pharmacology, such as:

        - -
          -
        • It covers all the topics and concepts that are relevant and important for pharmacology.
        • -
        • It provides a comprehensive and up-to-date review of pharmacology based on the latest evidence and guidelines.
        • -
        • It follows the format and style of the textbook, making it easy to correlate and integrate the information.
        • -
        • It offers a variety of questions that test different levels of cognitive skills and learning outcomes.
        • -
        • It provides detailed and clear explanations for each answer, along with references to the textbook and other sources.
        • -
        • It helps you develop your test-taking strategies and skills, such as time management, elimination, and reasoning.
        • -
        • It enhances your confidence and reduces your anxiety before exams and board reviews.
        • -
        - -

        Basic And Clinical Pharmacology Katzung Test Bank is not only a test bank, but also a learning tool that can help you achieve your academic and professional goals in pharmacology.

        - -

        How to get the most out of Basic And Clinical Pharmacology Katzung Test Bank?

        - -

        Basic And Clinical Pharmacology Katzung Test Bank is a valuable resource that can help you learn and review pharmacology. However, to get the most out of it, you need to use it properly and effectively. Here are some tips on how to do that:

        - -
          -
        • Use it as a supplement, not a substitute. Do not rely solely on the test bank to study pharmacology. Read the textbook, attend lectures, take notes, do assignments, and participate in discussions. Use the test bank to reinforce and apply what you have learned from other sources.
        • -
        • Use it regularly, not sporadically. Do not wait until the last minute to use the test bank. Review each chapter as soon as you finish reading it. Review previous chapters periodically to refresh your memory. Review all chapters before an exam or a board review.
        • -
        • Use it actively, not passively. Do not just read the questions and answers. Try to answer the questions without looking at the answers. Think about why each answer is correct or incorrect. Compare your answers with the explanations. Look up any information that you are unsure about or want to learn more about.
        • -
        • Use it strategically, not randomly. Do not just answer any question that comes your way. Choose questions that are relevant and appropriate for your level and purpose. Focus on questions that test your weaknesses or challenge your strengths. Use questions that simulate the format and difficulty of your exams or board reviews.
        • -
        • Use it with others, not alone. Do not isolate yourself when using the test bank. Study with your classmates, friends, or mentors. Share your questions and answers with them. Discuss your reasoning and logic with them. Learn from their feedback and insights.
        • -
        - -

        Basic And Clinical Pharmacology Katzung Test Bank is a powerful resource that can help you learn and review pharmacology. However, you need to use it wisely and effectively to get the best results.

        - -

        Conclusion

        - -

        Basic And Clinical Pharmacology Katzung Test Bank is a useful resource for anyone who wants to learn and review pharmacology. It can help you improve your knowledge, skills, and confidence in pharmacology. It can also help you prepare for exams and board certifications in pharmacology. If you are looking for a comprehensive, authoritative, and up-to-date pharmacology test bank, you should consider Basic And Clinical Pharmacology Katzung Test Bank.

        -
        -
        \ No newline at end of file diff --git a/spaces/diego2554/RemBG_super/rembg/sessions/base.py b/spaces/diego2554/RemBG_super/rembg/sessions/base.py deleted file mode 100644 index f6bbe5a0e0575eef049aa42592fb9e3c736bfc2d..0000000000000000000000000000000000000000 --- a/spaces/diego2554/RemBG_super/rembg/sessions/base.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -from typing import Dict, List, Tuple - -import numpy as np -import onnxruntime as ort -from PIL import Image -from PIL.Image import Image as PILImage - - -class BaseSession: - def __init__( - self, - model_name: str, - sess_opts: ort.SessionOptions, - providers=None, - *args, - **kwargs - ): - self.model_name = model_name - - self.providers = [] - - _providers = ort.get_available_providers() - if providers: - for provider in providers: - if provider in _providers: - self.providers.append(provider) - else: - self.providers.extend(_providers) - - self.inner_session = ort.InferenceSession( - str(self.__class__.download_models()), - providers=self.providers, - sess_options=sess_opts, - ) - - def normalize( - self, - img: PILImage, - mean: Tuple[float, float, float], - std: Tuple[float, float, float], - size: Tuple[int, int], - *args, - **kwargs - ) -> Dict[str, np.ndarray]: - im = img.convert("RGB").resize(size, Image.LANCZOS) - - im_ary = np.array(im) - im_ary = im_ary / np.max(im_ary) - - tmpImg = np.zeros((im_ary.shape[0], im_ary.shape[1], 3)) - tmpImg[:, :, 0] = (im_ary[:, :, 0] - mean[0]) / std[0] - tmpImg[:, :, 1] = (im_ary[:, :, 1] - mean[1]) / std[1] - tmpImg[:, :, 2] = (im_ary[:, :, 2] - mean[2]) / std[2] - - tmpImg = tmpImg.transpose((2, 0, 1)) - - return { - self.inner_session.get_inputs()[0] - .name: np.expand_dims(tmpImg, 0) - .astype(np.float32) - } - - def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]: - raise NotImplementedError - - @classmethod - def checksum_disabled(cls, *args, **kwargs): - return os.getenv("MODEL_CHECKSUM_DISABLED", None) is not None - - @classmethod - def u2net_home(cls, *args, **kwargs): - return os.path.expanduser( - os.getenv( - "U2NET_HOME", os.path.join(os.getenv("XDG_DATA_HOME", "~"), ".u2net") - ) - ) - - @classmethod - def download_models(cls, *args, **kwargs): - raise NotImplementedError - - @classmethod - def name(cls, *args, **kwargs): - raise NotImplementedError diff --git a/spaces/digitalxingtong/Taffy-Bert-VITS2/bert_gen.py b/spaces/digitalxingtong/Taffy-Bert-VITS2/bert_gen.py deleted file mode 100644 index 467655b2c4171608ad690fe7dec350db85f84f1b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Taffy-Bert-VITS2/bert_gen.py +++ /dev/null @@ -1,53 +0,0 @@ -import torch -from torch.utils.data import DataLoader -from multiprocessing import Pool -import commons -import utils -from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate -from tqdm import tqdm -import warnings - -from text import cleaned_text_to_sequence, get_bert - -config_path = 'configs/config.json' -hps = utils.get_hparams_from_file(config_path) - -def process_line(line): - _id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|") - phone = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in 
range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - wav_path = f'{_id}' - - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - assert bert.shape[-1] == len(phone) - torch.save(bert, bert_path) - - -if __name__ == '__main__': - lines = [] - with open(hps.data.training_files, encoding='utf-8' ) as f: - lines.extend(f.readlines()) - - # with open(hps.data.validation_files, encoding='utf-8' ) as f: - # lines.extend(f.readlines()) - - with Pool(processes=2) as pool: #A100 40GB suitable config,if coom,please decrease the processess number. - for _ in tqdm(pool.imap_unordered(process_line, lines)): - pass diff --git a/spaces/digitalxingtong/Un-Bert-Vits2/text/tone_sandhi.py b/spaces/digitalxingtong/Un-Bert-Vits2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Un-Bert-Vits2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', 
'客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 
一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/dilums/sentence-similarity/next.config.js b/spaces/dilums/sentence-similarity/next.config.js deleted file mode 100644 index 5c6d77928025d79c01a484eccc050ff0a29e4d84..0000000000000000000000000000000000000000 --- a/spaces/dilums/sentence-similarity/next.config.js +++ /dev/null @@ -1,6 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - output: 'standalone', -} - -module.exports = nextConfig diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/default_runtime.py b/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/default_runtime.py deleted file mode 100644 index de7f9650ce73ba7ca633652b50df021b67498362..0000000000000000000000000000000000000000 --- a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/default_runtime.py +++ /dev/null @@ -1,17 +0,0 @@ -# yapf:disable -log_config = dict( - interval=5, - hooks=[ - dict(type='TextLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] - -# disable opencv multithreading to avoid system being overloaded -opencv_num_threads = 0 -# set multi-process start method as `fork` to speed up the training -mp_start_method = 'fork' diff --git a/spaces/dirge/voicevox/voicevox_engine/utility/core_version_utility.py b/spaces/dirge/voicevox/voicevox_engine/utility/core_version_utility.py deleted file mode 100644 index 25f2d3a3e7e7ed3a25e52075eb74be08c96451db..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/voicevox_engine/utility/core_version_utility.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Iterable - -from semver.version import Version - - -def parse_core_version(version: str) -> Version: - return Version.parse(version) - - -def get_latest_core_version(versions: Iterable[str]) -> str: - if len(versions) == 0: - raise Exception("versions must be non-empty.") - - return str(max(map(parse_core_version, versions))) diff --git a/spaces/dpe1/beat_manipulator/beat_manipulator/presets.py b/spaces/dpe1/beat_manipulator/beat_manipulator/presets.py deleted file mode 100644 index b621917e1931a44b4ad0c398b0129ffc757061fd..0000000000000000000000000000000000000000 --- a/spaces/dpe1/beat_manipulator/beat_manipulator/presets.py +++ /dev/null @@ -1,84 +0,0 @@ -from . 
import main, utils -BM_SAMPLES = {'cowbell' : 'beat_manipulator/samples/cowbell.flac', - } - -presets = {} - -def presets_load(path, mode = 'add'): - global presets - import yaml - with open(path, 'r') as f: - yaml_presets = yaml.safe_load(f.read()) - - # if mode.lower() == 'add': - # presets = presets | yaml_presets - # elif mode.lower() == 'replace': - presets = yaml_presets - -presets_load('beat_manipulator/presets.yaml') - -def _beatswap(song, pattern, pattern_name, scale = 1, shift = 0, output = '', modify = False): - if isinstance(scale, str): - if ',' in scale: scale = scale.replace(' ', '').split(',') - elif not isinstance(scale, list): scale = [scale] - if modify is False: - for i in scale: - main.beatswap(song, pattern = pattern, scale = i, shift = shift, output=output, suffix = f' ({pattern_name}{(" x"+str(round(utils._safer_eval(i), 4))) * (len(scale)>1)})', copy = True) - else: - assert isinstance(song, main.song), f"In order to modify a song, it needs to be of a main.song type, but it is {type(song)}" - song.beatswap(pattern, scale = scale[0], shift = shift) - return song - -def get(preset): - """returns (pattern, scale, shift)""" - global presets - assert preset in presets, f"{preset} not found in presets." - preset = presets[preset] - return preset['pattern'], preset['scale'] if 'scale' in preset else 1, preset['shift'] if 'shift' in preset else 0 - -def use(song, preset, output = '', scale = 1, shift = 0): - global presets - assert preset in presets, f"{preset} not found in presets." - preset_name = preset - preset = presets[preset] - if not isinstance(song, main.song): song = main.song(song) - if isinstance(list(preset.values())[0], dict): - for i in preset.values(): - if 'sample' in i: - pass - elif 'sidechain' in i: - pass - else: - song = _beatswap(song, pattern = i['pattern'], scale = scale*(i['scale'] if 'scale' in i else 1), shift = shift*(i['shift'] if 'shift' in i else 0), output = output, modify = True, pattern_name = preset_name) - song.write(output, suffix = f' ({preset})') - else: - if 'sample' in preset: - pass - elif 'sidechain' in preset: - pass - else: - _beatswap(song, pattern = preset['pattern'], scale = scale*(preset['scale'] if 'scale' in preset else 1), shift = shift*(preset['shift'] if 'shift' in preset else 0), output = output, modify = False, pattern_name = preset_name) - -def use_all(song, output = ''): - if not isinstance(song, main.song): song = main.song(song) - for key in presets.keys(): - print(f'__ {key} __') - use(song, key, output = output) - print() - -def test(song, scale = 1, shift = 0, adjust = 0, output = '', load_settings = False): - song = main.song(song) - song.beatmap_generate(load_settings = load_settings) - song.beatswap('test', scale = scale, shift = shift, adjust = 500+adjust) - song.write(output = output, suffix = ' (test)') - -def save(song, scale = 1, shift = 0, adjust = 0): - song = main.song(song) - song.beatmap_save_settings(scale = scale, shift = shift, adjust = adjust) - -def savetest(song, scale = 1, shift = 0, adjust = 0, output = '', load_settings = False): - song = main.song(song) - song.beatmap_generate(load_settings = load_settings) - song.beatswap('test', scale = scale, shift = shift, adjust = 500+adjust) - song.write(output = output, suffix = ' (test)') - song.beatmap_save_settings(scale = scale, shift = shift, adjust = adjust) \ No newline at end of file diff --git a/spaces/dperales/Fraud_Detection_Pycaret/README.md b/spaces/dperales/Fraud_Detection_Pycaret/README.md deleted file mode 100644 index 
28829251f83732d09a3313518dd53266e2bec13f..0000000000000000000000000000000000000000 --- a/spaces/dperales/Fraud_Detection_Pycaret/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ITACA Fraud Detection -emoji: 🏃 -colorFrom: green -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: True ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/enesbol/case_dif/app.py b/spaces/enesbol/case_dif/app.py deleted file mode 100644 index faafc1767372271431cccec35b3d1626f587458d..0000000000000000000000000000000000000000 --- a/spaces/enesbol/case_dif/app.py +++ /dev/null @@ -1,203 +0,0 @@ -import streamlit as st -import os -import subprocess -from PIL import Image, ImageOps -import torch -from diffusers import StableDiffusionInpaintPipeline -import transformers -import cv2 -import diffusers -import accelerate -import warnings -import numpy as np -import os -import shutil -warnings.filterwarnings("ignore") -st.title('Background Generation') -st.write('This app generates new backgrounds for images.') -# set environment variable for dll -os.environ['KMP_DUPLICATE_LIB_OK']='True' - -@st.cache_data -def mode(width, height): - output_width = np.floor_divide(width, 8) * 8 - output_height = np.floor_divide(height, 8) * 8 - return output_width, output_height - -def get_prompt(): - prompt = st.text_input('Enter your prompt here:', placeholder="Imagine our perfume bottle amidst a lush garden, surrounded by blooming flowers and vibrant colors.") - return prompt - -def get_negative_prompt(): - negative_prompt = st.text_input('Enter your negative prompt here:', placeholder="low quality, out of frame, watermark.. etc.") - return negative_prompt - -def get_user_input(): - st.subheader("Upload an image file, Press Clean Background Button.") - uploaded_file = st.file_uploader("Upload a JPG image file", type=["jpg", "jpeg"]) - - if uploaded_file is not None: - user_file_path = os.path.join("data/custom_dataset/", uploaded_file.name) - - # Open the uploaded image - uploaded_image = Image.open(uploaded_file) - - # Check if the width is larger than 640 - if uploaded_image.width > 640: - # Calculate the proportional height based on the desired width of 640 pixels - aspect_ratio = uploaded_image.width / uploaded_image.height - resized_height = int(640 / aspect_ratio) - # Resize the image to a width of 640 pixels and proportional height - resized_image = uploaded_image.resize((640, resized_height)) - else: - resized_image = uploaded_image - - return resized_image, user_file_path - - return None, None - - -def clean_files(directory): - files = os.listdir(directory) - for file in files: - file_path = os.path.join(directory, file) - if os.path.isfile(file_path): - os.remove(file_path) - -uploaded_file, user_file_path = get_user_input() -button_1 = st.button("Clean Background") - -button_1_clicked = False # Variable to track button state - -def run_subprocess(): - mask_created = False - command = "python main.py inference --dataset custom_dataset/ --arch 7 --img_size 640 --save_map True" - subprocess.run(command, shell=True) - mask_created = True - - -# Perform the necessary actions when the "Clean Background" button is clicked -st.write(button_1) - -# Log data for analyzing the app later. 
-def log(copy = False): - custom_dataset_directory = "data/custom_dataset/" - processed_directory = "data/processed" - for filename in os.listdir(custom_dataset_directory): - file_path = os.path.join(custom_dataset_directory, filename) - - if copy == True: - shutil.copy(file_path, processed_directory) # Copy files - else: - shutil.move(file_path, processed_directory) # Move files - - -def load_images(): - x = user_file_path.split('/')[-1] - uploaded_file_name = os.path.basename(user_file_path) - image_path = os.path.join("data/custom_dataset/", x) - dif_image = Image.open(image_path) - - mask_path = os.path.join("mask/custom_dataset/", x.replace('.jpg', '.png')) - png_image = Image.open(mask_path) - inverted_image = ImageOps.invert(png_image) - return dif_image , inverted_image - -if button_1: - button_1_clicked = True - # Move items from data/custom_dataset/ to data/processed - log( copy= True) - clean_files("data/custom_dataset/") - if uploaded_file is not None: - uploaded_file.save(user_file_path) - run_subprocess() - st.success("Background cleaned.") - log(copy = True) - - - -st.subheader("Text your prompt and choose parameters, then press Run Model button") - -# Create a two-column layout -col1, col2 = st.columns(2) - -# Get user input for prompts -with col1: - input_prompt = st.text_area('Enter Prompt', height=80) -with col2: - input_negative_prompt = st.text_area('Enter Negative Prompt', height=80) - -num_inference_steps = st.slider('Number of Inference Steps:', min_value=5, max_value=50, value=10) -num_images_per_prompt = st.slider('Image Count to be Produced:', min_value=1, max_value=2, value=1) - -# use seed with torch generator -#torch.manual_seed(0) -# seed -#seed = st.slider('Seed:', min_value=0, max_value=100, value=1) -#generator = [torch.Generator(device="cuda").manual_seed(seed) for i in range(num_images_per_prompt)] - -#generator = torch.Generator(device="cuda").manual_seed(0) -run_model_button = st.button("Run Model") - -@st.cache_resource -def initialize_pipe(): - pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", - revision="fp16", - torch_dtype=torch.float16, #16 for gpu - safety_checker = None, - requires_safety_checker = False).to("cuda") - - pipe.safety_checker = None - pipe.requires_safety_checker = False - return pipe - -def image_resize(dif_image): - output_width, output_height = mode(dif_image.width, dif_image.height) - while output_height > 800: - output_height = output_height // 1.5 - output_width = output_width // 1.5 - output_width, output_height = mode(output_width, output_height) - return output_width, output_height - - -def show_output(x5): - if len(x5) == 1: - col1, col2 = st.columns(2) - with col1 : - st.image(inverted_image, width=256, caption='Generated Mask', use_column_width=False) - with col2: - st.image(x5[0], width=256, caption='Generated Image', use_column_width=False) - - elif len(x5) == 2: - col1, col2, col3 = st.columns(3) - with col1 : - col1.image(inverted_image, width=256, caption='Generated Mask', use_column_width=False) - with col2 : - col2.image(x5[0], width=256, caption='Gener ted Image', use_column_width=False) - with col3 : - col3.image(x5[1], width=256, caption='Generated Image-2', use_column_width=False) - -# Check if the button is clicked and all inputs are provided -if run_model_button == True and input_prompt is not None : - st.write("Running the model...") - dif_image , inverted_image = load_images() - output_width, output_height = image_resize(dif_image) - base_prompt = "high 
resolution, high quality, use mask. Do not distort the shape of the object. make the object stand out, show it clearly and vividly, preserving the shape of the object, use the mask" - prompt = input_prompt + " " + base_prompt - - st.write("Pipe working with {0} inference steps and {1} image will be created for prompt".format(num_inference_steps, num_images_per_prompt)) - - pipe = initialize_pipe() - - #output_height = 128 - #output_width = 128 - - x5 = pipe(image=dif_image, mask_image=inverted_image, num_inference_steps=num_inference_steps, # , generator= generator - num_images_per_prompt=num_images_per_prompt, prompt=prompt, negative_prompt=input_negative_prompt, - height=output_height, width=output_width).images - - show_output(x5) - torch.cuda.empty_cache() -else: - - st.write("Please provide prompt and click the 'Run Model' button to proceed.") \ No newline at end of file diff --git a/spaces/eradhea/chat_voice_spanish/README.md b/spaces/eradhea/chat_voice_spanish/README.md deleted file mode 100644 index 52fa196350693413508fc0dbe84f23ee036c1dd7..0000000000000000000000000000000000000000 --- a/spaces/eradhea/chat_voice_spanish/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chat Voice Spanish -emoji: 👀 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -license: gpl ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git "a/spaces/erbanku/gpt-academic/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" "b/spaces/erbanku/gpt-academic/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" deleted file mode 100644 index 2f4201438c4d8597c251726fe99c02d40f0cadf0..0000000000000000000000000000000000000000 --- "a/spaces/erbanku/gpt-academic/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243.py" +++ /dev/null @@ -1,166 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -import re -import unicodedata -fast_debug = False -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - -def is_paragraph_break(match): - """ - 根据给定的匹配结果来判断换行符是否表示段落分隔。 - 如果换行符前为句子结束标志(句号,感叹号,问号),且下一个字符为大写字母,则换行符更有可能表示段落分隔。 - 也可以根据之前的内容长度来判断段落是否已经足够长。 - """ - prev_char, next_char = match.groups() - - # 句子结束标志 - sentence_endings = ".!?" - - # 设定一个最小段落长度阈值 - min_paragraph_length = 140 - - if prev_char in sentence_endings and next_char.isupper() and len(match.string[:match.start(1)]) > min_paragraph_length: - return "\n\n" - else: - return " " - -def normalize_text(text): - """ - 通过把连字(ligatures)等文本特殊符号转换为其基本形式来对文本进行归一化处理。 - 例如,将连字 "fi" 转换为 "f" 和 "i"。 - """ - # 对文本进行归一化处理,分解连字 - normalized_text = unicodedata.normalize("NFKD", text) - - # 替换其他特殊字符 - cleaned_text = re.sub(r'[^\x00-\x7F]+', '', normalized_text) - - return cleaned_text - -def clean_text(raw_text): - """ - 对从 PDF 提取出的原始文本进行清洗和格式化处理。 - 1. 对原始文本进行归一化处理。 - 2. 替换跨行的连词,例如 “Espe-\ncially” 转换为 “Especially”。 - 3. 
根据 heuristic 规则判断换行符是否是段落分隔,并相应地进行替换。 - """ - # 对文本进行归一化处理 - normalized_text = normalize_text(raw_text) - - # 替换跨行的连词 - text = re.sub(r'(\w+-\n\w+)', lambda m: m.group(1).replace('-\n', ''), normalized_text) - - # 根据前后相邻字符的特点,找到原文本中的换行符 - newlines = re.compile(r'(\S)\n(\S)') - - # 根据 heuristic 规则,用空格或段落分隔符替换原换行符 - final_text = re.sub(newlines, lambda m: m.group(1) + is_paragraph_break(m) + m.group(2), text) - - return final_text.strip() - -def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, glob, os, fitz - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - with fitz.open(fp) as doc: - file_content = "" - for page in doc: - file_content += page.get_text() - file_content = clean_text(file_content) - print(file_content) - - prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" - i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) # 带超时倒计时 - - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt="总结文章。" - ) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - -@CatchException -def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import fitz - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', 
recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/eson/tokenizer-arena/vocab/llama/README.md b/spaces/eson/tokenizer-arena/vocab/llama/README.md deleted file mode 100644 index 1182aa46fc2380e7fab866cdb26eb4419a405ddb..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/llama/README.md +++ /dev/null @@ -1,125 +0,0 @@ - - -## - - -tokenizer.json 和 tokenizer.model 是 都需要吗? - -## 完整性 - -以下 256个字符保证了词典的完整性 -``` - "vocab": { - "<0x00>": 3, - "<0x01>": 4, - ... - "<0xFE>": 257, - "<0xFF>": 258, -``` - - -## - - -```json - "normalizer": { - "type": "Sequence", - "normalizers": [ - { - "type": "Prepend", - "prepend": "▁" - }, - { - "type": "Replace", - "pattern": { - "String": " " - }, - "content": "▁" - } - ] - }, - - "post_processor": { - "type": "TemplateProcessing", - "single": [ - { - "SpecialToken": { - "id": "", - "type_id": 0 - } - }, - { - "Sequence": { - "id": "A", - "type_id": 0 - } - } - ], - "pair": [ - { - "SpecialToken": { - "id": "", - "type_id": 0 - } - }, - { - "Sequence": { - "id": "A", - "type_id": 0 - } - }, - { - "Sequence": { - "id": "B", - "type_id": 0 - } - } - ], - "special_tokens": { - "": { - "id": "", - "ids": [ - 1 - ], - "tokens": [ - "" - ] - } - } - }, - "decoder": { - "type": "Sequence", - "decoders": [ - { - "type": "Replace", - "pattern": { - "String": "▁" - }, - "content": " " - }, - { - "type": "ByteFallback" - }, - { - "type": "Fuse" - }, - { - "type": "Strip", - "content": " ", - "start": 1, - "stop": 0 - } - ] - }, - -``` - -## issues - -1. https://github.com/LianjiaTech/BELLE/issues/45 -llama 700个中文只是显式支持的数量,隐含支持的unicode中文字远超700, -你可以随便用一个bert的词表做实验。不过恶心的是这样一个中文字就会encode成4,5个unicode toekn,长度一下就上去了,所以还是哈工大做中文词表增强的靠谱。 - -2. https://github.com/LianjiaTech/BELLE/issues/43 -请问各位llama在中文上使用需要对词表做额外操作吗? -应该是要的,我测了一下llama词表和常用汉字3500个的交集,只有600多个。增加词表可参考https://github.com/ymcui/Chinese-LLaMA-Alpaca \ No newline at end of file diff --git a/spaces/evaluate-metric/comet/README.md b/spaces/evaluate-metric/comet/README.md deleted file mode 100644 index 999f3fc44eb3b3e90408346fa744865dd9e9cb7c..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/comet/README.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: COMET -emoji: 🤗 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -tags: -- evaluate -- metric -description: >- - Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM). - With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. - - See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. 
---- - -# Metric Card for COMET - -## Metric description - -Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments. - -## How to use - -COMET takes 3 lists of strings as input: `sources` (a list of source sentences), `predictions` (a list of candidate translations) and `references` (a list of reference translations). - -```python -from evaluate import load -comet_metric = load('comet') -source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] -hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] -reference = ["They were able to control the fire.", "Schools and kindergartens opened"] -comet_score = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) -``` - -It has several configurations, named after the COMET model to be used. For versions below 2.0 it will default to `wmt20-comet-da` (previously known as `wmt-large-da-estimator-1719`) and for the latest versions (>= 2.0) it will default to `Unbabel/wmt22-comet-da`. - -Alternative models that can be chosen include `wmt20-comet-qe-da`, `wmt21-comet-mqm`, `wmt21-cometinho-da`, `wmt21-comet-qe-mqm` and `emnlp20-comet-rank`. Notably, a distilled model is also available, which is 80% smaller and 2.128x faster while performing close to non-distilled alternatives. You can use it with the identifier `eamt22-cometinho-da`. This version, called Cometinho, was elected as [the best paper](https://aclanthology.org/2022.eamt-1.9) at the annual European conference on Machine Translation. - -> NOTE: In `unbabel-comet>=2.0` all models were moved to Hugging Face Hub and you need to add the suffix `Unbabel/` to be able to download and use them. For example for the distilled version replace `eamt22-cometinho-da` with `Unbabel/eamt22-cometinho-da`. - -It also has several optional arguments: - -`gpus`: optional, an integer (number of GPUs to train on) or a list of integers (which GPUs to train on). Set to 0 to use CPU. The default value is `None` (uses one GPU if possible, else use CPU). - -`progress_bar`a boolean -- if set to `True`, progress updates will be printed out. The default value is `False`. - -More information about model characteristics can be found on the [COMET website](https://unbabel.github.io/COMET/html/index.html). - -## Output values - -The COMET metric outputs two lists: - -`scores`: a list of COMET scores for each of the input sentences, ranging from 0-1. - -`mean_score`: the mean value of COMET scores `scores` over all the input sentences, ranging from 0-1. - -### Values from popular papers - -The [original COMET paper](https://arxiv.org/pdf/2009.09025.pdf) reported average COMET scores ranging from 0.4 to 0.6, depending on the language pairs used for evaluating translation models. They also illustrate that COMET correlates well with human judgements compared to other metrics such as [BLEU](https://huggingface.co/metrics/bleu) and [CHRF](https://huggingface.co/metrics/chrf). 
- -## Examples - -Full match: - -```python -from evaluate import load -comet_metric = load('comet') -source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] -hypothesis = ["They were able to control the fire.", "Schools and kindergartens opened"] -reference = ["They were able to control the fire.", "Schools and kindergartens opened"] -results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) -print([round(v, 1) for v in results["scores"]]) -[1.0, 1.0] -``` - -Partial match: - -```python -from evaluate import load -comet_metric = load('comet') -source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] -hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] -reference = ["They were able to control the fire", "Schools and kindergartens opened"] -results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) -print([round(v, 2) for v in results["scores"]]) -[0.19, 0.92] -``` - -No match: - -```python -from evaluate import load -comet_metric = load('comet') -source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] -hypothesis = ["The girl went for a walk", "The boy was sleeping"] -reference = ["They were able to control the fire", "Schools and kindergartens opened"] -results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) -print([round(v, 2) for v in results["scores"]]) -[0.00, 0.00] -``` - -## Limitations and bias - -The models provided for calculating the COMET metric are built on top of XLM-R and cover the following languages: - -Afrikaans, Albanian, Amharic, Arabic, Armenian, Assamese, Azerbaijani, Basque, Belarusian, Bengali, Bengali Romanized, Bosnian, Breton, Bulgarian, Burmese, Burmese, Catalan, Chinese (Simplified), Chinese (Traditional), Croatian, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Hausa, Hebrew, Hindi, Hindi Romanized, Hungarian, Icelandic, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish (Kurmanji), Kyrgyz, Lao, Latin, Latvian, Lithuanian, Macedonian, Malagasy, Malay, Malayalam, Marathi, Mongolian, Nepali, Norwegian, Oriya, Oromo, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Sanskri, Scottish, Gaelic, Serbian, Sindhi, Sinhala, Slovak, Slovenian, Somali, Spanish, Sundanese, Swahili, Swedish, Tamil, Tamil Romanized, Telugu, Telugu Romanized, Thai, Turkish, Ukrainian, Urdu, Urdu Romanized, Uyghur, Uzbek, Vietnamese, Welsh, Western, Frisian, Xhosa, Yiddish. - -Thus, results for language pairs containing uncovered languages are unreliable, as per the [COMET website](https://github.com/Unbabel/COMET) - -Also, calculating the COMET metric involves downloading the model from which features are obtained -- the default model, `wmt22-comet-da`, takes over 2.32GB of storage space and downloading it can take a significant amount of time depending on the speed of your internet connection. If this is an issue, choose a smaller model; for instance `eamt22-cometinho-da` is 344MB. - -### Interpreting Scores: - -When using COMET to evaluate machine translation, it's important to understand how to interpret the scores it produces. - -In general, COMET models are trained to predict quality scores for translations. 
These scores are typically normalized using a z-score transformation to account for individual differences among annotators. While the raw score itself does not have a direct interpretation, it is useful for ranking translations and systems according to their quality. - -However, for the latest COMET models like `Unbabel/wmt22-comet-da`, we have introduced a new training approach that scales the scores between 0 and 1. This makes it easier to interpret the scores: a score close to 1 indicates a high-quality translation, while a score close to 0 indicates a translation that is no better than random chance. - -It's worth noting that when using COMET to compare the performance of two different translation systems, it's important to run statistical significance measures to reliably compare scores between systems. - -## Citation -```bibtex -@inproceedings{rei-etal-2022-comet, - title = "{COMET}-22: Unbabel-{IST} 2022 Submission for the Metrics Shared Task", - author = "Rei, Ricardo and - C. de Souza, Jos{\'e} G. and - Alves, Duarte and - Zerva, Chrysoula and - Farinha, Ana C and - Glushkova, Taisiya and - Lavie, Alon and - Coheur, Luisa and - Martins, Andr{\'e} F. T.", - booktitle = "Proceedings of the Seventh Conference on Machine Translation (WMT)", - month = dec, - year = "2022", - address = "Abu Dhabi, United Arab Emirates (Hybrid)", - publisher = "Association for Computational Linguistics", - url = "https://aclanthology.org/2022.wmt-1.52", - pages = "578--585", -} -``` - -```bibtex -@inproceedings{rei-EtAl:2020:WMT, - author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, - title = {Unbabel's Participation in the WMT20 Metrics Shared Task}, - booktitle = {Proceedings of the Fifth Conference on Machine Translation}, - month = {November}, - year = {2020}, - address = {Online}, - publisher = {Association for Computational Linguistics}, - pages = {909--918}, -} -``` - -```bibtex -@inproceedings{rei-etal-2020-comet, - title = "{COMET}: A Neural Framework for {MT} Evaluation", - author = "Rei, Ricardo and - Stewart, Craig and - Farinha, Ana C and - Lavie, Alon", - booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", - month = nov, - year = "2020", - address = "Online", - publisher = "Association for Computational Linguistics", - url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", - pages = "2685--2702", -} -``` - -For the distilled version: - -```bibtex -@inproceedings{rei-etal-2022-searching, - title = "Searching for {COMETINHO}: The Little Metric That Could", - author = "Rei, Ricardo and - Farinha, Ana C and - de Souza, Jos{\'e} G.C. and - Ramos, Pedro G. and - Martins, Andr{\'e} F.T. 
and - Coheur, Luisa and - Lavie, Alon", - booktitle = "Proceedings of the 23rd Annual Conference of the European Association for Machine Translation", - month = jun, - year = "2022", - address = "Ghent, Belgium", - publisher = "European Association for Machine Translation", - url = "https://aclanthology.org/2022.eamt-1.9", - pages = "61--70", -} -``` - -## Further References - -- [COMET website](https://unbabel.github.io/COMET/html/index.html) -- [Hugging Face Tasks - Machine Translation](https://huggingface.co/tasks/translation) diff --git a/spaces/ezioruan/roop/roop/face_analyser.py b/spaces/ezioruan/roop/roop/face_analyser.py deleted file mode 100644 index 9c0afe458763edb22dc2332f527dfdba48575b1d..0000000000000000000000000000000000000000 --- a/spaces/ezioruan/roop/roop/face_analyser.py +++ /dev/null @@ -1,34 +0,0 @@ -import threading -from typing import Any -import insightface - -import roop.globals -from roop.typing import Frame - -FACE_ANALYSER = None -THREAD_LOCK = threading.Lock() - - -def get_face_analyser() -> Any: - global FACE_ANALYSER - - with THREAD_LOCK: - if FACE_ANALYSER is None: - FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers) - FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640)) - return FACE_ANALYSER - - -def get_one_face(frame: Frame) -> Any: - face = get_face_analyser().get(frame) - try: - return min(face, key=lambda x: x.bbox[0]) - except ValueError: - return None - - -def get_many_faces(frame: Frame) -> Any: - try: - return get_face_analyser().get(frame) - except IndexError: - return None diff --git a/spaces/facebook/MusicGen/audiocraft/losses/balancer.py b/spaces/facebook/MusicGen/audiocraft/losses/balancer.py deleted file mode 100644 index 8a0ac8adebab8cdee8f82351965195dc02800d18..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/audiocraft/losses/balancer.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import flashy -import torch -from torch import autograd - - -class Balancer: - """Loss balancer. - - The loss balancer combines losses together to compute gradients for the backward. - Given `y = f(...)`, and a number of losses `l1(y, ...)`, `l2(y, ...)`, with `...` - not having any dependence on `f`, the balancer can efficiently normalize the partial gradients - `d l1 / d y`, `d l2 / dy` before summing them in order to achieve a desired ratio between - the losses. For instance if `weights = {'l1': 2, 'l2': 1}`, 66% of the gradient - going into `f(...)` will come from `l1` on average, and 33% from `l2`. This allows for an easy - interpration of the weights even if the intrisic scale of `l1`, `l2` ... is unknown. - - Noting `g1 = d l1 / dy`, etc., the balanced gradient `G` will be - (with `avg` an exponential moving average over the updates), - - G = sum_i total_norm * g_i / avg(||g_i||) * w_i / sum(w_i) - - If `balance_grads` is False, this is deactivated, and instead the gradient will just be the - standard sum of the partial gradients with the given weights. - - A call to the backward method of the balancer will compute the the partial gradients, - combining all the losses and potentially rescaling the gradients, - which can help stabilize the training and reason about multiple losses with varying scales. 
- The obtained gradient with respect to `y` is then back-propagated to `f(...)`. - - Expected usage: - - weights = {'loss_a': 1, 'loss_b': 4} - balancer = Balancer(weights, ...) - losses: dict = {} - losses['loss_a'] = compute_loss_a(x, y) - losses['loss_b'] = compute_loss_b(x, y) - if model.training(): - effective_loss = balancer.backward(losses, x) - - Args: - weights (dict[str, float]): Weight coefficient for each loss. The balancer expect the losses keys - from the backward method to match the weights keys to assign weight to each of the provided loss. - balance_grads (bool): Whether to rescale gradients so that weights reflect the fraction of the - overall gradient, rather than a constant multiplier. - total_norm (float): Reference norm when rescaling gradients, ignored otherwise. - emay_decay (float): EMA decay for averaging the norms. - per_batch_item (bool): Whether to compute the averaged norm per batch item or not. This only holds - when rescaling the gradients. - epsilon (float): Epsilon value for numerical stability. - monitor (bool): If True, stores in `self.metrics` the relative ratio between the norm of the gradients - coming from each loss, when calling `backward()`. - """ - def __init__(self, weights: tp.Dict[str, float], balance_grads: bool = True, total_norm: float = 1., - ema_decay: float = 0.999, per_batch_item: bool = True, epsilon: float = 1e-12, - monitor: bool = False): - self.weights = weights - self.per_batch_item = per_batch_item - self.total_norm = total_norm or 1. - self.averager = flashy.averager(ema_decay or 1.) - self.epsilon = epsilon - self.monitor = monitor - self.balance_grads = balance_grads - self._metrics: tp.Dict[str, tp.Any] = {} - - @property - def metrics(self): - return self._metrics - - def backward(self, losses: tp.Dict[str, torch.Tensor], input: torch.Tensor) -> torch.Tensor: - """Compute the backward and return the effective train loss, e.g. the loss obtained from - computing the effective weights. If `balance_grads` is True, the effective weights - are the one that needs to be applied to each gradient to respect the desired relative - scale of gradients coming from each loss. - - Args: - losses (Dict[str, torch.Tensor]): dictionary with the same keys as `self.weights`. - input (torch.Tensor): the input of the losses, typically the output of the model. - This should be the single point of dependence between the losses - and the model being trained. - """ - norms = {} - grads = {} - for name, loss in losses.items(): - # Compute partial derivative of the less with respect to the input. - grad, = autograd.grad(loss, [input], retain_graph=True) - if self.per_batch_item: - # We do not average the gradient over the batch dimension. - dims = tuple(range(1, grad.dim())) - norm = grad.norm(dim=dims, p=2).mean() - else: - norm = grad.norm(p=2) - norms[name] = norm - grads[name] = grad - - count = 1 - if self.per_batch_item: - count = len(grad) - # Average norms across workers. Theoretically we should average the - # squared norm, then take the sqrt, but it worked fine like that. - avg_norms = flashy.distrib.average_metrics(self.averager(norms), count) - # We approximate the total norm of the gradient as the sums of the norms. - # Obviously this can be very incorrect if all gradients are aligned, but it works fine. - total = sum(avg_norms.values()) - - self._metrics = {} - if self.monitor: - # Store the ratio of the total gradient represented by each loss. 
- for k, v in avg_norms.items(): - self._metrics[f'ratio_{k}'] = v / total - - total_weights = sum([self.weights[k] for k in avg_norms]) - assert total_weights > 0. - desired_ratios = {k: w / total_weights for k, w in self.weights.items()} - - out_grad = torch.zeros_like(input) - effective_loss = torch.tensor(0., device=input.device, dtype=input.dtype) - for name, avg_norm in avg_norms.items(): - if self.balance_grads: - # g_balanced = g / avg(||g||) * total_norm * desired_ratio - scale = desired_ratios[name] * self.total_norm / (self.epsilon + avg_norm) - else: - # We just do regular weighted sum of the gradients. - scale = self.weights[name] - out_grad.add_(grads[name], alpha=scale) - effective_loss += scale * losses[name].detach() - # Send the computed partial derivative with respect to the output of the model to the model. - input.backward(out_grad) - return effective_loss diff --git a/spaces/facebook/ov-seg/open_vocab_seg/modeling/clip_adapter/clip/model.py b/spaces/facebook/ov-seg/open_vocab_seg/modeling/clip_adapter/clip/model.py deleted file mode 100644 index 8ea730a2cc8a992f9180428bd1fec7fc96aa89dd..0000000000000000000000000000000000000000 --- a/spaces/facebook/ov-seg/open_vocab_seg/modeling/clip_adapter/clip/model.py +++ /dev/null @@ -1,613 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) Meta Platforms, Inc. All Rights Reserved -# Modified by Feng Liang from https://github.com/openai/CLIP/blob/main/clip/model.py - -from collections import OrderedDict -from typing import Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1): - super().__init__() - - # all conv layers have stride 1. 
an avgpool is performed after the second convolution when stride > 1 - self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - - self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - - self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() - - self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - - self.relu = nn.ReLU(inplace=True) - self.downsample = None - self.stride = stride - - if stride > 1 or inplanes != planes * Bottleneck.expansion: - # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 - self.downsample = nn.Sequential( - OrderedDict( - [ - ("-1", nn.AvgPool2d(stride)), - ( - "0", - nn.Conv2d( - inplanes, - planes * self.expansion, - 1, - stride=1, - bias=False, - ), - ), - ("1", nn.BatchNorm2d(planes * self.expansion)), - ] - ) - ) - - def forward(self, x: torch.Tensor): - identity = x - - out = self.relu(self.bn1(self.conv1(x))) - out = self.relu(self.bn2(self.conv2(out))) - out = self.avgpool(out) - out = self.bn3(self.conv3(out)) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - return out - - -class AttentionPool2d(nn.Module): - def __init__( - self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None - ): - super().__init__() - self.positional_embedding = nn.Parameter( - torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5 - ) - self.k_proj = nn.Linear(embed_dim, embed_dim) - self.q_proj = nn.Linear(embed_dim, embed_dim) - self.v_proj = nn.Linear(embed_dim, embed_dim) - self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) - self.num_heads = num_heads - self.grid_size = spacial_dim - - def forward(self, x, mask=None, return_cls=True): - b, c, gh, gw = x.shape - # remove irrelated feature - if mask is not None: - mask = F.interpolate(mask[:, None, ...], size=(gh, gw)).squeeze( - 1 - ) # [N,H,W] -> [N,grid,grid] - mask = (mask > 0.5).reshape(mask.shape[0], -1) - mask = torch.cat([mask, mask.new_ones(mask.shape[0], 1)], dim=1) - if x.size()[0] == 1: - x = x.expand(mask.shape[0], c, gh, gw) - - x = x.reshape(x.shape[0], c, gh * gw).permute(2, 0, 1) # NCHW -> (HW)NC - - x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC - positional_embedding = self.positional_embedding - if not (self.positional_embedding.shape[0] == x.shape[0]): - cls_pos = positional_embedding[0:1, :] - per_pos_embedding = ( - F.interpolate( - positional_embedding[1:, :] - .permute(1, 0) - .view(1, -1, self.grid_size, self.grid_size), - size=(gh, gw), - mode="bicubic", - ) - .reshape(-1, gh * gw) - .permute(1, 0) - ) - positional_embedding = torch.cat([cls_pos, per_pos_embedding]) - - x = x + positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC - x, _ = F.multi_head_attention_forward( - query=x, - key=x, - value=x, - embed_dim_to_check=x.shape[-1], - num_heads=self.num_heads, - q_proj_weight=self.q_proj.weight, - k_proj_weight=self.k_proj.weight, - v_proj_weight=self.v_proj.weight, - in_proj_weight=None, - in_proj_bias=torch.cat( - [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias] - ), - bias_k=None, - bias_v=None, - add_zero_attn=False, - dropout_p=0, - out_proj_weight=self.c_proj.weight, - out_proj_bias=self.c_proj.bias, - use_separate_proj_weight=True, - training=self.training, - need_weights=False, - key_padding_mask=mask, - ) - - if return_cls: - return x[0] - else: - 
return x - - -class ModifiedResNet(nn.Module): - """ - A ResNet class that is similar to torchvision's but contains the following changes: - - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. - - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 - - The final pooling layer is a QKV attention instead of an average pool - """ - - def __init__(self, layers, output_dim, heads, input_resolution=224, width=64): - super().__init__() - self.output_dim = output_dim - self.input_resolution = input_resolution - - # the 3-layer stem - self.conv1 = nn.Conv2d( - 3, width // 2, kernel_size=3, stride=2, padding=1, bias=False - ) - self.bn1 = nn.BatchNorm2d(width // 2) - self.conv2 = nn.Conv2d( - width // 2, width // 2, kernel_size=3, padding=1, bias=False - ) - self.bn2 = nn.BatchNorm2d(width // 2) - self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) - self.bn3 = nn.BatchNorm2d(width) - self.avgpool = nn.AvgPool2d(2) - self.relu = nn.ReLU(inplace=True) - - # residual layers - self._inplanes = width # this is a *mutable* variable used during construction - self.layer1 = self._make_layer(width, layers[0]) - self.layer2 = self._make_layer(width * 2, layers[1], stride=2) - self.layer3 = self._make_layer(width * 4, layers[2], stride=2) - self.layer4 = self._make_layer(width * 8, layers[3], stride=2) - - embed_dim = width * 32 # the ResNet feature dimension - self.attnpool = AttentionPool2d( - input_resolution // 32, embed_dim, heads, output_dim - ) - - def _make_layer(self, planes, blocks, stride=1): - layers = [Bottleneck(self._inplanes, planes, stride)] - - self._inplanes = planes * Bottleneck.expansion - for _ in range(1, blocks): - layers.append(Bottleneck(self._inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x, mask: torch.Tensor = None, return_cls=True): - def stem(x): - for conv, bn in [ - (self.conv1, self.bn1), - (self.conv2, self.bn2), - (self.conv3, self.bn3), - ]: - x = self.relu(bn(conv(x))) - x = self.avgpool(x) - return x - - x = x.type(self.conv1.weight.dtype) - x = stem(x) # 1/4,1/4 - x = self.layer1(x) - x = self.layer2(x) # 1/8,1/8 - x = self.layer3(x) # 1/16,1/16 - x = self.layer4(x) # 1/32,1/32 - b, c, gh, gw = x.shape - x = self.attnpool(x, mask, return_cls) - if not return_cls: - return x[1:].permute(1, 0, 2).reshape(b, gh, gw, x.shape[-1]) # N,L,C - return x - - -class LayerNorm(nn.LayerNorm): - """Subclass torch's LayerNorm to handle fp16.""" - - def forward(self, x: torch.Tensor): - orig_type = x.dtype - ret = super().forward(x.type(torch.float32)) - return ret.type(orig_type) - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor): - return x * torch.sigmoid(1.702 * x) - - -class ResidualAttentionBlock(nn.Module): - def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): - super().__init__() - - self.attn = nn.MultiheadAttention(d_model, n_head) - self.ln_1 = LayerNorm(d_model) - self.mlp = nn.Sequential( - OrderedDict( - [ - ("c_fc", nn.Linear(d_model, d_model * 4)), - ("gelu", QuickGELU()), - ("c_proj", nn.Linear(d_model * 4, d_model)), - ] - ) - ) - self.ln_2 = LayerNorm(d_model) - self.attn_mask = attn_mask - - def attention(self, x: torch.Tensor, **kwargs): - self.attn_mask = ( - self.attn_mask.to(dtype=x.dtype, device=x.device) - if self.attn_mask is not None - else None - ) - return self.attn( - x, x, x, need_weights=False, attn_mask=self.attn_mask, **kwargs - )[0] - - def 
forward(self, x: torch.Tensor, **kwargs): - x = x + self.attention(self.ln_1(x), **kwargs) - x = x + self.mlp(self.ln_2(x)) - return x - - -class Transformer(nn.Module): - def __init__( - self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None - ): - super().__init__() - self.width = width - self.layers = layers - self.resblocks = nn.Sequential( - *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)] - ) - - def forward(self, x: torch.Tensor, **kwargs): - for block in self.resblocks: - x = block(x, **kwargs) - return x - - -class VisionTransformer(nn.Module): - def __init__( - self, - input_resolution: int, - patch_size: int, - mask_prompt_depth: int, - width: int, - layers: int, - heads: int, - output_dim: int, - ): - super().__init__() - self.input_resolution = input_resolution - self.output_dim = output_dim - self.conv1 = nn.Conv2d( - in_channels=3, - out_channels=width, - kernel_size=patch_size, - stride=patch_size, - bias=False, - ) - - scale = width ** -0.5 - self.class_embedding = nn.Parameter(scale * torch.randn(width)) - self.positional_embedding = nn.Parameter( - scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width) - ) - self.grid_size = input_resolution // patch_size - self.ln_pre = LayerNorm(width) - - self.transformer = Transformer(width, layers, heads) - - self.ln_post = LayerNorm(width) - self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) - - self.mask_pool = nn.AvgPool2d(patch_size, stride=patch_size) - self.mask_prompt_depth = mask_prompt_depth - self.mask_embedding = nn.Parameter(torch.zeros(self.mask_prompt_depth, self.grid_size * self.grid_size, width)) - - def forward(self, x: torch.Tensor, m: torch.Tensor = None): - x = self.conv1(x) # shape = [*, width, grid, grid] - x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] - x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] - if m is not None: - m = self.mask_pool(m.to(torch.float).squeeze()).reshape(m.shape[0], -1).unsqueeze(-1) - m = torch.ceil(m) - if self.mask_embedding.shape[1] == 1: - mask_embedding = self.mask_embedding.to(x.dtype).repeat(1, x.shape[1], 1) - else: - mask_embedding = self.mask_embedding.to(x.dtype) - x = x * m + mask_embedding[0].unsqueeze(0) * (1 - m) - - x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] - x = x + self.positional_embedding.to(x.dtype) - x = self.ln_pre(x) - - x = x.permute(1, 0, 2) # NLD -> LND - if m is not None: - for i, blk in enumerate(self.transformer.resblocks): - d = i + 1 - x = blk(x) - if d < self.mask_prompt_depth: - masked_x = x[1:, :, :] * m.permute(1, 0, 2) + \ - mask_embedding[d].unsqueeze(0).permute(1, 0, 2) * (1 - m.permute(1, 0, 2)) - x = torch.cat([x[:1, :, :], masked_x], dim=0) - else: - x = self.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - - x = self.ln_post(x[:, 0, :]) - - if self.proj is not None: - x = x @ self.proj - - return x - - - -class CLIP(nn.Module): - def __init__( - self, - embed_dim: int, - # vision - image_resolution: int, - vision_layers: Union[Tuple[int, int, int, int], int], - vision_width: int, - vision_patch_size: int, - mask_prompt_depth: int, - # text - context_length: int, - vocab_size: int, - transformer_width: int, - transformer_heads: int, - transformer_layers: int, - ): - super().__init__() - - self.context_length = context_length - - if isinstance(vision_layers, (tuple, list)): - vision_heads = vision_width * 32 
// 64 - self.visual = ModifiedResNet( - layers=vision_layers, - output_dim=embed_dim, - heads=vision_heads, - input_resolution=image_resolution, - width=vision_width, - ) - else: - vision_heads = vision_width // 64 - self.visual = VisionTransformer( - input_resolution=image_resolution, - patch_size=vision_patch_size, - mask_prompt_depth=mask_prompt_depth, - width=vision_width, - layers=vision_layers, - heads=vision_heads, - output_dim=embed_dim, - ) - - self.transformer = Transformer( - width=transformer_width, - layers=transformer_layers, - heads=transformer_heads, - attn_mask=self.build_attention_mask(), - ) - - self.vocab_size = vocab_size - self.token_embedding = nn.Embedding(vocab_size, transformer_width) - self.positional_embedding = nn.Parameter( - torch.empty(self.context_length, transformer_width) - ) - self.ln_final = LayerNorm(transformer_width) - - self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) - self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) - - self.initialize_parameters() - - def initialize_parameters(self): - nn.init.normal_(self.token_embedding.weight, std=0.02) - nn.init.normal_(self.positional_embedding, std=0.01) - - if isinstance(self.visual, ModifiedResNet): - if self.visual.attnpool is not None: - std = self.visual.attnpool.c_proj.in_features ** -0.5 - nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std) - - for resnet_block in [ - self.visual.layer1, - self.visual.layer2, - self.visual.layer3, - self.visual.layer4, - ]: - for name, param in resnet_block.named_parameters(): - if name.endswith("bn3.weight"): - nn.init.zeros_(param) - - proj_std = (self.transformer.width ** -0.5) * ( - (2 * self.transformer.layers) ** -0.5 - ) - attn_std = self.transformer.width ** -0.5 - fc_std = (2 * self.transformer.width) ** -0.5 - for block in self.transformer.resblocks: - nn.init.normal_(block.attn.in_proj_weight, std=attn_std) - nn.init.normal_(block.attn.out_proj.weight, std=proj_std) - nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) - nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) - - if self.text_projection is not None: - nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) - - def build_attention_mask(self): - # lazily create causal attention mask, with full attention between the vision tokens - # pytorch uses additive attention mask; fill with -inf - mask = torch.empty(self.context_length, self.context_length) - mask.fill_(float("-inf")) - mask.triu_(1) # zero out the lower diagonal - return mask - - @property - def dtype(self): - return self.visual.conv1.weight.dtype - - def encode_image(self, image, **kwargs): - return self.visual(image.type(self.dtype), **kwargs) - - def encode_text(self, text): - x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] - - x = x + self.positional_embedding.type(self.dtype) - x = x.permute(1, 0, 2) # NLD -> LND - x = self.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - x = self.ln_final(x).type(self.dtype) - - # x.shape = [batch_size, n_ctx, transformer.width] - # take features from the eot embedding (eot_token is the highest number in each sequence) - x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection - - return x - - def forward(self, image, text): - image_features = self.encode_image(image) - text_features 
= self.encode_text(text) - - # normalized features - image_features = image_features / image_features.norm(dim=-1, keepdim=True) - text_features = text_features / text_features.norm(dim=-1, keepdim=True) - - # cosine similarity as logits - logit_scale = self.logit_scale.exp() - logits_per_image = logit_scale * image_features @ text_features.t() - logits_per_text = logit_scale * text_features @ image_features.t() - - # shape = [global_batch_size, global_batch_size] - return logits_per_image, logits_per_text - - -def convert_weights(model: nn.Module): - """Convert applicable model parameters to fp16""" - - def _convert_weights_to_fp16(l): - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): - l.weight.data = l.weight.data.half() - if l.bias is not None: - l.bias.data = l.bias.data.half() - - if isinstance(l, nn.MultiheadAttention): - for attr in [ - *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], - "in_proj_bias", - "bias_k", - "bias_v", - ]: - tensor = getattr(l, attr) - if tensor is not None: - tensor.data = tensor.data.half() - - for name in ["text_projection", "proj"]: - if hasattr(l, name): - attr = getattr(l, name) - if attr is not None: - attr.data = attr.data.half() - - model.apply(_convert_weights_to_fp16) - - -def build_model(state_dict: dict, mask_prompt_depth: int = 0): - vit = "visual.proj" in state_dict - - if vit: - vision_width = state_dict["visual.conv1.weight"].shape[0] - vision_layers = len( - [ - k - for k in state_dict.keys() - if k.startswith("visual.") and k.endswith(".attn.in_proj_weight") - ] - ) - vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] - grid_size = round( - (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5 - ) - image_resolution = vision_patch_size * grid_size - else: - assert mask_prompt_depth == 0, 'ResNets do not support mask prompt tuning' - counts: list = [ - len( - set( - k.split(".")[2] - for k in state_dict - if k.startswith(f"visual.layer{b}") - ) - ) - for b in [1, 2, 3, 4] - ] - vision_layers = tuple(counts) - vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] - output_width = round( - (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5 - ) - vision_patch_size = None - assert ( - output_width ** 2 + 1 - == state_dict["visual.attnpool.positional_embedding"].shape[0] - ) - image_resolution = output_width * 32 - - embed_dim = state_dict["text_projection"].shape[1] - context_length = state_dict["positional_embedding"].shape[0] - vocab_size = state_dict["token_embedding.weight"].shape[0] - transformer_width = state_dict["ln_final.weight"].shape[0] - transformer_heads = transformer_width // 64 - transformer_layers = len( - set( - k.split(".")[2] - for k in state_dict - if k.startswith(f"transformer.resblocks") - ) - ) - - model = CLIP( - embed_dim, - image_resolution, - vision_layers, - vision_width, - vision_patch_size, - mask_prompt_depth, - context_length, - vocab_size, - transformer_width, - transformer_heads, - transformer_layers, - ) - - for key in ["input_resolution", "context_length", "vocab_size"]: - if key in state_dict: - del state_dict[key] - - convert_weights(model) - model.load_state_dict(state_dict, strict=False) - return model.eval() diff --git a/spaces/falterWliame/Face_Mask_Detection/Ahnlab V3 Internet Security 80 Keygen Idm TOP.md b/spaces/falterWliame/Face_Mask_Detection/Ahnlab V3 Internet Security 80 Keygen Idm TOP.md deleted file mode 100644 index 163ef92649e8e0cf7835082c4f8e5b5e935667f0..0000000000000000000000000000000000000000 --- 
a/spaces/falterWliame/Face_Mask_Detection/Ahnlab V3 Internet Security 80 Keygen Idm TOP.md +++ /dev/null @@ -1,81 +0,0 @@ - -

        Ahnlab V3 Internet Security 80 Keygen Idm: A Guide to Secure Your PC and Download Faster

        - -

        If you are looking for a reliable and effective security solution for your Windows PC, you might want to consider AhnLab V3 Internet Security 80. This is a comprehensive antivirus and firewall program that can protect your system from various threats, such as viruses, spyware, ransomware, phishing, and more. It also offers features like parental control, privacy protection, system optimization, and network management.

        -




        - -

        But how can you get the full version of AhnLab V3 Internet Security 80 without paying a hefty price? Well, there is a way to crack it using a keygen and a download manager. A keygen is a software that can generate serial keys or activation codes for various programs. A download manager is a software that can speed up your downloads by splitting files into segments and using multiple connections.

        - -

        In this article, we will show you how to use AhnLab V3 Internet Security 80 keygen idm to activate the program and download it faster. Follow these steps carefully and enjoy the benefits of AhnLab V3 Internet Security 80.

        - -

        Step 1: Download AhnLab V3 Internet Security 80 Keygen Idm

        - -

        The first step is to download the keygen and the download manager that we will use to crack AhnLab V3 Internet Security 80. You can find them on various websites, but make sure they are safe and virus-free. We recommend using the following links:

        - - - -

        Save them to your preferred location on your PC and extract them if they are compressed.

        - -

        Step 2: Install AhnLab V3 Internet Security 80 and IDM

        - -

        The next step is to install both programs on your PC. Run the setup files and follow the instructions on the screen. You can choose the default settings or customize them according to your preferences. When the installation is complete, do not launch AhnLab V3 Internet Security 80 or IDM yet.

        - -

        Step 3: Run AhnLab V3 Internet Security 80 Keygen

        - -

        Now it's time to run the keygen that will generate a serial key for AhnLab V3 Internet Security 80. Open the folder where you saved the keygen and run it as administrator. You will see a window like this:

        -

        - -AhnLab V3 Internet Security 80 Keygen - -

        Click on the "Generate" button and wait for a few seconds. The keygen will create a random serial key that you can use to activate AhnLab V3 Internet Security 80. Copy the serial key and keep it somewhere safe.

        - -

        Step 4: Activate AhnLab V3 Internet Security 80

        - -

        The next step is to activate AhnLab V3 Internet Security 80 using the serial key that you generated with the keygen. Launch AhnLab V3 Internet Security 80 and you will see a window like this:

        - -AhnLab V3 Internet Security 80 Activation - -

        Click on the "Enter License Key" button and paste the serial key that you copied from the keygen. Click on the "OK" button and wait for the activation process to complete. You will see a message like this:

        - -AhnLab V3 Internet Security 80 Activated - -

        Congratulations! You have successfully activated AhnLab V3 Internet Security 80 with a keygen. You can now enjoy all the features of this powerful security program.

        - -

        Step 5: Download AhnLab V3 Internet Security 80 Faster with IDM

        - -

        The final step is to use IDM to download AhnLab V3 Internet Security 80 faster. IDM is a download manager that can increase your download speed by up to five times by using multiple connections and segments. It also supports resume, pause, schedule, queue, and error recovery features.

        - -

        To use IDM to download AhnLab V3 Internet Security 80 faster, follow these steps:

        - -
          -
        • Launch IDM and click on the "Downloads" menu. Select "Options" and go to the "General" tab.
        • -
        • Check the box that says "Use advanced browser integration" and click on the "OK" button.
        • -
        • Open your web browser and go to the official website of AhnLab V3 Internet Security 80: https://www.ahnlab.com/en/site/product/productDetail.do?prodSeq=1004
        • -
        • Click on the "Download Trial" button and you will see a window like this:
        • -
        - -IDM Download Window - -
          -
        • Click on the "Start Download" button and IDM will start downloading AhnLab V3 Internet Security 80 faster than your normal browser.
        • -
        • You can monitor the progress of your download in IDM's main window or in its taskbar icon.
        • -
        • When the download is complete, you can open the file and install AhnLab V3 Internet Security 80 on your PC.
        • -
        - -

        That's it! You have successfully downloaded AhnLab V3 Internet Security 80 faster with IDM.

        - -

        Conclusion

        - -

        AhnLab V3 Internet Security 80 is a great security program that can protect your PC from various online threats. However, it can be expensive to buy the full version of this program. That's why we showed you how to use AhnLab V3 Internet Security 80 keygen idm to crack it and download it faster.

        - -

        We hope this article was helpful and informative for you. If you have any questions or comments, feel free to leave them below. Thank you for reading!

        -


        3cee63e6c2
        -
        -
        \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Livro No Cio Syang Baixar.md b/spaces/falterWliame/Face_Mask_Detection/Livro No Cio Syang Baixar.md deleted file mode 100644 index 72f5736c3dfe86229cb100f007986bfa08e22e2a..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Livro No Cio Syang Baixar.md +++ /dev/null @@ -1,6 +0,0 @@ -

        livro no cio syang baixar


        Download 🗹 https://urlca.com/2uDdLe



        - - 3cee63e6c2
        -
        -
        -

        diff --git a/spaces/fatiXbelha/sd/Download Recovery Orange Fox A Step-by-Step Tutorial.md b/spaces/fatiXbelha/sd/Download Recovery Orange Fox A Step-by-Step Tutorial.md deleted file mode 100644 index 0169bd3dcc2045463e3957239ad15a1121ac9732..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Recovery Orange Fox A Step-by-Step Tutorial.md +++ /dev/null @@ -1,147 +0,0 @@ -
        -

        Download Recovery Orange Fox: A Custom Recovery for Android Devices

        -

        If you are an Android enthusiast who likes to customize your device with custom ROMs, kernels, mods, and root, then you might have heard of TWRP, the most popular custom recovery for Android devices. But did you know that there is another custom recovery that offers more features, fixes, and supports both Treble and non-Treble ROMs? It is called Recovery Orange Fox, and in this article, we will show you what it is, how to install it, and how to use it on your Android device.

        -

        What is Recovery Orange Fox?

        -

Recovery Orange Fox is a custom recovery for Android devices that is based on the TWRP source code. It adds extra features and fixes, and it supports both Treble and non-Treble ROMs. It can be installed or updated using a zip file or an app. It has a unique, interactive UI that puts it on par with TWRP. It was originally designed for the Xiaomi Redmi Note 4X Snapdragon (mido), but it now supports 50+ devices, with more than 5 million downloads from its official download server.

        -




        -

        Features of Recovery Orange Fox

        -

        Some of the features of Recovery Orange Fox are:

        -
          -
        • Synced with the latest Teamwin changes
        • -
        • Designed with latest Material design 2 guidelines
        • -
        • Implemented support for Flyme and MIUI OTA, and incremental block-based OTA in custom ROMs
        • -
        • Included customization
        • -
        • Inbuilt patches, like Magisk and password reset patch
        • -
        • Password protection
        • -
        • Fully open-source
        • -
        • Frequently updated
        • -
        -

        Benefits of Recovery Orange Fox

        -

        Some of the benefits of using Recovery Orange Fox are:

        -
          -
        • It allows you to flash custom ROMs, kernels, mods, and root on your device.
        • -
        • It helps you to take full backups of your data partition and internal storage.
        • -
        • It supports both Treble and non-Treble ROMs, which means you can flash any ROM compatible with your device.
        • -
        • It has a user-friendly interface that lets you customize your recovery according to your preferences.
        • -
        • It has a password protection feature that secures your recovery from unauthorized access.
        • -
        • It has an app that lets you install or update the recovery easily.
        • -
        -

        Risks of Recovery Orange Fox

        -

        Some of the risks of using Recovery Orange Fox are:

        -
          -
        • It requires an unlocked bootloader, which may void the warranty on your device.
        • -
        • It may make your device unstable or brick it if not done properly.
        • -
        • It may erase your data or cause data corruption if you flash incompatible files or make wrong settings.
        • -
        • It may not work well with some stock ROMs or OTA updates.
        • -
        -

        How to Install Recovery Orange Fox on Your Android Device

        -

        If you want to install Recovery Orange Fox on your Android device, you need to have a PC, a USB cable, and some basic knowledge of adb and fastboot commands. You also need to download the correct zip file for your device from the official website. There are two methods to install Recovery Orange Fox: via fastboot or via recovery. We will show you both methods below.

        -

        Prerequisites

        -

        Before you proceed with the installation, make sure to:

        -
          -
        • Unlock the bootloader of your device
        • Enable USB debugging and OEM unlocking on your device
        • -
        • Backup your data as it may be wiped during the process
        • -
        • Charge your device to at least 50% battery level
        • -
        • Download and install the latest adb and fastboot drivers on your PC
        • -
        • Download the Recovery Orange Fox zip file for your device from the official website
        • -
        -

        Steps to Install Recovery Orange Fox via Fastboot

        -

To install Recovery Orange Fox via fastboot, follow these steps (a condensed command summary follows the list):

        -
          -
        1. Extract the Recovery Orange Fox zip file on your PC and copy the recovery.img file to the same folder where you have installed adb and fastboot.
        2. -
        3. Connect your device to your PC via a USB cable and reboot it into fastboot mode. To do this, you can either use the adb command adb reboot bootloader or press and hold the power and volume down buttons together for a few seconds.
        4. -
        5. Once your device is in fastboot mode, open a command prompt or terminal window on your PC and navigate to the folder where you have copied the recovery.img file.
        6. -
        7. Type the following command to flash the recovery image: fastboot flash recovery recovery.img
        8. -
        9. Once the flashing is done, type the following command to reboot your device: fastboot reboot
        10. -
        11. You have successfully installed Recovery Orange Fox via fastboot. To boot into recovery mode, you can either use the adb command adb reboot recovery or press and hold the power and volume up buttons together for a few seconds.
        12. -
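For quick reference, the flashing commands quoted in the steps above can be run in sequence from a terminal on your PC. This is only a condensed sketch of the article's own steps, assuming the device is connected over USB with debugging enabled and that recovery.img is the image extracted from the Orange Fox zip:

    # put the connected device into fastboot mode
    adb reboot bootloader
    # flash the Orange Fox image to the recovery partition
    fastboot flash recovery recovery.img
    # reboot the device once flashing has finished
    fastboot reboot
    # later, boot into the freshly installed recovery
    adb reboot recovery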
        -

        Steps to Install Recovery Orange Fox via Recovery

        -

To install Recovery Orange Fox via recovery, follow these steps (a short command summary follows the list):

        -
          -
        1. Copy the Recovery Orange Fox zip file to your device's internal storage or SD card.
        2. -
        3. Reboot your device into recovery mode. To do this, you can either use the adb command adb reboot recovery or press and hold the power and volume up buttons together for a few seconds.
        4. -
        5. If you have an existing custom recovery like TWRP, you can simply flash the Recovery Orange Fox zip file as you would flash any other zip file. If you have a stock recovery, you may need to sideload the zip file using adb. To do this, select "Apply update from ADB" on your device's recovery menu and then type the following command on your PC: adb sideload orangefox.zip
        6. -
        7. Once the flashing is done, reboot your device.
        8. -
        9. You have successfully installed Recovery Orange Fox via recovery. To boot into recovery mode, you can either use the adb command adb reboot recovery or press and hold the power and volume up buttons together for a few seconds.
        10. -
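As above, the recovery-based route boils down to a couple of commands. This is a sketch of the sideload case described in the steps, where orangefox.zip stands in for whatever the downloaded Orange Fox package is actually named on your device:

    # boot the connected device into its current recovery
    adb reboot recovery
    # on the device, choose "Apply update from ADB", then push the package
    adb sideload orangefox.zip

If you already have a custom recovery such as TWRP installed, you can skip the sideload and simply flash the zip from the recovery's own Install menu, as the steps note.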
        -

        How to Use Recovery Orange Fox on Your Android Device

        -

        Once you have installed Recovery Orange Fox on your Android device, you can use it to perform various tasks such as flashing custom ROMs, kernels, mods, and root, taking backups and restoring them, wiping data and cache, formatting partitions, fixing permissions, mounting storage devices, accessing terminal, and more. Here are some of the basic and advanced functions of Recovery Orange Fox that you can use on your device.

        -


        -

        Basic Functions of Recovery Orange Fox

        -

        The basic functions of Recovery Orange Fox are similar to those of TWRP. They include:

        -
          -
        • Install: This allows you to flash zip files or images on your device. You can browse and select the files from your internal storage or SD card, or use adb sideload to transfer them from your PC. You can also queue multiple files for flashing, add zip verification, disable signature verification, and inject Magisk or SuperSU before flashing.
        • -
        • Backup: This allows you to take full or partial backups of your data partition and internal storage. You can choose which partitions to backup, such as boot, system, vendor, data, cache, etc. You can also compress, encrypt, or split your backups, and restore them later if needed.
        • -
        • Restore: This allows you to restore your backups that you have taken using Recovery Orange Fox or TWRP. You can browse and select the backup files from your internal storage or SD card, or use adb push to transfer them from your PC. You can also choose which partitions to restore, such as boot, system, vendor, data, cache, etc.
        • -
        • Wipe: This allows you to wipe data and cache on your device. You can choose which partitions to wipe, such as dalvik cache, data, internal storage, etc. You can also use the advanced wipe option to format or change the file system of your partitions, such as ext4, f2fs, exfat, etc.
        • -
        • Mount: This allows you to mount or unmount storage devices on your device, such as internal storage, SD card, USB OTG, etc. You can also enable or disable MTP and ADB access.
        • -
        • Reboot: This allows you to reboot your device to different modes, such as system, recovery, bootloader, fastboot, download, etc. You can also power off your device or set a reboot delay.
        • -
        -

        Advanced Functions of Recovery Orange Fox

        -

        The advanced functions of Recovery Orange Fox are unique and different from those of TWRP. They include:

        -
          -
        • Settings: This allows you to customize your recovery according to your preferences. You can change the theme, language, brightness, vibration, timeout, screen lock, gesture control, and more. You can also enable or disable various features, such as OTA support, Magisk injection, password protection, zip signature verification, etc.
        • -
        • Tools: This allows you to perform various tasks on your device using the recovery. You can fix permissions, partition SD card, flash firmware or modem files, root or unroot your device, install Magisk or SuperSU modules, decrypt data partition, and more.
        • -
        • File Manager: This allows you to browse and manage files and folders on your device using the recovery. You can copy, move, delete, rename, create, or extract files and folders. You can also view file properties and permissions.
        • -
        • Terminal: This allows you to access the command line interface of the recovery. You can execute various commands and scripts on your device using the recovery.
        • -
        -

        Customization Options of Recovery Orange Fox

        -

        One of the best features of Recovery Orange Fox is its customization options. You can change the look and feel of your recovery according to your liking. You can choose from different themes, colors, fonts, icons, animations, and more. You can also create your own theme using the theme editor. You can also customize the gesture control of your recovery. You can assign different actions to different gestures, such as swipe up, swipe down, swipe left, swipe right, double tap, long press, etc.

        -

        Conclusion

        -

In conclusion, Recovery Orange Fox is a custom recovery for Android devices that offers more features and better support than TWRP. It is easy to install and use, it lets you flash custom ROMs, kernels, mods, and root, and it helps you take backups and restore them later if needed. It has a user-friendly interface that lets you customize the recovery to your preferences, a password protection feature that secures it from unauthorized access, and an app that lets you install or update it easily.


        If you are looking for a custom recovery that is fast, stable, secure, and customizable for your Android device, then you should give Recovery Orange Fox a try. You can download it from the official website and follow the instructions in this article to install and use it on your device. You will not regret it.


        FAQs


        Here are some of the frequently asked questions about Recovery Orange Fox:

        • Q: Is Recovery Orange Fox better than TWRP?
        • A: Recovery Orange Fox is based on TWRP, but it adds more features, fixes, and device support, along with a unique, interactive UI. However, the choice of custom recovery depends on your personal preference and device compatibility.
        • Q: Does Recovery Orange Fox support my device?
        • A: Recovery Orange Fox supports 50+ devices, including Xiaomi, OnePlus, Samsung, Motorola, Asus, Realme, and more. You can check the list of supported devices on the official website. If your device is not supported, you can request it on the official Telegram group.
        • Q: How do I update Recovery Orange Fox?
        • A: You can update Recovery Orange Fox using the app that is available on the Google Play Store. The app will notify you when there is a new update available for your device. You can also check for updates manually on the official website or the Telegram channel.
        • Q: How do I uninstall Recovery Orange Fox?
        • A: You can uninstall Recovery Orange Fox by flashing the stock recovery image of your device using fastboot or recovery (see the sketch after this list). You can also flash another custom recovery like TWRP if you want to replace Recovery Orange Fox.
        • Q: How do I contact the developers of Recovery Orange Fox?
        • A: You can contact the developers of Recovery Orange Fox by joining the official Telegram group or the Telegram channel. You can also visit the official website or the XDA thread for more information and support.
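        To illustrate the uninstall answer above, here is a minimal sketch of flashing a stock recovery image over fastboot from a PC. The image filename is an example and must come from your device's own firmware, and devices without a dedicated recovery partition (A/B layouts) need a different procedure:

```python
import subprocess

STOCK_RECOVERY = "stock_recovery.img"  # example name; extract the real image from your firmware

def flash_stock_recovery(image: str) -> None:
    """Replace the custom recovery with the stock one (dedicated recovery partition assumed)."""
    subprocess.run(["adb", "reboot", "bootloader"], check=True)       # reboot the phone into fastboot mode
    subprocess.run(["fastboot", "flash", "recovery", image], check=True)
    subprocess.run(["fastboot", "reboot"], check=True)                # boot back into the system

if __name__ == "__main__":
    flash_stock_recovery(STOCK_RECOVERY)
```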


        How to Download YouTube on Your Huawei Devices?


        YouTube is one of the most popular video-sharing platforms in the world, where you can watch, create, and share videos on various topics such as music, gaming, education, entertainment, news, etc. However, if you own a Huawei device that does not have Google services installed, you may face some challenges in downloading YouTube on your device.


        Fortunately, there are some solutions that can help you enjoy YouTube on your Huawei device without Google restrictions. In this article, we will show you three ways to download YouTube on your Huawei device using AppGallery, APKCombo, and SnapTube.


        Downloading YouTube via AppGallery


        AppGallery is Huawei's official app store that offers a variety of apps for Huawei users. You can use AppGallery to download YouTube on your Huawei device easily and safely.


        Step 1: Open AppGallery and search for YouTube


        To download YouTube via AppGallery, you need to open AppGallery on your Huawei device and type "YouTube" in the search bar. You will see the official YouTube app icon with its familiar red play button.

        Step 2: Install YouTube app and grant permissions


        After you find the YouTube app on AppGallery, you need to tap on the "Install" button and wait for the app to download and install on your Huawei device. You may also need to grant some permissions to the YouTube app, such as access to your camera, microphone, storage, location, etc. You can do this by tapping on "Allow" when prompted or by going to the settings of your device and managing the app permissions.
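        If you prefer the command line, runtime permissions can also be granted over adb once USB debugging is enabled. This is only a sketch: the package name is YouTube's usual one, and the permission list is an illustrative subset rather than everything the app asks for:

```python
import subprocess

PACKAGE = "com.google.android.youtube"  # YouTube's package name
PERMISSIONS = [                         # illustrative runtime permissions only
    "android.permission.CAMERA",
    "android.permission.RECORD_AUDIO",
]

def grant(package: str, permission: str) -> None:
    """Grant one runtime permission to an installed app over adb."""
    subprocess.run(["adb", "shell", "pm", "grant", package, permission], check=True)

if __name__ == "__main__":
    for perm in PERMISSIONS:
        grant(PACKAGE, perm)
```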


        Step 3: Enjoy watching and creating YouTube videos


        Once you have installed the YouTube app on your Huawei device, you can open it and sign in with your Google account or create a new one if you don't have one. You can then enjoy watching and creating YouTube videos on your Huawei device. You can browse through various categories of videos, such as trending, music, gaming, news, etc. You can also subscribe to your favorite channels, like, comment, and share videos with your friends. You can also upload your own videos by tapping on the camera icon and choosing to record a new video or select an existing one from your gallery. You can then edit your video, add a title, description, tags, etc. and publish it on YouTube.


        Downloading YouTube via APKCombo


        APKCombo is a website that offers APK files of various apps for Android devices. You can use APKCombo to download YouTube on your Huawei device without using Google services.


        Step 1: Open APKCombo website and search for YouTube APK


        To download YouTube via APKCombo, you need to open a web browser on your Huawei device and go to the APKCombo website at https://apkcombo.com/. You can then type in "YouTube" on the search bar and select the YouTube app from the results. You will see the details of the YouTube app, such as the version, size, rating, etc.


        Step 2: Download and install YouTube APK file on your Huawei device


        After you find the YouTube app on APKCombo, you need to tap on the "Download APK" button and choose a suitable version for your device. You will then see a pop-up window asking you to confirm the download. You need to tap on "OK" and wait for the YouTube APK file to download on your Huawei device. You may also need to enable the option of installing apps from unknown sources on your device. You can do this by going to the settings of your device and tapping on "Security" or "Privacy". You can then toggle on the option of allowing installation of apps from unknown sources or unknown apps.
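        The browser flow above is all most people need, but the download step itself is just an HTTP transfer, so it can be scripted for illustration. The sketch below uses Python's requests library; the URL is a placeholder rather than APKCombo's real endpoint, and the saved APK still has to be copied to the phone and installed there:

```python
import requests

APK_URL = "https://example.com/youtube.apk"  # placeholder -- use the direct link your browser shows
OUT_FILE = "youtube.apk"

def download_apk(url: str, out_path: str) -> None:
    """Stream the APK to disk so large files are not held in memory."""
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(out_path, "wb") as fh:
            for chunk in resp.iter_content(chunk_size=1 << 16):
                fh.write(chunk)

if __name__ == "__main__":
    download_apk(APK_URL, OUT_FILE)
    print(f"Saved {OUT_FILE}; copy it to the phone (for example with adb push) and install it there.")
```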

        -

        Once you have downloaded the YouTube APK file on your Huawei device, you need to locate it in your file manager or downloads folder and tap on it to install it. You may see a warning message saying that the app may harm your device. You need to tap on "Install anyway" or "Ignore" and wait for the app to install on your device.


        Step 3: Enjoy watching and creating YouTube videos


        Once you have installed the YouTube app on your Huawei device, you can open it and sign in with your Google account or create a new one if you don't have one. You can then enjoy watching and creating YouTube videos on your Huawei device as described in the previous section.


        Downloading YouTube via SnapTube


        SnapTube is an app that allows you to download videos from various platforms, including YouTube, Facebook, Instagram, etc. You can use SnapTube to download YouTube videos on your Huawei device without using Google services.


        Step 1: Open SnapTube website and download SnapTube app


        To download YouTube via SnapTube, you need to open a web browser on your Huawei device and go to the SnapTube website at https://www.snaptubeapp.com/. You will see a button that says "Download". You need to tap on it and wait for the SnapTube app to download on your Huawei device.


        Step 2: Install SnapTube app and grant permissions


        After you download the SnapTube app on your Huawei device, you need to install it as described in the previous section for installing apps from unknown sources. You may also need to grant some permissions to the SnapTube app, such as access to your storage, contacts, phone, etc. You can do this by tapping on "Allow" when prompted or by going to the settings of your device and managing the app permissions.


        Step 3: Search for YouTube videos on SnapTube app and download them


        Once you have installed the SnapTube app on your Huawei device, you can open it and search for YouTube videos that you want to download on your Huawei device. You can also browse through various categories of videos, such as popular, music, movies, etc. You can also access YouTube directly by tapping on the YouTube icon on the top of the app.


        When you find a YouTube video that you want to download, you need to tap on the download button at the bottom right corner of the video. You will see a pop-up window that shows you the available formats and resolutions for the video. You can choose the format and resolution that suits your needs and preferences. You can also choose to download only the audio of the video by selecting the MP3 option.


        After you select the format and resolution, you need to tap on the "Download" button and wait for the video to download on your Huawei device. You can see the progress of the download on the notification bar or on the app itself. You can also pause, resume, or cancel the download at any time.


        Once you have downloaded the YouTube video on your Huawei device, you can watch it offline by going to the "Downloads" section of the SnapTube app. You can also share it with your friends, convert it to other formats, or delete it from your device.


        Conclusion


        In this article, we have shown you three ways to download YouTube on your Huawei device using AppGallery, APKCombo, and SnapTube. Each method has its own advantages and disadvantages, so you can choose the one that works best for you.


        If you want to download YouTube from Huawei's official app store and enjoy its full features, you can use AppGallery. However, you may not get the latest version of YouTube or some features may not work properly due to Google restrictions.


        If you want to download YouTube from a website that offers APK files of various apps, you can use APKCombo. However, you may need to enable unknown sources option on your device and be careful about the source of the download as some APK files may contain malware or viruses.


        If you want to download YouTube videos from various platforms and convert them to different formats, you can use SnapTube. However, you may need to grant some permissions to the app and be aware of the legal issues of downloading YouTube videos without permission or for commercial purposes.


        We hope this article has helped you learn how to download YouTube on your Huawei device. If you have any questions or feedback, please feel free to leave a comment below.


        FAQs


        Q1: Is it legal to download YouTube videos on Huawei devices?


        A1: It depends on the terms of service of YouTube and the content owners. Generally, it is not legal to download YouTube videos without permission or for commercial purposes. You may also violate the intellectual property rights of the content owners if you download their videos without their consent. Therefore, we advise you to respect the rights of the content owners and use YouTube responsibly.


        Q2: Is it safe to download YouTube videos on Huawei devices?


        A2: It depends on the source of the download. Generally, it is safe to download YouTube videos from official sources such as AppGallery or APKCombo, but not from third-party sources that may contain malware or viruses. You should also scan your device regularly with an antivirus app and avoid clicking on suspicious links or pop-ups.


        Q3: Is it possible to download YouTube videos in different formats on Huawei devices?


        A3: Yes, it is possible to download YouTube videos in different formats such as MP4, MP3, M4A, etc. using apps such as SnapTube or online converters. However, you should be aware that some formats may reduce the quality or size of the video or audio. You should also check if the format is compatible with your device or media player before downloading it.


        Q4: Is it possible to watch YouTube videos offline on Huawei devices?


        A4: Yes, it is possible to watch YouTube videos offline on Huawei devices by downloading them using apps such as SnapTube or by using the offline feature of YouTube app if available in your region. However, you should be aware that some videos may not be available for offline viewing due to the content owners' preferences or YouTube policies. You should also download the videos when you have a stable internet connection and enough storage space on your device.


        Q5: Is it possible to create YouTube videos on Huawei devices?


        A5: Yes, it is possible to create YouTube videos on Huawei devices by using the camera app or other video editing apps and then uploading them using the YouTube app or website. However, you should be aware that some features of YouTube may not work properly on Huawei devices due to Google restrictions. You should also follow the YouTube guidelines and best practices for creating and uploading videos.

        \ No newline at end of file diff --git a/spaces/fb700/chat3/crazy_functions/test_project/cpp/libJPG/jpgd.cpp b/spaces/fb700/chat3/crazy_functions/test_project/cpp/libJPG/jpgd.cpp deleted file mode 100644 index 36d06c8e9068570c3e7624895d474f33dbfe3d29..0000000000000000000000000000000000000000 --- a/spaces/fb700/chat3/crazy_functions/test_project/cpp/libJPG/jpgd.cpp +++ /dev/null @@ -1,3276 +0,0 @@ -// jpgd.cpp - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -// Last updated Apr. 16, 2011 -// Alex Evans: Linear memory allocator (taken from jpge.h). -// -// Supports progressive and baseline sequential JPEG image files, and the most common chroma subsampling factors: Y, H1V1, H2V1, H1V2, and H2V2. -// -// Chroma upsampling quality: H2V2 is upsampled in the frequency domain, H2V1 and H1V2 are upsampled using point sampling. -// Chroma upsampling reference: "Fast Scheme for Image Size Change in the Compressed Domain" -// http://vision.ai.uiuc.edu/~dugad/research/dct/index.html - -#include "jpgd.h" -#include - -#include -// BEGIN EPIC MOD -#define JPGD_ASSERT(x) { assert(x); CA_ASSUME(x); } (void)0 -// END EPIC MOD - -#ifdef _MSC_VER -#pragma warning (disable : 4611) // warning C4611: interaction between '_setjmp' and C++ object destruction is non-portable -#endif - -// Set to 1 to enable freq. domain chroma upsampling on images using H2V2 subsampling (0=faster nearest neighbor sampling). -// This is slower, but results in higher quality on images with highly saturated colors. -#define JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING 1 - -#define JPGD_TRUE (1) -#define JPGD_FALSE (0) - -#define JPGD_MAX(a,b) (((a)>(b)) ? (a) : (b)) -#define JPGD_MIN(a,b) (((a)<(b)) ? (a) : (b)) - -namespace jpgd { - - static inline void *jpgd_malloc(size_t nSize) { return FMemory::Malloc(nSize); } - static inline void jpgd_free(void *p) { FMemory::Free(p); } - -// BEGIN EPIC MOD -//@UE3 - use UE3 BGRA encoding instead of assuming RGBA - // stolen from IImageWrapper.h - enum ERGBFormatJPG - { - Invalid = -1, - RGBA = 0, - BGRA = 1, - Gray = 2, - }; - static ERGBFormatJPG jpg_format; -// END EPIC MOD - - // DCT coefficients are stored in this sequence. 
- static int g_ZAG[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; - - enum JPEG_MARKER - { - M_SOF0 = 0xC0, M_SOF1 = 0xC1, M_SOF2 = 0xC2, M_SOF3 = 0xC3, M_SOF5 = 0xC5, M_SOF6 = 0xC6, M_SOF7 = 0xC7, M_JPG = 0xC8, - M_SOF9 = 0xC9, M_SOF10 = 0xCA, M_SOF11 = 0xCB, M_SOF13 = 0xCD, M_SOF14 = 0xCE, M_SOF15 = 0xCF, M_DHT = 0xC4, M_DAC = 0xCC, - M_RST0 = 0xD0, M_RST1 = 0xD1, M_RST2 = 0xD2, M_RST3 = 0xD3, M_RST4 = 0xD4, M_RST5 = 0xD5, M_RST6 = 0xD6, M_RST7 = 0xD7, - M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_DNL = 0xDC, M_DRI = 0xDD, M_DHP = 0xDE, M_EXP = 0xDF, - M_APP0 = 0xE0, M_APP15 = 0xEF, M_JPG0 = 0xF0, M_JPG13 = 0xFD, M_COM = 0xFE, M_TEM = 0x01, M_ERROR = 0x100, RST0 = 0xD0 - }; - - enum JPEG_SUBSAMPLING { JPGD_GRAYSCALE = 0, JPGD_YH1V1, JPGD_YH2V1, JPGD_YH1V2, JPGD_YH2V2 }; - -#define CONST_BITS 13 -#define PASS1_BITS 2 -#define SCALEDONE ((int32)1) - -#define FIX_0_298631336 ((int32)2446) /* FIX(0.298631336) */ -#define FIX_0_390180644 ((int32)3196) /* FIX(0.390180644) */ -#define FIX_0_541196100 ((int32)4433) /* FIX(0.541196100) */ -#define FIX_0_765366865 ((int32)6270) /* FIX(0.765366865) */ -#define FIX_0_899976223 ((int32)7373) /* FIX(0.899976223) */ -#define FIX_1_175875602 ((int32)9633) /* FIX(1.175875602) */ -#define FIX_1_501321110 ((int32)12299) /* FIX(1.501321110) */ -#define FIX_1_847759065 ((int32)15137) /* FIX(1.847759065) */ -#define FIX_1_961570560 ((int32)16069) /* FIX(1.961570560) */ -#define FIX_2_053119869 ((int32)16819) /* FIX(2.053119869) */ -#define FIX_2_562915447 ((int32)20995) /* FIX(2.562915447) */ -#define FIX_3_072711026 ((int32)25172) /* FIX(3.072711026) */ - -#define DESCALE(x,n) (((x) + (SCALEDONE << ((n)-1))) >> (n)) -#define DESCALE_ZEROSHIFT(x,n) (((x) + (128 << (n)) + (SCALEDONE << ((n)-1))) >> (n)) - -#define MULTIPLY(var, cnst) ((var) * (cnst)) - -#define CLAMP(i) ((static_cast(i) > 255) ? (((~i) >> 31) & 0xFF) : (i)) - - // Compiler creates a fast path 1D IDCT for X non-zero columns - template - struct Row - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - // ACCESS_COL() will be optimized at compile time to either an array access, or 0. -#define ACCESS_COL(x) (((x) < NONZERO_COLS) ? 
(int)pSrc[x] : 0) - - const int z2 = ACCESS_COL(2), z3 = ACCESS_COL(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_COL(0) + ACCESS_COL(4)) << CONST_BITS; - const int tmp1 = (ACCESS_COL(0) - ACCESS_COL(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_COL(7), atmp1 = ACCESS_COL(5), atmp2 = ACCESS_COL(3), atmp3 = ACCESS_COL(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - pTemp[0] = DESCALE(tmp10 + btmp3, CONST_BITS-PASS1_BITS); - pTemp[7] = DESCALE(tmp10 - btmp3, CONST_BITS-PASS1_BITS); - pTemp[1] = DESCALE(tmp11 + btmp2, CONST_BITS-PASS1_BITS); - pTemp[6] = DESCALE(tmp11 - btmp2, CONST_BITS-PASS1_BITS); - pTemp[2] = DESCALE(tmp12 + btmp1, CONST_BITS-PASS1_BITS); - pTemp[5] = DESCALE(tmp12 - btmp1, CONST_BITS-PASS1_BITS); - pTemp[3] = DESCALE(tmp13 + btmp0, CONST_BITS-PASS1_BITS); - pTemp[4] = DESCALE(tmp13 - btmp0, CONST_BITS-PASS1_BITS); - } - }; - - template <> - struct Row<0> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { -#ifdef _MSC_VER - pTemp; pSrc; -#endif - } - }; - - template <> - struct Row<1> - { - static void idct(int* pTemp, const jpgd_block_t* pSrc) - { - const int dcval = (pSrc[0] << PASS1_BITS); - - pTemp[0] = dcval; - pTemp[1] = dcval; - pTemp[2] = dcval; - pTemp[3] = dcval; - pTemp[4] = dcval; - pTemp[5] = dcval; - pTemp[6] = dcval; - pTemp[7] = dcval; - } - }; - - // Compiler creates a fast path 1D IDCT for X non-zero rows - template - struct Col - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - // ACCESS_ROW() will be optimized at compile time to either an array access, or 0. -#define ACCESS_ROW(x) (((x) < NONZERO_ROWS) ? 
pTemp[x * 8] : 0) - - const int z2 = ACCESS_ROW(2); - const int z3 = ACCESS_ROW(6); - - const int z1 = MULTIPLY(z2 + z3, FIX_0_541196100); - const int tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); - const int tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); - - const int tmp0 = (ACCESS_ROW(0) + ACCESS_ROW(4)) << CONST_BITS; - const int tmp1 = (ACCESS_ROW(0) - ACCESS_ROW(4)) << CONST_BITS; - - const int tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3, tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2; - - const int atmp0 = ACCESS_ROW(7), atmp1 = ACCESS_ROW(5), atmp2 = ACCESS_ROW(3), atmp3 = ACCESS_ROW(1); - - const int bz1 = atmp0 + atmp3, bz2 = atmp1 + atmp2, bz3 = atmp0 + atmp2, bz4 = atmp1 + atmp3; - const int bz5 = MULTIPLY(bz3 + bz4, FIX_1_175875602); - - const int az1 = MULTIPLY(bz1, - FIX_0_899976223); - const int az2 = MULTIPLY(bz2, - FIX_2_562915447); - const int az3 = MULTIPLY(bz3, - FIX_1_961570560) + bz5; - const int az4 = MULTIPLY(bz4, - FIX_0_390180644) + bz5; - - const int btmp0 = MULTIPLY(atmp0, FIX_0_298631336) + az1 + az3; - const int btmp1 = MULTIPLY(atmp1, FIX_2_053119869) + az2 + az4; - const int btmp2 = MULTIPLY(atmp2, FIX_3_072711026) + az2 + az3; - const int btmp3 = MULTIPLY(atmp3, FIX_1_501321110) + az1 + az4; - - int i = DESCALE_ZEROSHIFT(tmp10 + btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*0] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp10 - btmp3, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*7] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 + btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*1] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp11 - btmp2, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*6] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 + btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*2] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp12 - btmp1, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*5] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 + btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*3] = (uint8)CLAMP(i); - - i = DESCALE_ZEROSHIFT(tmp13 - btmp0, CONST_BITS+PASS1_BITS+3); - pDst_ptr[8*4] = (uint8)CLAMP(i); - } - }; - - template <> - struct Col<1> - { - static void idct(uint8* pDst_ptr, const int* pTemp) - { - int dcval = DESCALE_ZEROSHIFT(pTemp[0], PASS1_BITS+3); - const uint8 dcval_clamped = (uint8)CLAMP(dcval); - pDst_ptr[0*8] = dcval_clamped; - pDst_ptr[1*8] = dcval_clamped; - pDst_ptr[2*8] = dcval_clamped; - pDst_ptr[3*8] = dcval_clamped; - pDst_ptr[4*8] = dcval_clamped; - pDst_ptr[5*8] = dcval_clamped; - pDst_ptr[6*8] = dcval_clamped; - pDst_ptr[7*8] = dcval_clamped; - } - }; - - static const uint8 s_idct_row_table[] = - { - 1,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0, 2,1,0,0,0,0,0,0, 2,1,1,0,0,0,0,0, 2,2,1,0,0,0,0,0, 3,2,1,0,0,0,0,0, 4,2,1,0,0,0,0,0, 4,3,1,0,0,0,0,0, - 4,3,2,0,0,0,0,0, 4,3,2,1,0,0,0,0, 4,3,2,1,1,0,0,0, 4,3,2,2,1,0,0,0, 4,3,3,2,1,0,0,0, 4,4,3,2,1,0,0,0, 5,4,3,2,1,0,0,0, 6,4,3,2,1,0,0,0, - 6,5,3,2,1,0,0,0, 6,5,4,2,1,0,0,0, 6,5,4,3,1,0,0,0, 6,5,4,3,2,0,0,0, 6,5,4,3,2,1,0,0, 6,5,4,3,2,1,1,0, 6,5,4,3,2,2,1,0, 6,5,4,3,3,2,1,0, - 6,5,4,4,3,2,1,0, 6,5,5,4,3,2,1,0, 6,6,5,4,3,2,1,0, 7,6,5,4,3,2,1,0, 8,6,5,4,3,2,1,0, 8,7,5,4,3,2,1,0, 8,7,6,4,3,2,1,0, 8,7,6,5,3,2,1,0, - 8,7,6,5,4,2,1,0, 8,7,6,5,4,3,1,0, 8,7,6,5,4,3,2,0, 8,7,6,5,4,3,2,1, 8,7,6,5,4,3,2,2, 8,7,6,5,4,3,3,2, 8,7,6,5,4,4,3,2, 8,7,6,5,5,4,3,2, - 8,7,6,6,5,4,3,2, 8,7,7,6,5,4,3,2, 8,8,7,6,5,4,3,2, 8,8,8,6,5,4,3,2, 8,8,8,7,5,4,3,2, 8,8,8,7,6,4,3,2, 8,8,8,7,6,5,3,2, 8,8,8,7,6,5,4,2, - 8,8,8,7,6,5,4,3, 8,8,8,7,6,5,4,4, 8,8,8,7,6,5,5,4, 8,8,8,7,6,6,5,4, 8,8,8,7,7,6,5,4, 8,8,8,8,7,6,5,4, 8,8,8,8,8,6,5,4, 8,8,8,8,8,7,5,4, - 
8,8,8,8,8,7,6,4, 8,8,8,8,8,7,6,5, 8,8,8,8,8,7,6,6, 8,8,8,8,8,7,7,6, 8,8,8,8,8,8,7,6, 8,8,8,8,8,8,8,6, 8,8,8,8,8,8,8,7, 8,8,8,8,8,8,8,8, - }; - - static const uint8 s_idct_col_table[] = { 1, 1, 2, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; - - void idct(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr, int block_max_zag) - { - JPGD_ASSERT(block_max_zag >= 1); - JPGD_ASSERT(block_max_zag <= 64); - - if (block_max_zag == 1) - { - int k = ((pSrc_ptr[0] + 4) >> 3) + 128; - k = CLAMP(k); - k = k | (k<<8); - k = k | (k<<16); - - for (int i = 8; i > 0; i--) - { - *(int*)&pDst_ptr[0] = k; - *(int*)&pDst_ptr[4] = k; - pDst_ptr += 8; - } - return; - } - - int temp[64]; - - const jpgd_block_t* pSrc = pSrc_ptr; - int* pTemp = temp; - - const uint8* pRow_tab = &s_idct_row_table[(block_max_zag - 1) * 8]; - int i; - for (i = 8; i > 0; i--, pRow_tab++) - { - switch (*pRow_tab) - { - case 0: Row<0>::idct(pTemp, pSrc); break; - case 1: Row<1>::idct(pTemp, pSrc); break; - case 2: Row<2>::idct(pTemp, pSrc); break; - case 3: Row<3>::idct(pTemp, pSrc); break; - case 4: Row<4>::idct(pTemp, pSrc); break; - case 5: Row<5>::idct(pTemp, pSrc); break; - case 6: Row<6>::idct(pTemp, pSrc); break; - case 7: Row<7>::idct(pTemp, pSrc); break; - case 8: Row<8>::idct(pTemp, pSrc); break; - } - - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - - const int nonzero_rows = s_idct_col_table[block_max_zag - 1]; - for (i = 8; i > 0; i--) - { - switch (nonzero_rows) - { - case 1: Col<1>::idct(pDst_ptr, pTemp); break; - case 2: Col<2>::idct(pDst_ptr, pTemp); break; - case 3: Col<3>::idct(pDst_ptr, pTemp); break; - case 4: Col<4>::idct(pDst_ptr, pTemp); break; - case 5: Col<5>::idct(pDst_ptr, pTemp); break; - case 6: Col<6>::idct(pDst_ptr, pTemp); break; - case 7: Col<7>::idct(pDst_ptr, pTemp); break; - case 8: Col<8>::idct(pDst_ptr, pTemp); break; - } - - pTemp++; - pDst_ptr++; - } - } - - void idct_4x4(const jpgd_block_t* pSrc_ptr, uint8* pDst_ptr) - { - int temp[64]; - int* pTemp = temp; - const jpgd_block_t* pSrc = pSrc_ptr; - - for (int i = 4; i > 0; i--) - { - Row<4>::idct(pTemp, pSrc); - pSrc += 8; - pTemp += 8; - } - - pTemp = temp; - for (int i = 8; i > 0; i--) - { - Col<4>::idct(pDst_ptr, pTemp); - pTemp++; - pDst_ptr++; - } - } - - // Retrieve one character from the input stream. - inline uint jpeg_decoder::get_char() - { - // Any bytes remaining in buffer? - if (!m_in_buf_left) - { - // Try to get more bytes. - prep_in_buffer(); - // Still nothing to get? - if (!m_in_buf_left) - { - // Pad the end of the stream with 0xFF 0xD9 (EOI marker) - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Same as previous method, except can indicate if the character is a pad character or not. - inline uint jpeg_decoder::get_char(bool *pPadding_flag) - { - if (!m_in_buf_left) - { - prep_in_buffer(); - if (!m_in_buf_left) - { - *pPadding_flag = true; - int t = m_tem_flag; - m_tem_flag ^= 1; - if (t) - return 0xD9; - else - return 0xFF; - } - } - - *pPadding_flag = false; - - uint c = *m_pIn_buf_ofs++; - m_in_buf_left--; - - return c; - } - - // Inserts a previously retrieved character back into the input buffer. 
- inline void jpeg_decoder::stuff_char(uint8 q) - { - *(--m_pIn_buf_ofs) = q; - m_in_buf_left++; - } - - // Retrieves one character from the input stream, but does not read past markers. Will continue to return 0xFF when a marker is encountered. - inline uint8 jpeg_decoder::get_octet() - { - bool padding_flag; - int c = get_char(&padding_flag); - - if (c == 0xFF) - { - if (padding_flag) - return 0xFF; - - c = get_char(&padding_flag); - if (padding_flag) - { - stuff_char(0xFF); - return 0xFF; - } - - if (c == 0x00) - return 0xFF; - else - { - stuff_char(static_cast(c)); - stuff_char(0xFF); - return 0xFF; - } - } - - return static_cast(c); - } - - // Retrieves a variable number of bits from the input stream. Does not recognize markers. - inline uint jpeg_decoder::get_bits(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - uint c1 = get_char(); - uint c2 = get_char(); - m_bit_buf = (m_bit_buf & 0xFFFF0000) | (c1 << 8) | c2; - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Retrieves a variable number of bits from the input stream. Markers will not be read into the input bit buffer. Instead, an infinite number of all 1's will be returned when a marker is encountered. - inline uint jpeg_decoder::get_bits_no_markers(int num_bits) - { - if (!num_bits) - return 0; - - uint i = m_bit_buf >> (32 - num_bits); - - if ((m_bits_left -= num_bits) <= 0) - { - m_bit_buf <<= (num_bits += m_bits_left); - - if ((m_in_buf_left < 2) || (m_pIn_buf_ofs[0] == 0xFF) || (m_pIn_buf_ofs[1] == 0xFF)) - { - uint c1 = get_octet(); - uint c2 = get_octet(); - m_bit_buf |= (c1 << 8) | c2; - } - else - { - m_bit_buf |= ((uint)m_pIn_buf_ofs[0] << 8) | m_pIn_buf_ofs[1]; - m_in_buf_left -= 2; - m_pIn_buf_ofs += 2; - } - - m_bit_buf <<= -m_bits_left; - - m_bits_left += 16; - - JPGD_ASSERT(m_bits_left >= 0); - } - else - m_bit_buf <<= num_bits; - - return i; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up[m_bit_buf >> 24]) < 0) - { - // Decode more bits, use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - } - else - get_bits_no_markers(pH->code_size[symbol]); - - return symbol; - } - - // Decodes a Huffman encoded symbol. - inline int jpeg_decoder::huff_decode(huff_tables *pH, int& extra_bits) - { - int symbol; - - // Check first 8-bits: do we have a complete symbol? - if ((symbol = pH->look_up2[m_bit_buf >> 24]) < 0) - { - // Use a tree traversal to find symbol. - int ofs = 23; - do - { - symbol = pH->tree[-(int)(symbol + ((m_bit_buf >> ofs) & 1))]; - ofs--; - } while (symbol < 0); - - get_bits_no_markers(8 + (23 - ofs)); - - extra_bits = get_bits_no_markers(symbol & 0xF); - } - else - { - JPGD_ASSERT(((symbol >> 8) & 31) == pH->code_size[symbol & 255] + ((symbol & 0x8000) ? 
(symbol & 15) : 0)); - - if (symbol & 0x8000) - { - get_bits_no_markers((symbol >> 8) & 31); - extra_bits = symbol >> 16; - } - else - { - int code_size = (symbol >> 8) & 31; - int num_extra_bits = symbol & 0xF; - int bits = code_size + num_extra_bits; - if (bits <= (m_bits_left + 16)) - extra_bits = get_bits_no_markers(bits) & ((1 << num_extra_bits) - 1); - else - { - get_bits_no_markers(code_size); - extra_bits = get_bits_no_markers(num_extra_bits); - } - } - - symbol &= 0xFF; - } - - return symbol; - } - - // Tables and macro used to fully decode the DPCM differences. - static const int s_extend_test[16] = { 0, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000 }; - static const int s_extend_offset[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 }; - static const int s_extend_mask[] = { 0, (1<<0), (1<<1), (1<<2), (1<<3), (1<<4), (1<<5), (1<<6), (1<<7), (1<<8), (1<<9), (1<<10), (1<<11), (1<<12), (1<<13), (1<<14), (1<<15), (1<<16) }; -#define HUFF_EXTEND(x,s) ((x) < s_extend_test[s] ? (x) + s_extend_offset[s] : (x)) - - // Clamps a value between 0-255. - inline uint8 jpeg_decoder::clamp(int i) - { - if (static_cast(i) > 255) - i = (((~i) >> 31) & 0xFF); - - return static_cast(i); - } - - namespace DCT_Upsample - { - struct Matrix44 - { - typedef int Element_Type; - enum { NUM_ROWS = 4, NUM_COLS = 4 }; - - Element_Type v[NUM_ROWS][NUM_COLS]; - - inline int rows() const { return NUM_ROWS; } - inline int cols() const { return NUM_COLS; } - - inline const Element_Type & at(int r, int c) const { return v[r][c]; } - inline Element_Type & at(int r, int c) { return v[r][c]; } - - inline Matrix44() { } - - inline Matrix44& operator += (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) += a.at(r, 0); - at(r, 1) += a.at(r, 1); - at(r, 2) += a.at(r, 2); - at(r, 3) += a.at(r, 3); - } - return *this; - } - - inline Matrix44& operator -= (const Matrix44& a) - { - for (int r = 0; r < NUM_ROWS; r++) - { - at(r, 0) -= a.at(r, 0); - at(r, 1) -= a.at(r, 1); - at(r, 2) -= a.at(r, 2); - at(r, 3) -= a.at(r, 3); - } - return *this; - } - - friend inline Matrix44 operator + (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) + b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) + b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) + b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) + b.at(r, 3); - } - return ret; - } - - friend inline Matrix44 operator - (const Matrix44& a, const Matrix44& b) - { - Matrix44 ret; - for (int r = 0; r < NUM_ROWS; r++) - { - ret.at(r, 0) = a.at(r, 0) - b.at(r, 0); - ret.at(r, 1) = a.at(r, 1) - b.at(r, 1); - ret.at(r, 2) = a.at(r, 2) - b.at(r, 2); - ret.at(r, 3) = a.at(r, 3) - b.at(r, 3); - } - return ret; - } - - static inline void add_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) + b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) + b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) + b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 3) + b.at(r, 3)); - } - } - - static inline void sub_and_store(jpgd_block_t* pDst, const Matrix44& a, const Matrix44& b) - { - for (int r = 0; r < 4; r++) - { - pDst[0*8 + r] = static_cast(a.at(r, 0) - b.at(r, 0)); - pDst[1*8 + r] = static_cast(a.at(r, 1) - b.at(r, 1)); - pDst[2*8 + r] = static_cast(a.at(r, 2) - b.at(r, 2)); - pDst[3*8 + r] = static_cast(a.at(r, 
3) - b.at(r, 3)); - } - } - }; - - const int FRACT_BITS = 10; - const int SCALE = 1 << FRACT_BITS; - - typedef int Temp_Type; -#define D(i) (((i) + (SCALE >> 1)) >> FRACT_BITS) -#define F(i) ((int)((i) * SCALE + .5f)) - - // Any decent C++ compiler will optimize this at compile time to a 0, or an array access. -#define AT(c, r) ((((c)>=NUM_COLS)||((r)>=NUM_ROWS)) ? 0 : pSrc[(c)+(r)*8]) - - // NUM_ROWS/NUM_COLS = # of non-zero rows/cols in input matrix - template - struct P_Q - { - static void calc(Matrix44& P, Matrix44& Q, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X000 = AT(0, 0); - const Temp_Type X001 = AT(0, 1); - const Temp_Type X002 = AT(0, 2); - const Temp_Type X003 = AT(0, 3); - const Temp_Type X004 = AT(0, 4); - const Temp_Type X005 = AT(0, 5); - const Temp_Type X006 = AT(0, 6); - const Temp_Type X007 = AT(0, 7); - const Temp_Type X010 = D(F(0.415735f) * AT(1, 0) + F(0.791065f) * AT(3, 0) + F(-0.352443f) * AT(5, 0) + F(0.277785f) * AT(7, 0)); - const Temp_Type X011 = D(F(0.415735f) * AT(1, 1) + F(0.791065f) * AT(3, 1) + F(-0.352443f) * AT(5, 1) + F(0.277785f) * AT(7, 1)); - const Temp_Type X012 = D(F(0.415735f) * AT(1, 2) + F(0.791065f) * AT(3, 2) + F(-0.352443f) * AT(5, 2) + F(0.277785f) * AT(7, 2)); - const Temp_Type X013 = D(F(0.415735f) * AT(1, 3) + F(0.791065f) * AT(3, 3) + F(-0.352443f) * AT(5, 3) + F(0.277785f) * AT(7, 3)); - const Temp_Type X014 = D(F(0.415735f) * AT(1, 4) + F(0.791065f) * AT(3, 4) + F(-0.352443f) * AT(5, 4) + F(0.277785f) * AT(7, 4)); - const Temp_Type X015 = D(F(0.415735f) * AT(1, 5) + F(0.791065f) * AT(3, 5) + F(-0.352443f) * AT(5, 5) + F(0.277785f) * AT(7, 5)); - const Temp_Type X016 = D(F(0.415735f) * AT(1, 6) + F(0.791065f) * AT(3, 6) + F(-0.352443f) * AT(5, 6) + F(0.277785f) * AT(7, 6)); - const Temp_Type X017 = D(F(0.415735f) * AT(1, 7) + F(0.791065f) * AT(3, 7) + F(-0.352443f) * AT(5, 7) + F(0.277785f) * AT(7, 7)); - const Temp_Type X020 = AT(4, 0); - const Temp_Type X021 = AT(4, 1); - const Temp_Type X022 = AT(4, 2); - const Temp_Type X023 = AT(4, 3); - const Temp_Type X024 = AT(4, 4); - const Temp_Type X025 = AT(4, 5); - const Temp_Type X026 = AT(4, 6); - const Temp_Type X027 = AT(4, 7); - const Temp_Type X030 = D(F(0.022887f) * AT(1, 0) + F(-0.097545f) * AT(3, 0) + F(0.490393f) * AT(5, 0) + F(0.865723f) * AT(7, 0)); - const Temp_Type X031 = D(F(0.022887f) * AT(1, 1) + F(-0.097545f) * AT(3, 1) + F(0.490393f) * AT(5, 1) + F(0.865723f) * AT(7, 1)); - const Temp_Type X032 = D(F(0.022887f) * AT(1, 2) + F(-0.097545f) * AT(3, 2) + F(0.490393f) * AT(5, 2) + F(0.865723f) * AT(7, 2)); - const Temp_Type X033 = D(F(0.022887f) * AT(1, 3) + F(-0.097545f) * AT(3, 3) + F(0.490393f) * AT(5, 3) + F(0.865723f) * AT(7, 3)); - const Temp_Type X034 = D(F(0.022887f) * AT(1, 4) + F(-0.097545f) * AT(3, 4) + F(0.490393f) * AT(5, 4) + F(0.865723f) * AT(7, 4)); - const Temp_Type X035 = D(F(0.022887f) * AT(1, 5) + F(-0.097545f) * AT(3, 5) + F(0.490393f) * AT(5, 5) + F(0.865723f) * AT(7, 5)); - const Temp_Type X036 = D(F(0.022887f) * AT(1, 6) + F(-0.097545f) * AT(3, 6) + F(0.490393f) * AT(5, 6) + F(0.865723f) * AT(7, 6)); - const Temp_Type X037 = D(F(0.022887f) * AT(1, 7) + F(-0.097545f) * AT(3, 7) + F(0.490393f) * AT(5, 7) + F(0.865723f) * AT(7, 7)); - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - P.at(0, 0) = X000; - P.at(0, 1) = D(X001 * F(0.415735f) + X003 * F(0.791065f) + X005 * F(-0.352443f) + X007 * F(0.277785f)); - P.at(0, 2) = X004; - P.at(0, 3) = D(X001 * F(0.022887f) + X003 * F(-0.097545f) + X005 * 
F(0.490393f) + X007 * F(0.865723f)); - P.at(1, 0) = X010; - P.at(1, 1) = D(X011 * F(0.415735f) + X013 * F(0.791065f) + X015 * F(-0.352443f) + X017 * F(0.277785f)); - P.at(1, 2) = X014; - P.at(1, 3) = D(X011 * F(0.022887f) + X013 * F(-0.097545f) + X015 * F(0.490393f) + X017 * F(0.865723f)); - P.at(2, 0) = X020; - P.at(2, 1) = D(X021 * F(0.415735f) + X023 * F(0.791065f) + X025 * F(-0.352443f) + X027 * F(0.277785f)); - P.at(2, 2) = X024; - P.at(2, 3) = D(X021 * F(0.022887f) + X023 * F(-0.097545f) + X025 * F(0.490393f) + X027 * F(0.865723f)); - P.at(3, 0) = X030; - P.at(3, 1) = D(X031 * F(0.415735f) + X033 * F(0.791065f) + X035 * F(-0.352443f) + X037 * F(0.277785f)); - P.at(3, 2) = X034; - P.at(3, 3) = D(X031 * F(0.022887f) + X033 * F(-0.097545f) + X035 * F(0.490393f) + X037 * F(0.865723f)); - // 40 muls 24 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - Q.at(0, 0) = D(X001 * F(0.906127f) + X003 * F(-0.318190f) + X005 * F(0.212608f) + X007 * F(-0.180240f)); - Q.at(0, 1) = X002; - Q.at(0, 2) = D(X001 * F(-0.074658f) + X003 * F(0.513280f) + X005 * F(0.768178f) + X007 * F(-0.375330f)); - Q.at(0, 3) = X006; - Q.at(1, 0) = D(X011 * F(0.906127f) + X013 * F(-0.318190f) + X015 * F(0.212608f) + X017 * F(-0.180240f)); - Q.at(1, 1) = X012; - Q.at(1, 2) = D(X011 * F(-0.074658f) + X013 * F(0.513280f) + X015 * F(0.768178f) + X017 * F(-0.375330f)); - Q.at(1, 3) = X016; - Q.at(2, 0) = D(X021 * F(0.906127f) + X023 * F(-0.318190f) + X025 * F(0.212608f) + X027 * F(-0.180240f)); - Q.at(2, 1) = X022; - Q.at(2, 2) = D(X021 * F(-0.074658f) + X023 * F(0.513280f) + X025 * F(0.768178f) + X027 * F(-0.375330f)); - Q.at(2, 3) = X026; - Q.at(3, 0) = D(X031 * F(0.906127f) + X033 * F(-0.318190f) + X035 * F(0.212608f) + X037 * F(-0.180240f)); - Q.at(3, 1) = X032; - Q.at(3, 2) = D(X031 * F(-0.074658f) + X033 * F(0.513280f) + X035 * F(0.768178f) + X037 * F(-0.375330f)); - Q.at(3, 3) = X036; - // 40 muls 24 adds - } - }; - - template - struct R_S - { - static void calc(Matrix44& R, Matrix44& S, const jpgd_block_t* pSrc) - { - // 4x8 = 4x8 times 8x8, matrix 0 is constant - const Temp_Type X100 = D(F(0.906127f) * AT(1, 0) + F(-0.318190f) * AT(3, 0) + F(0.212608f) * AT(5, 0) + F(-0.180240f) * AT(7, 0)); - const Temp_Type X101 = D(F(0.906127f) * AT(1, 1) + F(-0.318190f) * AT(3, 1) + F(0.212608f) * AT(5, 1) + F(-0.180240f) * AT(7, 1)); - const Temp_Type X102 = D(F(0.906127f) * AT(1, 2) + F(-0.318190f) * AT(3, 2) + F(0.212608f) * AT(5, 2) + F(-0.180240f) * AT(7, 2)); - const Temp_Type X103 = D(F(0.906127f) * AT(1, 3) + F(-0.318190f) * AT(3, 3) + F(0.212608f) * AT(5, 3) + F(-0.180240f) * AT(7, 3)); - const Temp_Type X104 = D(F(0.906127f) * AT(1, 4) + F(-0.318190f) * AT(3, 4) + F(0.212608f) * AT(5, 4) + F(-0.180240f) * AT(7, 4)); - const Temp_Type X105 = D(F(0.906127f) * AT(1, 5) + F(-0.318190f) * AT(3, 5) + F(0.212608f) * AT(5, 5) + F(-0.180240f) * AT(7, 5)); - const Temp_Type X106 = D(F(0.906127f) * AT(1, 6) + F(-0.318190f) * AT(3, 6) + F(0.212608f) * AT(5, 6) + F(-0.180240f) * AT(7, 6)); - const Temp_Type X107 = D(F(0.906127f) * AT(1, 7) + F(-0.318190f) * AT(3, 7) + F(0.212608f) * AT(5, 7) + F(-0.180240f) * AT(7, 7)); - const Temp_Type X110 = AT(2, 0); - const Temp_Type X111 = AT(2, 1); - const Temp_Type X112 = AT(2, 2); - const Temp_Type X113 = AT(2, 3); - const Temp_Type X114 = AT(2, 4); - const Temp_Type X115 = AT(2, 5); - const Temp_Type X116 = AT(2, 6); - const Temp_Type X117 = AT(2, 7); - const Temp_Type X120 = D(F(-0.074658f) * AT(1, 0) + F(0.513280f) * AT(3, 0) + F(0.768178f) * AT(5, 0) + F(-0.375330f) * AT(7, 0)); - 
const Temp_Type X121 = D(F(-0.074658f) * AT(1, 1) + F(0.513280f) * AT(3, 1) + F(0.768178f) * AT(5, 1) + F(-0.375330f) * AT(7, 1)); - const Temp_Type X122 = D(F(-0.074658f) * AT(1, 2) + F(0.513280f) * AT(3, 2) + F(0.768178f) * AT(5, 2) + F(-0.375330f) * AT(7, 2)); - const Temp_Type X123 = D(F(-0.074658f) * AT(1, 3) + F(0.513280f) * AT(3, 3) + F(0.768178f) * AT(5, 3) + F(-0.375330f) * AT(7, 3)); - const Temp_Type X124 = D(F(-0.074658f) * AT(1, 4) + F(0.513280f) * AT(3, 4) + F(0.768178f) * AT(5, 4) + F(-0.375330f) * AT(7, 4)); - const Temp_Type X125 = D(F(-0.074658f) * AT(1, 5) + F(0.513280f) * AT(3, 5) + F(0.768178f) * AT(5, 5) + F(-0.375330f) * AT(7, 5)); - const Temp_Type X126 = D(F(-0.074658f) * AT(1, 6) + F(0.513280f) * AT(3, 6) + F(0.768178f) * AT(5, 6) + F(-0.375330f) * AT(7, 6)); - const Temp_Type X127 = D(F(-0.074658f) * AT(1, 7) + F(0.513280f) * AT(3, 7) + F(0.768178f) * AT(5, 7) + F(-0.375330f) * AT(7, 7)); - const Temp_Type X130 = AT(6, 0); - const Temp_Type X131 = AT(6, 1); - const Temp_Type X132 = AT(6, 2); - const Temp_Type X133 = AT(6, 3); - const Temp_Type X134 = AT(6, 4); - const Temp_Type X135 = AT(6, 5); - const Temp_Type X136 = AT(6, 6); - const Temp_Type X137 = AT(6, 7); - // 80 muls 48 adds - - // 4x4 = 4x8 times 8x4, matrix 1 is constant - R.at(0, 0) = X100; - R.at(0, 1) = D(X101 * F(0.415735f) + X103 * F(0.791065f) + X105 * F(-0.352443f) + X107 * F(0.277785f)); - R.at(0, 2) = X104; - R.at(0, 3) = D(X101 * F(0.022887f) + X103 * F(-0.097545f) + X105 * F(0.490393f) + X107 * F(0.865723f)); - R.at(1, 0) = X110; - R.at(1, 1) = D(X111 * F(0.415735f) + X113 * F(0.791065f) + X115 * F(-0.352443f) + X117 * F(0.277785f)); - R.at(1, 2) = X114; - R.at(1, 3) = D(X111 * F(0.022887f) + X113 * F(-0.097545f) + X115 * F(0.490393f) + X117 * F(0.865723f)); - R.at(2, 0) = X120; - R.at(2, 1) = D(X121 * F(0.415735f) + X123 * F(0.791065f) + X125 * F(-0.352443f) + X127 * F(0.277785f)); - R.at(2, 2) = X124; - R.at(2, 3) = D(X121 * F(0.022887f) + X123 * F(-0.097545f) + X125 * F(0.490393f) + X127 * F(0.865723f)); - R.at(3, 0) = X130; - R.at(3, 1) = D(X131 * F(0.415735f) + X133 * F(0.791065f) + X135 * F(-0.352443f) + X137 * F(0.277785f)); - R.at(3, 2) = X134; - R.at(3, 3) = D(X131 * F(0.022887f) + X133 * F(-0.097545f) + X135 * F(0.490393f) + X137 * F(0.865723f)); - // 40 muls 24 adds - // 4x4 = 4x8 times 8x4, matrix 1 is constant - S.at(0, 0) = D(X101 * F(0.906127f) + X103 * F(-0.318190f) + X105 * F(0.212608f) + X107 * F(-0.180240f)); - S.at(0, 1) = X102; - S.at(0, 2) = D(X101 * F(-0.074658f) + X103 * F(0.513280f) + X105 * F(0.768178f) + X107 * F(-0.375330f)); - S.at(0, 3) = X106; - S.at(1, 0) = D(X111 * F(0.906127f) + X113 * F(-0.318190f) + X115 * F(0.212608f) + X117 * F(-0.180240f)); - S.at(1, 1) = X112; - S.at(1, 2) = D(X111 * F(-0.074658f) + X113 * F(0.513280f) + X115 * F(0.768178f) + X117 * F(-0.375330f)); - S.at(1, 3) = X116; - S.at(2, 0) = D(X121 * F(0.906127f) + X123 * F(-0.318190f) + X125 * F(0.212608f) + X127 * F(-0.180240f)); - S.at(2, 1) = X122; - S.at(2, 2) = D(X121 * F(-0.074658f) + X123 * F(0.513280f) + X125 * F(0.768178f) + X127 * F(-0.375330f)); - S.at(2, 3) = X126; - S.at(3, 0) = D(X131 * F(0.906127f) + X133 * F(-0.318190f) + X135 * F(0.212608f) + X137 * F(-0.180240f)); - S.at(3, 1) = X132; - S.at(3, 2) = D(X131 * F(-0.074658f) + X133 * F(0.513280f) + X135 * F(0.768178f) + X137 * F(-0.375330f)); - S.at(3, 3) = X136; - // 40 muls 24 adds - } - }; - } // end namespace DCT_Upsample - - // Unconditionally frees all allocated m_blocks. 
- void jpeg_decoder::free_all_blocks() - { - m_pStream = NULL; - for (mem_block *b = m_pMem_blocks; b; ) - { - mem_block *n = b->m_pNext; - jpgd_free(b); - b = n; - } - m_pMem_blocks = NULL; - } - - // This method handles all errors. - // It could easily be changed to use C++ exceptions. - void jpeg_decoder::stop_decoding(jpgd_status status) - { - m_error_code = status; - free_all_blocks(); - longjmp(m_jmp_state, status); - - // we shouldn't get here as longjmp shouldn't return, but we put it here to make it explicit - // that this function doesn't return, otherwise we get this error: - // - // error : function declared 'noreturn' should not return - exit(1); - } - - void *jpeg_decoder::alloc(size_t nSize, bool zero) - { - nSize = (JPGD_MAX(nSize, 1) + 3) & ~3; - char *rv = NULL; - for (mem_block *b = m_pMem_blocks; b; b = b->m_pNext) - { - if ((b->m_used_count + nSize) <= b->m_size) - { - rv = b->m_data + b->m_used_count; - b->m_used_count += nSize; - break; - } - } - if (!rv) - { - int capacity = JPGD_MAX(32768 - 256, (nSize + 2047) & ~2047); - mem_block *b = (mem_block*)jpgd_malloc(sizeof(mem_block) + capacity); - if (!b) stop_decoding(JPGD_NOTENOUGHMEM); - b->m_pNext = m_pMem_blocks; m_pMem_blocks = b; - b->m_used_count = nSize; - b->m_size = capacity; - rv = b->m_data; - } - if (zero) memset(rv, 0, nSize); - return rv; - } - - void jpeg_decoder::word_clear(void *p, uint16 c, uint n) - { - uint8 *pD = (uint8*)p; - const uint8 l = c & 0xFF, h = (c >> 8) & 0xFF; - while (n) - { - pD[0] = l; pD[1] = h; pD += 2; - n--; - } - } - - // Refill the input buffer. - // This method will sit in a loop until (A) the buffer is full or (B) - // the stream's read() method reports and end of file condition. - void jpeg_decoder::prep_in_buffer() - { - m_in_buf_left = 0; - m_pIn_buf_ofs = m_in_buf; - - if (m_eof_flag) - return; - - do - { - int bytes_read = m_pStream->read(m_in_buf + m_in_buf_left, JPGD_IN_BUF_SIZE - m_in_buf_left, &m_eof_flag); - if (bytes_read == -1) - stop_decoding(JPGD_STREAM_READ); - - m_in_buf_left += bytes_read; - } while ((m_in_buf_left < JPGD_IN_BUF_SIZE) && (!m_eof_flag)); - - m_total_bytes_read += m_in_buf_left; - - // Pad the end of the block with M_EOI (prevents the decompressor from going off the rails if the stream is invalid). - // (This dates way back to when this decompressor was written in C/asm, and the all-asm Huffman decoder did some fancy things to increase perf.) - word_clear(m_pIn_buf_ofs + m_in_buf_left, 0xD9FF, 64); - } - - // Read a Huffman code table. 
- void jpeg_decoder::read_dht_marker() - { - int i, index, count; - uint8 huff_num[17]; - uint8 huff_val[256]; - - uint num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= 2; - - while (num_left) - { - index = get_bits(8); - - huff_num[0] = 0; - - count = 0; - - for (i = 1; i <= 16; i++) - { - huff_num[i] = static_cast(get_bits(8)); - count += huff_num[i]; - } - - if (count > 255) - stop_decoding(JPGD_BAD_DHT_COUNTS); - - for (i = 0; i < count; i++) - huff_val[i] = static_cast(get_bits(8)); - - i = 1 + 16 + count; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DHT_MARKER); - - num_left -= i; - - if ((index & 0x10) > 0x10) - stop_decoding(JPGD_BAD_DHT_INDEX); - - index = (index & 0x0F) + ((index & 0x10) >> 4) * (JPGD_MAX_HUFF_TABLES >> 1); - - if (index >= JPGD_MAX_HUFF_TABLES) - stop_decoding(JPGD_BAD_DHT_INDEX); - - if (!m_huff_num[index]) - m_huff_num[index] = (uint8 *)alloc(17); - - if (!m_huff_val[index]) - m_huff_val[index] = (uint8 *)alloc(256); - - m_huff_ac[index] = (index & 0x10) != 0; - memcpy(m_huff_num[index], huff_num, 17); - memcpy(m_huff_val[index], huff_val, 256); - } - } - - // Read a quantization table. - void jpeg_decoder::read_dqt_marker() - { - int n, i, prec; - uint num_left; - uint temp; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_DQT_MARKER); - - num_left -= 2; - - while (num_left) - { - n = get_bits(8); - prec = n >> 4; - n &= 0x0F; - - if (n >= JPGD_MAX_QUANT_TABLES) - stop_decoding(JPGD_BAD_DQT_TABLE); - - if (!m_quant[n]) - m_quant[n] = (jpgd_quant_t *)alloc(64 * sizeof(jpgd_quant_t)); - - // read quantization entries, in zag order - for (i = 0; i < 64; i++) - { - temp = get_bits(8); - - if (prec) - temp = (temp << 8) + get_bits(8); - - m_quant[n][i] = static_cast(temp); - } - - i = 64 + 1; - - if (prec) - i += 64; - - if (num_left < (uint)i) - stop_decoding(JPGD_BAD_DQT_LENGTH); - - num_left -= i; - } - } - - // Read the start of frame (SOF) marker. - void jpeg_decoder::read_sof_marker() - { - int i; - uint num_left; - - num_left = get_bits(16); - - if (get_bits(8) != 8) /* precision: sorry, only 8-bit precision is supported right now */ - stop_decoding(JPGD_BAD_PRECISION); - - m_image_y_size = get_bits(16); - - if ((m_image_y_size < 1) || (m_image_y_size > JPGD_MAX_HEIGHT)) - stop_decoding(JPGD_BAD_HEIGHT); - - m_image_x_size = get_bits(16); - - if ((m_image_x_size < 1) || (m_image_x_size > JPGD_MAX_WIDTH)) - stop_decoding(JPGD_BAD_WIDTH); - - m_comps_in_frame = get_bits(8); - - if (m_comps_in_frame > JPGD_MAX_COMPONENTS) - stop_decoding(JPGD_TOO_MANY_COMPONENTS); - - if (num_left != (uint)(m_comps_in_frame * 3 + 8)) - stop_decoding(JPGD_BAD_SOF_LENGTH); - - for (i = 0; i < m_comps_in_frame; i++) - { - m_comp_ident[i] = get_bits(8); - m_comp_h_samp[i] = get_bits(4); - m_comp_v_samp[i] = get_bits(4); - m_comp_quant[i] = get_bits(8); - } - } - - // Used to skip unrecognized markers. - void jpeg_decoder::skip_variable_marker() - { - uint num_left; - - num_left = get_bits(16); - - if (num_left < 2) - stop_decoding(JPGD_BAD_VARIABLE_MARKER); - - num_left -= 2; - - while (num_left) - { - get_bits(8); - num_left--; - } - } - - // Read a define restart interval (DRI) marker. - void jpeg_decoder::read_dri_marker() - { - if (get_bits(16) != 4) - stop_decoding(JPGD_BAD_DRI_LENGTH); - - m_restart_interval = get_bits(16); - } - - // Read a start of scan (SOS) marker. 
- void jpeg_decoder::read_sos_marker() - { - uint num_left; - int i, ci, n, c, cc; - - num_left = get_bits(16); - - n = get_bits(8); - - m_comps_in_scan = n; - - num_left -= 3; - - if ( (num_left != (uint)(n * 2 + 3)) || (n < 1) || (n > JPGD_MAX_COMPS_IN_SCAN) ) - stop_decoding(JPGD_BAD_SOS_LENGTH); - - for (i = 0; i < n; i++) - { - cc = get_bits(8); - c = get_bits(8); - num_left -= 2; - - for (ci = 0; ci < m_comps_in_frame; ci++) - if (cc == m_comp_ident[ci]) - break; - - if (ci >= m_comps_in_frame) - stop_decoding(JPGD_BAD_SOS_COMP_ID); - - m_comp_list[i] = ci; - m_comp_dc_tab[ci] = (c >> 4) & 15; - m_comp_ac_tab[ci] = (c & 15) + (JPGD_MAX_HUFF_TABLES >> 1); - } - - m_spectral_start = get_bits(8); - m_spectral_end = get_bits(8); - m_successive_high = get_bits(4); - m_successive_low = get_bits(4); - - if (!m_progressive_flag) - { - m_spectral_start = 0; - m_spectral_end = 63; - } - - num_left -= 3; - - while (num_left) /* read past whatever is num_left */ - { - get_bits(8); - num_left--; - } - } - - // Finds the next marker. - int jpeg_decoder::next_marker() - { - uint c, bytes; - - bytes = 0; - - do - { - do - { - bytes++; - c = get_bits(8); - } while (c != 0xFF); - - do - { - c = get_bits(8); - } while (c == 0xFF); - - } while (c == 0); - - // If bytes > 0 here, there where extra bytes before the marker (not good). - - return c; - } - - // Process markers. Returns when an SOFx, SOI, EOI, or SOS marker is - // encountered. - int jpeg_decoder::process_markers() - { - int c; - - for ( ; ; ) - { - c = next_marker(); - - switch (c) - { - case M_SOF0: - case M_SOF1: - case M_SOF2: - case M_SOF3: - case M_SOF5: - case M_SOF6: - case M_SOF7: - // case M_JPG: - case M_SOF9: - case M_SOF10: - case M_SOF11: - case M_SOF13: - case M_SOF14: - case M_SOF15: - case M_SOI: - case M_EOI: - case M_SOS: - { - return c; - } - case M_DHT: - { - read_dht_marker(); - break; - } - // No arithmitic support - dumb patents! - case M_DAC: - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - case M_DQT: - { - read_dqt_marker(); - break; - } - case M_DRI: - { - read_dri_marker(); - break; - } - //case M_APP0: /* no need to read the JFIF marker */ - - case M_JPG: - case M_RST0: /* no parameters */ - case M_RST1: - case M_RST2: - case M_RST3: - case M_RST4: - case M_RST5: - case M_RST6: - case M_RST7: - case M_TEM: - { - stop_decoding(JPGD_UNEXPECTED_MARKER); - break; - } - default: /* must be DNL, DHP, EXP, APPn, JPGn, COM, or RESn or APP0 */ - { - skip_variable_marker(); - break; - } - } - } - } - - // Finds the start of image (SOI) marker. - // This code is rather defensive: it only checks the first 512 bytes to avoid - // false positives. - void jpeg_decoder::locate_soi_marker() - { - uint lastchar, thischar; - uint bytesleft; - - lastchar = get_bits(8); - - thischar = get_bits(8); - - /* ok if it's a normal JPEG file without a special header */ - - if ((lastchar == 0xFF) && (thischar == M_SOI)) - return; - - bytesleft = 4096; //512; - - for ( ; ; ) - { - if (--bytesleft == 0) - stop_decoding(JPGD_NOT_JPEG); - - lastchar = thischar; - - thischar = get_bits(8); - - if (lastchar == 0xFF) - { - if (thischar == M_SOI) - break; - else if (thischar == M_EOI) // get_bits will keep returning M_EOI if we read past the end - stop_decoding(JPGD_NOT_JPEG); - } - } - - // Check the next character after marker: if it's not 0xFF, it can't be the start of the next marker, so the file is bad. 
- thischar = (m_bit_buf >> 24) & 0xFF; - - if (thischar != 0xFF) - stop_decoding(JPGD_NOT_JPEG); - } - - // Find a start of frame (SOF) marker. - void jpeg_decoder::locate_sof_marker() - { - locate_soi_marker(); - - int c = process_markers(); - - switch (c) - { - case M_SOF2: - m_progressive_flag = JPGD_TRUE; - case M_SOF0: /* baseline DCT */ - case M_SOF1: /* extended sequential DCT */ - { - read_sof_marker(); - break; - } - case M_SOF9: /* Arithmitic coding */ - { - stop_decoding(JPGD_NO_ARITHMITIC_SUPPORT); - break; - } - default: - { - stop_decoding(JPGD_UNSUPPORTED_MARKER); - break; - } - } - } - - // Find a start of scan (SOS) marker. - int jpeg_decoder::locate_sos_marker() - { - int c; - - c = process_markers(); - - if (c == M_EOI) - return JPGD_FALSE; - else if (c != M_SOS) - stop_decoding(JPGD_UNEXPECTED_MARKER); - - read_sos_marker(); - - return JPGD_TRUE; - } - - // Reset everything to default/uninitialized state. - void jpeg_decoder::init(jpeg_decoder_stream *pStream) - { - m_pMem_blocks = NULL; - m_error_code = JPGD_SUCCESS; - m_ready_flag = false; - m_image_x_size = m_image_y_size = 0; - m_pStream = pStream; - m_progressive_flag = JPGD_FALSE; - - memset(m_huff_ac, 0, sizeof(m_huff_ac)); - memset(m_huff_num, 0, sizeof(m_huff_num)); - memset(m_huff_val, 0, sizeof(m_huff_val)); - memset(m_quant, 0, sizeof(m_quant)); - - m_scan_type = 0; - m_comps_in_frame = 0; - - memset(m_comp_h_samp, 0, sizeof(m_comp_h_samp)); - memset(m_comp_v_samp, 0, sizeof(m_comp_v_samp)); - memset(m_comp_quant, 0, sizeof(m_comp_quant)); - memset(m_comp_ident, 0, sizeof(m_comp_ident)); - memset(m_comp_h_blocks, 0, sizeof(m_comp_h_blocks)); - memset(m_comp_v_blocks, 0, sizeof(m_comp_v_blocks)); - - m_comps_in_scan = 0; - memset(m_comp_list, 0, sizeof(m_comp_list)); - memset(m_comp_dc_tab, 0, sizeof(m_comp_dc_tab)); - memset(m_comp_ac_tab, 0, sizeof(m_comp_ac_tab)); - - m_spectral_start = 0; - m_spectral_end = 0; - m_successive_low = 0; - m_successive_high = 0; - m_max_mcu_x_size = 0; - m_max_mcu_y_size = 0; - m_blocks_per_mcu = 0; - m_max_blocks_per_row = 0; - m_mcus_per_row = 0; - m_mcus_per_col = 0; - m_expanded_blocks_per_component = 0; - m_expanded_blocks_per_mcu = 0; - m_expanded_blocks_per_row = 0; - m_freq_domain_chroma_upsample = false; - - memset(m_mcu_org, 0, sizeof(m_mcu_org)); - - m_total_lines_left = 0; - m_mcu_lines_left = 0; - m_real_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_scan_line = 0; - m_dest_bytes_per_pixel = 0; - - memset(m_pHuff_tabs, 0, sizeof(m_pHuff_tabs)); - - memset(m_dc_coeffs, 0, sizeof(m_dc_coeffs)); - memset(m_ac_coeffs, 0, sizeof(m_ac_coeffs)); - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_eob_run = 0; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - m_pIn_buf_ofs = m_in_buf; - m_in_buf_left = 0; - m_eof_flag = false; - m_tem_flag = 0; - - memset(m_in_buf_pad_start, 0, sizeof(m_in_buf_pad_start)); - memset(m_in_buf, 0, sizeof(m_in_buf)); - memset(m_in_buf_pad_end, 0, sizeof(m_in_buf_pad_end)); - - m_restart_interval = 0; - m_restarts_left = 0; - m_next_restart_num = 0; - - m_max_mcus_per_row = 0; - m_max_blocks_per_mcu = 0; - m_max_mcus_per_col = 0; - - memset(m_last_dc_val, 0, sizeof(m_last_dc_val)); - m_pMCU_coefficients = NULL; - m_pSample_buf = NULL; - - m_total_bytes_read = 0; - - m_pScan_line_0 = NULL; - m_pScan_line_1 = NULL; - - // Ready the input buffer. - prep_in_buffer(); - - // Prime the bit buffer. 
- m_bits_left = 16; - m_bit_buf = 0; - - get_bits(16); - get_bits(16); - - for (int i = 0; i < JPGD_MAX_BLOCKS_PER_MCU; i++) - m_mcu_block_max_zag[i] = 64; - } - -#define SCALEBITS 16 -#define ONE_HALF ((int) 1 << (SCALEBITS-1)) -#define FIX(x) ((int) ((x) * (1L<> SCALEBITS; - m_cbb[i] = ( FIX(1.77200f) * k + ONE_HALF) >> SCALEBITS; - m_crg[i] = (-FIX(0.71414f)) * k; - m_cbg[i] = (-FIX(0.34414f)) * k + ONE_HALF; - } - } - - // This method throws back into the stream any bytes that where read - // into the bit buffer during initial marker scanning. - void jpeg_decoder::fix_in_buffer() - { - // In case any 0xFF's where pulled into the buffer during marker scanning. - JPGD_ASSERT((m_bits_left & 7) == 0); - - if (m_bits_left == 16) - stuff_char( (uint8)(m_bit_buf & 0xFF)); - - if (m_bits_left >= 8) - stuff_char( (uint8)((m_bit_buf >> 8) & 0xFF)); - - stuff_char((uint8)((m_bit_buf >> 16) & 0xFF)); - stuff_char((uint8)((m_bit_buf >> 24) & 0xFF)); - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - void jpeg_decoder::transform_mcu(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_blocks_per_mcu * 64; - - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - } - - static const uint8 s_max_rc[64] = - { - 17, 18, 34, 50, 50, 51, 52, 52, 52, 68, 84, 84, 84, 84, 85, 86, 86, 86, 86, 86, - 102, 118, 118, 118, 118, 118, 118, 119, 120, 120, 120, 120, 120, 120, 120, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, - 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136 - }; - - void jpeg_decoder::transform_mcu_expand(int mcu_row) - { - jpgd_block_t* pSrc_ptr = m_pMCU_coefficients; - uint8* pDst_ptr = m_pSample_buf + mcu_row * m_expanded_blocks_per_mcu * 64; - - // Y IDCT - int mcu_block; - for (mcu_block = 0; mcu_block < m_expanded_blocks_per_component; mcu_block++) - { - idct(pSrc_ptr, pDst_ptr, m_mcu_block_max_zag[mcu_block]); - pSrc_ptr += 64; - pDst_ptr += 64; - } - - // Chroma IDCT, with upsampling - jpgd_block_t temp_block[64]; - - for (int i = 0; i < 2; i++) - { - DCT_Upsample::Matrix44 P, Q, R, S; - - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] >= 1); - JPGD_ASSERT(m_mcu_block_max_zag[mcu_block] <= 64); - - switch (s_max_rc[m_mcu_block_max_zag[mcu_block++] - 1]) - { - case 1*16+1: - DCT_Upsample::P_Q<1, 1>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 1>::calc(R, S, pSrc_ptr); - break; - case 1*16+2: - DCT_Upsample::P_Q<1, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<1, 2>::calc(R, S, pSrc_ptr); - break; - case 2*16+2: - DCT_Upsample::P_Q<2, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<2, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+2: - DCT_Upsample::P_Q<3, 2>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 2>::calc(R, S, pSrc_ptr); - break; - case 3*16+3: - DCT_Upsample::P_Q<3, 3>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 3>::calc(R, S, pSrc_ptr); - break; - case 3*16+4: - DCT_Upsample::P_Q<3, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<3, 4>::calc(R, S, pSrc_ptr); - break; - case 4*16+4: - DCT_Upsample::P_Q<4, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<4, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+4: - DCT_Upsample::P_Q<5, 4>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 4>::calc(R, S, pSrc_ptr); - break; - case 5*16+5: - DCT_Upsample::P_Q<5, 5>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 5>::calc(R, S, pSrc_ptr); - break; - 
case 5*16+6: - DCT_Upsample::P_Q<5, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<5, 6>::calc(R, S, pSrc_ptr); - break; - case 6*16+6: - DCT_Upsample::P_Q<6, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<6, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+6: - DCT_Upsample::P_Q<7, 6>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 6>::calc(R, S, pSrc_ptr); - break; - case 7*16+7: - DCT_Upsample::P_Q<7, 7>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 7>::calc(R, S, pSrc_ptr); - break; - case 7*16+8: - DCT_Upsample::P_Q<7, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<7, 8>::calc(R, S, pSrc_ptr); - break; - case 8*16+8: - DCT_Upsample::P_Q<8, 8>::calc(P, Q, pSrc_ptr); - DCT_Upsample::R_S<8, 8>::calc(R, S, pSrc_ptr); - break; - default: - JPGD_ASSERT(false); - } - - DCT_Upsample::Matrix44 a(P + Q); P -= Q; - DCT_Upsample::Matrix44& b = P; - DCT_Upsample::Matrix44 c(R + S); R -= S; - DCT_Upsample::Matrix44& d = R; - - DCT_Upsample::Matrix44::add_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, a, c); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::add_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - DCT_Upsample::Matrix44::sub_and_store(temp_block, b, d); - idct_4x4(temp_block, pDst_ptr); - pDst_ptr += 64; - - pSrc_ptr += 64; - } - } - - // Loads and dequantizes the next row of (already decoded) coefficients. - // Progressive images only. - void jpeg_decoder::load_next_row() - { - int i; - jpgd_block_t *p; - jpgd_quant_t *q; - int mcu_row, mcu_block, row_block = 0; - int component_num, component_id; - int block_x_mcu[JPGD_MAX_COMPONENTS]; - - memset(block_x_mcu, 0, JPGD_MAX_COMPONENTS * sizeof(int)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - q = m_quant[m_comp_quant[component_id]]; - - p = m_pMCU_coefficients + 64 * mcu_block; - - jpgd_block_t* pAC = coeff_buf_getp(m_ac_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - jpgd_block_t* pDC = coeff_buf_getp(m_dc_coeffs[component_id], block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - p[0] = pDC[0]; - memcpy(&p[1], &pAC[1], 63 * sizeof(jpgd_block_t)); - - for (i = 63; i > 0; i--) - if (p[g_ZAG[i]]) - break; - - m_mcu_block_max_zag[mcu_block] = i + 1; - - for ( ; i >= 0; i--) - if (p[g_ZAG[i]]) - p[g_ZAG[i]] = static_cast(p[g_ZAG[i]] * q[i]); - - row_block++; - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - - // Restart interval processing. 
- void jpeg_decoder::process_restart() - { - int i; - int c = 0; - - // Align to a byte boundry - // FIXME: Is this really necessary? get_bits_no_markers() never reads in markers! - //get_bits_no_markers(m_bits_left & 7); - - // Let's scan a little bit to find the marker, but not _too_ far. - // 1536 is a "fudge factor" that determines how much to scan. - for (i = 1536; i > 0; i--) - if (get_char() == 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - for ( ; i > 0; i--) - if ((c = get_char()) != 0xFF) - break; - - if (i == 0) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Is it the expected marker? If not, something bad happened. - if (c != (m_next_restart_num + M_RST0)) - stop_decoding(JPGD_BAD_RESTART_MARKER); - - // Reset each component's DC prediction values. - memset(&m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - m_restarts_left = m_restart_interval; - - m_next_restart_num = (m_next_restart_num + 1) & 7; - - // Get the bit buffer going again... - - m_bits_left = 16; - get_bits_no_markers(16); - get_bits_no_markers(16); - } - - static inline int dequantize_ac(int c, int q) { c *= q; return c; } - - // Decodes and dequantizes the next row of coefficients. - void jpeg_decoder::decode_next_row() - { - int row_block = 0; - - for (int mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - jpgd_block_t* p = m_pMCU_coefficients; - for (int mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++, p += 64) - { - int component_id = m_mcu_org[mcu_block]; - jpgd_quant_t* q = m_quant[m_comp_quant[component_id]]; - - int r, s; - s = huff_decode(m_pHuff_tabs[m_comp_dc_tab[component_id]], r); - s = HUFF_EXTEND(r, s); - - m_last_dc_val[component_id] = (s += m_last_dc_val[component_id]); - - p[0] = static_cast(s * q[0]); - - int prev_num_set = m_mcu_block_max_zag[mcu_block]; - - huff_tables *pH = m_pHuff_tabs[m_comp_ac_tab[component_id]]; - - int k; - for (k = 1; k < 64; k++) - { - int extra_bits; - s = huff_decode(pH, extra_bits); - - r = s >> 4; - s &= 15; - - if (s) - { - if (r) - { - if ((k + r) > 63) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(r, prev_num_set - k); - int kt = k; - while (n--) - p[g_ZAG[kt++]] = 0; - } - - k += r; - } - - s = HUFF_EXTEND(extra_bits, s); - - JPGD_ASSERT(k < 64); - - p[g_ZAG[k]] = static_cast(dequantize_ac(s, q[k])); //s * q[k]; - } - else - { - if (r == 15) - { - if ((k + 16) > 64) - stop_decoding(JPGD_DECODE_ERROR); - - if (k < prev_num_set) - { - int n = JPGD_MIN(16, prev_num_set - k); - int kt = k; - while (n--) - { - JPGD_ASSERT(kt <= 63); - p[g_ZAG[kt++]] = 0; - } - } - - k += 16 - 1; // - 1 because the loop counter is k - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64 && p[g_ZAG[k]] == 0); - // END EPIC MOD - } - else - break; - } - } - - if (k < prev_num_set) - { - int kt = k; - while (kt < prev_num_set) - p[g_ZAG[kt++]] = 0; - } - - m_mcu_block_max_zag[mcu_block] = k; - - row_block++; - } - - if (m_freq_domain_chroma_upsample) - transform_mcu_expand(mcu_row); - else - transform_mcu(mcu_row); - - m_restarts_left--; - } - } - - // YCbCr H1V1 (1x1:1:1, 3 m_blocks per MCU) to RGB - void jpeg_decoder::H1V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int y = s[j]; - int cb = s[64+j]; - int cr = s[128+j]; - - if (jpg_format == 
ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - d += 4; - } - - s += 64*3; - } - } - - // YCbCr H2V1 (2x1:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H2V1Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *y = m_pSample_buf + row * 8; - uint8 *c = m_pSample_buf + 2*64 + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 4; j++) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j<<1]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[(j<<1)+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - } - - d0 += 8; - - c++; - } - y += 64; - } - - y += 64*4 - 64*2; - c += 64*4 - 8; - } - } - - // YCbCr H2V1 (1x2:1:1, 4 m_blocks per MCU) to RGB - void jpeg_decoder::H1V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*1 + (row & 7) * 8; - - c = m_pSample_buf + 64*2 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int j = 0; j < 8; j++) - { - int cb = c[0+j]; - int cr = c[64+j]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[8+j]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - } - - d0 += 4; - d1 += 4; - } - - y += 64*4; - c += 64*4; - } - } - - // YCbCr H2V2 (2x2:1:1, 6 m_blocks per MCU) to RGB - void jpeg_decoder::H2V2Convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d0 = m_pScan_line_0; - uint8 *d1 = m_pScan_line_1; - uint8 *y; - uint8 *c; - - if (row < 8) - y = m_pSample_buf + row * 8; - else - y = m_pSample_buf + 64*2 + (row & 7) * 8; - - c = m_pSample_buf + 64*4 + (row >> 1) * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int l = 0; l < 2; l++) - { - for (int j = 0; j < 8; j += 2) - { - int cb = c[0]; - int cr = c[64]; - - int rc = m_crr[cr]; - int gc = ((m_crg[cr] + m_cbg[cb]) >> 16); - int bc = m_cbb[cb]; - - int yy = y[j]; - if (jpg_format == ERGBFormatJPG::BGRA) - { - d0[0] = clamp(yy+bc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+rc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+bc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+rc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+bc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+rc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+bc); - d1[5] = 
clamp(yy+gc); - d1[6] = clamp(yy+rc); - d1[7] = 255; - } - else - { - d0[0] = clamp(yy+rc); - d0[1] = clamp(yy+gc); - d0[2] = clamp(yy+bc); - d0[3] = 255; - yy = y[j+1]; - d0[4] = clamp(yy+rc); - d0[5] = clamp(yy+gc); - d0[6] = clamp(yy+bc); - d0[7] = 255; - yy = y[j+8]; - d1[0] = clamp(yy+rc); - d1[1] = clamp(yy+gc); - d1[2] = clamp(yy+bc); - d1[3] = 255; - yy = y[j+8+1]; - d1[4] = clamp(yy+rc); - d1[5] = clamp(yy+gc); - d1[6] = clamp(yy+bc); - d1[7] = 255; - } - - d0 += 8; - d1 += 8; - - c++; - } - y += 64; - } - - y += 64*6 - 64*2; - c += 64*6 - 8; - } - } - - // Y (1 block per MCU) to 8-bit grayscale - void jpeg_decoder::gray_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - uint8 *d = m_pScan_line_0; - uint8 *s = m_pSample_buf + row * 8; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - *(uint *)d = *(uint *)s; - *(uint *)(&d[4]) = *(uint *)(&s[4]); - - s += 64; - d += 8; - } - } - - void jpeg_decoder::expanded_convert() - { - int row = m_max_mcu_y_size - m_mcu_lines_left; - - uint8* Py = m_pSample_buf + (row / 8) * 64 * m_comp_h_samp[0] + (row & 7) * 8; - - uint8* d = m_pScan_line_0; - - for (int i = m_max_mcus_per_row; i > 0; i--) - { - for (int k = 0; k < m_max_mcu_x_size; k += 8) - { - const int Y_ofs = k * 8; - const int Cb_ofs = Y_ofs + 64 * m_expanded_blocks_per_component; - const int Cr_ofs = Y_ofs + 64 * m_expanded_blocks_per_component * 2; - for (int j = 0; j < 8; j++) - { - int y = Py[Y_ofs + j]; - int cb = Py[Cb_ofs + j]; - int cr = Py[Cr_ofs + j]; - - if (jpg_format == ERGBFormatJPG::BGRA) - { - d[0] = clamp(y + m_cbb[cb]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_crr[cr]); - d[3] = 255; - } - else - { - d[0] = clamp(y + m_crr[cr]); - d[1] = clamp(y + ((m_crg[cr] + m_cbg[cb]) >> 16)); - d[2] = clamp(y + m_cbb[cb]); - d[3] = 255; - } - - d += 4; - } - } - - Py += 64 * m_expanded_blocks_per_mcu; - } - } - - // Find end of image (EOI) marker, so we can return to the user the exact size of the input stream. - void jpeg_decoder::find_eoi() - { - if (!m_progressive_flag) - { - // Attempt to read the EOI marker. - //get_bits_no_markers(m_bits_left & 7); - - // Prime the bit buffer - m_bits_left = 16; - get_bits(16); - get_bits(16); - - // The next marker _should_ be EOI - process_markers(); - } - - m_total_bytes_read -= m_in_buf_left; - } - - int jpeg_decoder::decode(const void** pScan_line, uint* pScan_line_len) - { - if ((m_error_code) || (!m_ready_flag)) - return JPGD_FAILED; - - if (m_total_lines_left == 0) - return JPGD_DONE; - - if (m_mcu_lines_left == 0) - { - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - if (m_progressive_flag) - load_next_row(); - else - decode_next_row(); - - // Find the EOI marker if that was the last row. 
- if (m_total_lines_left <= m_max_mcu_y_size) - find_eoi(); - - m_mcu_lines_left = m_max_mcu_y_size; - } - - if (m_freq_domain_chroma_upsample) - { - expanded_convert(); - *pScan_line = m_pScan_line_0; - } - else - { - switch (m_scan_type) - { - case JPGD_YH2V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H2V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH2V1: - { - H2V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_YH1V2: - { - if ((m_mcu_lines_left & 1) == 0) - { - H1V2Convert(); - *pScan_line = m_pScan_line_0; - } - else - *pScan_line = m_pScan_line_1; - - break; - } - case JPGD_YH1V1: - { - H1V1Convert(); - *pScan_line = m_pScan_line_0; - break; - } - case JPGD_GRAYSCALE: - { - gray_convert(); - *pScan_line = m_pScan_line_0; - - break; - } - } - } - - *pScan_line_len = m_real_dest_bytes_per_scan_line; - - m_mcu_lines_left--; - m_total_lines_left--; - - return JPGD_SUCCESS; - } - - // Creates the tables needed for efficient Huffman decoding. - void jpeg_decoder::make_huff_table(int index, huff_tables *pH) - { - int p, i, l, si; - uint8 huffsize[257]; - uint huffcode[257]; - uint code; - uint subtree; - int code_size; - int lastp; - int nextfreeentry; - int currententry; - - pH->ac_table = m_huff_ac[index] != 0; - - p = 0; - - for (l = 1; l <= 16; l++) - { - for (i = 1; i <= m_huff_num[index][l]; i++) - huffsize[p++] = static_cast(l); - } - - huffsize[p] = 0; - - lastp = p; - - code = 0; - si = huffsize[0]; - p = 0; - - while (huffsize[p]) - { - while (huffsize[p] == si) - { - huffcode[p++] = code; - code++; - } - - code <<= 1; - si++; - } - - memset(pH->look_up, 0, sizeof(pH->look_up)); - memset(pH->look_up2, 0, sizeof(pH->look_up2)); - memset(pH->tree, 0, sizeof(pH->tree)); - memset(pH->code_size, 0, sizeof(pH->code_size)); - - nextfreeentry = -1; - - p = 0; - - while (p < lastp) - { - i = m_huff_val[index][p]; - code = huffcode[p]; - code_size = huffsize[p]; - - pH->code_size[i] = static_cast(code_size); - - if (code_size <= 8) - { - code <<= (8 - code_size); - - for (l = 1 << (8 - code_size); l > 0; l--) - { - JPGD_ASSERT(i < 256); - - pH->look_up[code] = i; - - bool has_extrabits = false; - int extra_bits = 0; - int num_extra_bits = i & 15; - - int bits_to_fetch = code_size; - if (num_extra_bits) - { - int total_codesize = code_size + num_extra_bits; - if (total_codesize <= 8) - { - has_extrabits = true; - extra_bits = ((1 << num_extra_bits) - 1) & (code >> (8 - total_codesize)); - JPGD_ASSERT(extra_bits <= 0x7FFF); - bits_to_fetch += num_extra_bits; - } - } - - if (!has_extrabits) - pH->look_up2[code] = i | (bits_to_fetch << 8); - else - pH->look_up2[code] = i | 0x8000 | (extra_bits << 16) | (bits_to_fetch << 8); - - code++; - } - } - else - { - subtree = (code >> (code_size - 8)) & 0xFF; - - currententry = pH->look_up[subtree]; - - if (currententry == 0) - { - pH->look_up[subtree] = currententry = nextfreeentry; - pH->look_up2[subtree] = currententry = nextfreeentry; - - nextfreeentry -= 2; - } - - code <<= (16 - (code_size - 8)); - - for (l = code_size; l > 9; l--) - { - if ((code & 0x8000) == 0) - currententry--; - - if (pH->tree[-currententry - 1] == 0) - { - pH->tree[-currententry - 1] = nextfreeentry; - - currententry = nextfreeentry; - - nextfreeentry -= 2; - } - else - currententry = pH->tree[-currententry - 1]; - - code <<= 1; - } - - if ((code & 0x8000) == 0) - currententry--; - - pH->tree[-currententry - 1] = i; - } - - p++; - } - } - - // Verifies the quantization tables needed for 
this scan are available. - void jpeg_decoder::check_quant_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - if (m_quant[m_comp_quant[m_comp_list[i]]] == NULL) - stop_decoding(JPGD_UNDEFINED_QUANT_TABLE); - } - - // Verifies that all the Huffman tables needed for this scan are available. - void jpeg_decoder::check_huff_tables() - { - for (int i = 0; i < m_comps_in_scan; i++) - { - if ((m_spectral_start == 0) && (m_huff_num[m_comp_dc_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - - if ((m_spectral_end > 0) && (m_huff_num[m_comp_ac_tab[m_comp_list[i]]] == NULL)) - stop_decoding(JPGD_UNDEFINED_HUFF_TABLE); - } - - for (int i = 0; i < JPGD_MAX_HUFF_TABLES; i++) - if (m_huff_num[i]) - { - if (!m_pHuff_tabs[i]) - m_pHuff_tabs[i] = (huff_tables *)alloc(sizeof(huff_tables)); - - make_huff_table(i, m_pHuff_tabs[i]); - } - } - - // Determines the component order inside each MCU. - // Also calcs how many MCU's are on each row, etc. - void jpeg_decoder::calc_mcu_block_order() - { - int component_num, component_id; - int max_h_samp = 0, max_v_samp = 0; - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - if (m_comp_h_samp[component_id] > max_h_samp) - max_h_samp = m_comp_h_samp[component_id]; - - if (m_comp_v_samp[component_id] > max_v_samp) - max_v_samp = m_comp_v_samp[component_id]; - } - - for (component_id = 0; component_id < m_comps_in_frame; component_id++) - { - m_comp_h_blocks[component_id] = ((((m_image_x_size * m_comp_h_samp[component_id]) + (max_h_samp - 1)) / max_h_samp) + 7) / 8; - m_comp_v_blocks[component_id] = ((((m_image_y_size * m_comp_v_samp[component_id]) + (max_v_samp - 1)) / max_v_samp) + 7) / 8; - } - - if (m_comps_in_scan == 1) - { - m_mcus_per_row = m_comp_h_blocks[m_comp_list[0]]; - m_mcus_per_col = m_comp_v_blocks[m_comp_list[0]]; - } - else - { - m_mcus_per_row = (((m_image_x_size + 7) / 8) + (max_h_samp - 1)) / max_h_samp; - m_mcus_per_col = (((m_image_y_size + 7) / 8) + (max_v_samp - 1)) / max_v_samp; - } - - if (m_comps_in_scan == 1) - { - m_mcu_org[0] = m_comp_list[0]; - - m_blocks_per_mcu = 1; - } - else - { - m_blocks_per_mcu = 0; - - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - int num_blocks; - - component_id = m_comp_list[component_num]; - - num_blocks = m_comp_h_samp[component_id] * m_comp_v_samp[component_id]; - - while (num_blocks--) - m_mcu_org[m_blocks_per_mcu++] = component_id; - } - } - } - - // Starts a new scan. - int jpeg_decoder::init_scan() - { - if (!locate_sos_marker()) - return JPGD_FALSE; - - calc_mcu_block_order(); - - check_huff_tables(); - - check_quant_tables(); - - memset(m_last_dc_val, 0, m_comps_in_frame * sizeof(uint)); - - m_eob_run = 0; - - if (m_restart_interval) - { - m_restarts_left = m_restart_interval; - m_next_restart_num = 0; - } - - fix_in_buffer(); - - return JPGD_TRUE; - } - - // Starts a frame. Determines if the number of components or sampling factors - // are supported. 
- void jpeg_decoder::init_frame() - { - int i; - - if (m_comps_in_frame == 1) - { - if ((m_comp_h_samp[0] != 1) || (m_comp_v_samp[0] != 1)) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - m_scan_type = JPGD_GRAYSCALE; - m_max_blocks_per_mcu = 1; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if (m_comps_in_frame == 3) - { - if ( ((m_comp_h_samp[1] != 1) || (m_comp_v_samp[1] != 1)) || - ((m_comp_h_samp[2] != 1) || (m_comp_v_samp[2] != 1)) ) - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - - if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH1V1; - - m_max_blocks_per_mcu = 3; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) - { - m_scan_type = JPGD_YH2V1; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 8; - } - else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH1V2; - m_max_blocks_per_mcu = 4; - m_max_mcu_x_size = 8; - m_max_mcu_y_size = 16; - } - else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) - { - m_scan_type = JPGD_YH2V2; - m_max_blocks_per_mcu = 6; - m_max_mcu_x_size = 16; - m_max_mcu_y_size = 16; - } - else - stop_decoding(JPGD_UNSUPPORTED_SAMP_FACTORS); - } - else - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - m_max_mcus_per_row = (m_image_x_size + (m_max_mcu_x_size - 1)) / m_max_mcu_x_size; - m_max_mcus_per_col = (m_image_y_size + (m_max_mcu_y_size - 1)) / m_max_mcu_y_size; - - // These values are for the *destination* pixels: after conversion. - if (m_scan_type == JPGD_GRAYSCALE) - m_dest_bytes_per_pixel = 1; - else - m_dest_bytes_per_pixel = 4; - - m_dest_bytes_per_scan_line = ((m_image_x_size + 15) & 0xFFF0) * m_dest_bytes_per_pixel; - - m_real_dest_bytes_per_scan_line = (m_image_x_size * m_dest_bytes_per_pixel); - - // Initialize two scan line buffers. - m_pScan_line_0 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - if ((m_scan_type == JPGD_YH1V2) || (m_scan_type == JPGD_YH2V2)) - m_pScan_line_1 = (uint8 *)alloc(m_dest_bytes_per_scan_line, true); - - m_max_blocks_per_row = m_max_mcus_per_row * m_max_blocks_per_mcu; - - // Should never happen - if (m_max_blocks_per_row > JPGD_MAX_BLOCKS_PER_ROW) - stop_decoding(JPGD_ASSERTION_ERROR); - - // Allocate the coefficient buffer, enough for one MCU - m_pMCU_coefficients = (jpgd_block_t*)alloc(m_max_blocks_per_mcu * 64 * sizeof(jpgd_block_t)); - - for (i = 0; i < m_max_blocks_per_mcu; i++) - m_mcu_block_max_zag[i] = 64; - - m_expanded_blocks_per_component = m_comp_h_samp[0] * m_comp_v_samp[0]; - m_expanded_blocks_per_mcu = m_expanded_blocks_per_component * m_comps_in_frame; - m_expanded_blocks_per_row = m_max_mcus_per_row * m_expanded_blocks_per_mcu; - // Freq. domain chroma upsampling is only supported for H2V2 subsampling factor. -// BEGIN EPIC MOD -#if JPGD_SUPPORT_FREQ_DOMAIN_UPSAMPLING - m_freq_domain_chroma_upsample = (m_expanded_blocks_per_mcu == 4*3); -#else - m_freq_domain_chroma_upsample = 0; -#endif -// END EPIC MOD - - if (m_freq_domain_chroma_upsample) - m_pSample_buf = (uint8 *)alloc(m_expanded_blocks_per_row * 64); - else - m_pSample_buf = (uint8 *)alloc(m_max_blocks_per_row * 64); - - m_total_lines_left = m_image_y_size; - - m_mcu_lines_left = 0; - - create_look_ups(); - } - - // The coeff_buf series of methods originally stored the coefficients - // into a "virtual" file which was located in EMS, XMS, or a disk file. A cache - // was used to make this process more efficient. Now, we can store the entire - // thing in RAM. 
- jpeg_decoder::coeff_buf* jpeg_decoder::coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y) - { - coeff_buf* cb = (coeff_buf*)alloc(sizeof(coeff_buf)); - - cb->block_num_x = block_num_x; - cb->block_num_y = block_num_y; - cb->block_len_x = block_len_x; - cb->block_len_y = block_len_y; - cb->block_size = (block_len_x * block_len_y) * sizeof(jpgd_block_t); - cb->pData = (uint8 *)alloc(cb->block_size * block_num_x * block_num_y, true); - return cb; - } - - inline jpgd_block_t *jpeg_decoder::coeff_buf_getp(coeff_buf *cb, int block_x, int block_y) - { - JPGD_ASSERT((block_x < cb->block_num_x) && (block_y < cb->block_num_y)); - return (jpgd_block_t *)(cb->pData + block_x * cb->block_size + block_y * (cb->block_size * cb->block_num_x)); - } - - // The following methods decode the various types of m_blocks encountered - // in progressively encoded images. - void jpeg_decoder::decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, r; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - if ((s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_dc_tab[component_id]])) != 0) - { - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - } - - pD->m_last_dc_val[component_id] = (s += pD->m_last_dc_val[component_id]); - - p[0] = static_cast(s << pD->m_successive_low); - } - - void jpeg_decoder::decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - if (pD->get_bits_no_markers(1)) - { - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_dc_coeffs[component_id], block_x, block_y); - - p[0] |= (1 << pD->m_successive_low); - } - } - - void jpeg_decoder::decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int k, s, r; - - if (pD->m_eob_run) - { - pD->m_eob_run--; - return; - } - - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - for (k = pD->m_spectral_start; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if ((k += r) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - - r = pD->get_bits_no_markers(s); - s = HUFF_EXTEND(r, s); - - p[g_ZAG[k]] = static_cast(s << pD->m_successive_low); - } - else - { - if (r == 15) - { - if ((k += 15) > 63) - pD->stop_decoding(JPGD_DECODE_ERROR); - } - else - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - pD->m_eob_run--; - - break; - } - } - } - } - - void jpeg_decoder::decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y) - { - int s, k, r; - int p1 = 1 << pD->m_successive_low; - int m1 = (-1) << pD->m_successive_low; - jpgd_block_t *p = pD->coeff_buf_getp(pD->m_ac_coeffs[component_id], block_x, block_y); - - k = pD->m_spectral_start; - - if (pD->m_eob_run == 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - s = pD->huff_decode(pD->m_pHuff_tabs[pD->m_comp_ac_tab[component_id]]); - - r = s >> 4; - s &= 15; - - if (s) - { - if (s != 1) - pD->stop_decoding(JPGD_DECODE_ERROR); - - if (pD->get_bits_no_markers(1)) - s = p1; - else - s = m1; - } - else - { - if (r != 15) - { - pD->m_eob_run = 1 << r; - - if (r) - pD->m_eob_run += pD->get_bits_no_markers(r); - - break; - } - } - - do - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if 
(*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - else - { - if (--r < 0) - break; - } - - k++; - - } while (k <= pD->m_spectral_end); - - if ((s) && (k < 64)) - { - p[g_ZAG[k]] = static_cast(s); - } - } - } - - if (pD->m_eob_run > 0) - { - for ( ; k <= pD->m_spectral_end; k++) - { - // BEGIN EPIC MOD - JPGD_ASSERT(k < 64); - // END EPIC MOD - - jpgd_block_t *this_coef = p + g_ZAG[k]; - - if (*this_coef != 0) - { - if (pD->get_bits_no_markers(1)) - { - if ((*this_coef & p1) == 0) - { - if (*this_coef >= 0) - *this_coef = static_cast(*this_coef + p1); - else - *this_coef = static_cast(*this_coef + m1); - } - } - } - } - - pD->m_eob_run--; - } - } - - // Decode a scan in a progressively encoded image. - void jpeg_decoder::decode_scan(pDecode_block_func decode_block_func) - { - int mcu_row, mcu_col, mcu_block; - int block_x_mcu[JPGD_MAX_COMPONENTS], m_block_y_mcu[JPGD_MAX_COMPONENTS]; - - memset(m_block_y_mcu, 0, sizeof(m_block_y_mcu)); - - for (mcu_col = 0; mcu_col < m_mcus_per_col; mcu_col++) - { - int component_num, component_id; - - memset(block_x_mcu, 0, sizeof(block_x_mcu)); - - for (mcu_row = 0; mcu_row < m_mcus_per_row; mcu_row++) - { - int block_x_mcu_ofs = 0, block_y_mcu_ofs = 0; - - if ((m_restart_interval) && (m_restarts_left == 0)) - process_restart(); - - for (mcu_block = 0; mcu_block < m_blocks_per_mcu; mcu_block++) - { - component_id = m_mcu_org[mcu_block]; - - decode_block_func(this, component_id, block_x_mcu[component_id] + block_x_mcu_ofs, m_block_y_mcu[component_id] + block_y_mcu_ofs); - - if (m_comps_in_scan == 1) - block_x_mcu[component_id]++; - else - { - if (++block_x_mcu_ofs == m_comp_h_samp[component_id]) - { - block_x_mcu_ofs = 0; - - if (++block_y_mcu_ofs == m_comp_v_samp[component_id]) - { - block_y_mcu_ofs = 0; - block_x_mcu[component_id] += m_comp_h_samp[component_id]; - } - } - } - } - - m_restarts_left--; - } - - if (m_comps_in_scan == 1) - m_block_y_mcu[m_comp_list[0]]++; - else - { - for (component_num = 0; component_num < m_comps_in_scan; component_num++) - { - component_id = m_comp_list[component_num]; - m_block_y_mcu[component_id] += m_comp_v_samp[component_id]; - } - } - } - } - - // Decode a progressively encoded image. - void jpeg_decoder::init_progressive() - { - int i; - - if (m_comps_in_frame == 4) - stop_decoding(JPGD_UNSUPPORTED_COLORSPACE); - - // Allocate the coefficient buffers. 
- for (i = 0; i < m_comps_in_frame; i++) - { - m_dc_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 1, 1); - m_ac_coeffs[i] = coeff_buf_open(m_max_mcus_per_row * m_comp_h_samp[i], m_max_mcus_per_col * m_comp_v_samp[i], 8, 8); - } - - for ( ; ; ) - { - int dc_only_scan, refinement_scan; - pDecode_block_func decode_block_func; - - if (!init_scan()) - break; - - dc_only_scan = (m_spectral_start == 0); - refinement_scan = (m_successive_high != 0); - - if ((m_spectral_start > m_spectral_end) || (m_spectral_end > 63)) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if (dc_only_scan) - { - if (m_spectral_end) - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - } - else if (m_comps_in_scan != 1) /* AC scans can only contain one component */ - stop_decoding(JPGD_BAD_SOS_SPECTRAL); - - if ((refinement_scan) && (m_successive_low != m_successive_high - 1)) - stop_decoding(JPGD_BAD_SOS_SUCCESSIVE); - - if (dc_only_scan) - { - if (refinement_scan) - decode_block_func = decode_block_dc_refine; - else - decode_block_func = decode_block_dc_first; - } - else - { - if (refinement_scan) - decode_block_func = decode_block_ac_refine; - else - decode_block_func = decode_block_ac_first; - } - - decode_scan(decode_block_func); - - m_bits_left = 16; - get_bits(16); - get_bits(16); - } - - m_comps_in_scan = m_comps_in_frame; - - for (i = 0; i < m_comps_in_frame; i++) - m_comp_list[i] = i; - - calc_mcu_block_order(); - } - - void jpeg_decoder::init_sequential() - { - if (!init_scan()) - stop_decoding(JPGD_UNEXPECTED_MARKER); - } - - void jpeg_decoder::decode_start() - { - init_frame(); - - if (m_progressive_flag) - init_progressive(); - else - init_sequential(); - } - - void jpeg_decoder::decode_init(jpeg_decoder_stream *pStream) - { - init(pStream); - locate_sof_marker(); - } - - jpeg_decoder::jpeg_decoder(jpeg_decoder_stream *pStream) - { - if (setjmp(m_jmp_state)) - return; - decode_init(pStream); - } - - int jpeg_decoder::begin_decoding() - { - if (m_ready_flag) - return JPGD_SUCCESS; - - if (m_error_code) - return JPGD_FAILED; - - if (setjmp(m_jmp_state)) - return JPGD_FAILED; - - decode_start(); - - m_ready_flag = true; - - return JPGD_SUCCESS; - } - - jpeg_decoder::~jpeg_decoder() - { - free_all_blocks(); - } - - jpeg_decoder_file_stream::jpeg_decoder_file_stream() - { - m_pFile = NULL; - m_eof_flag = false; - m_error_flag = false; - } - - void jpeg_decoder_file_stream::close() - { - if (m_pFile) - { - fclose(m_pFile); - m_pFile = NULL; - } - - m_eof_flag = false; - m_error_flag = false; - } - - jpeg_decoder_file_stream::~jpeg_decoder_file_stream() - { - close(); - } - - bool jpeg_decoder_file_stream::open(const char *Pfilename) - { - close(); - - m_eof_flag = false; - m_error_flag = false; - -#if defined(_MSC_VER) - m_pFile = NULL; - fopen_s(&m_pFile, Pfilename, "rb"); -#else - m_pFile = fopen(Pfilename, "rb"); -#endif - return m_pFile != NULL; - } - - int jpeg_decoder_file_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - if (!m_pFile) - return -1; - - if (m_eof_flag) - { - *pEOF_flag = true; - return 0; - } - - if (m_error_flag) - return -1; - - int bytes_read = static_cast(fread(pBuf, 1, max_bytes_to_read, m_pFile)); - if (bytes_read < max_bytes_to_read) - { - if (ferror(m_pFile)) - { - m_error_flag = true; - return -1; - } - - m_eof_flag = true; - *pEOF_flag = true; - } - - return bytes_read; - } - - bool jpeg_decoder_mem_stream::open(const uint8 *pSrc_data, uint size) - { - close(); - m_pSrc_data = pSrc_data; - m_ofs = 0; - m_size = size; - 
return true; - } - - int jpeg_decoder_mem_stream::read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) - { - *pEOF_flag = false; - - if (!m_pSrc_data) - return -1; - - uint bytes_remaining = m_size - m_ofs; - if ((uint)max_bytes_to_read > bytes_remaining) - { - max_bytes_to_read = bytes_remaining; - *pEOF_flag = true; - } - - memcpy(pBuf, m_pSrc_data + m_ofs, max_bytes_to_read); - m_ofs += max_bytes_to_read; - - return max_bytes_to_read; - } - - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps) - { - if (!actual_comps) - return NULL; - *actual_comps = 0; - - if ((!pStream) || (!width) || (!height) || (!req_comps)) - return NULL; - - if ((req_comps != 1) && (req_comps != 3) && (req_comps != 4)) - return NULL; - - jpeg_decoder decoder(pStream); - if (decoder.get_error_code() != JPGD_SUCCESS) - return NULL; - - const int image_width = decoder.get_width(), image_height = decoder.get_height(); - *width = image_width; - *height = image_height; - *actual_comps = decoder.get_num_components(); - - if (decoder.begin_decoding() != JPGD_SUCCESS) - return NULL; - - const int dst_bpl = image_width * req_comps; - - uint8 *pImage_data = (uint8*)jpgd_malloc(dst_bpl * image_height); - if (!pImage_data) - return NULL; - - for (int y = 0; y < image_height; y++) - { - const uint8* pScan_line = 0; - uint scan_line_len; - if (decoder.decode((const void**)&pScan_line, &scan_line_len) != JPGD_SUCCESS) - { - jpgd_free(pImage_data); - return NULL; - } - - uint8 *pDst = pImage_data + y * dst_bpl; - - if (((req_comps == 4) && (decoder.get_num_components() == 3)) || - ((req_comps == 1) && (decoder.get_num_components() == 1))) - { - memcpy(pDst, pScan_line, dst_bpl); - } - else if (decoder.get_num_components() == 1) - { - if (req_comps == 3) - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst += 3; - } - } - else - { - for (int x = 0; x < image_width; x++) - { - uint8 luma = pScan_line[x]; - pDst[0] = luma; - pDst[1] = luma; - pDst[2] = luma; - pDst[3] = 255; - pDst += 4; - } - } - } - else if (decoder.get_num_components() == 3) - { - if (req_comps == 1) - { - const int YR = 19595, YG = 38470, YB = 7471; - for (int x = 0; x < image_width; x++) - { - int r = pScan_line[x*4+0]; - int g = pScan_line[x*4+1]; - int b = pScan_line[x*4+2]; - *pDst++ = static_cast((r * YR + g * YG + b * YB + 32768) >> 16); - } - } - else - { - for (int x = 0; x < image_width; x++) - { - pDst[0] = pScan_line[x*4+0]; - pDst[1] = pScan_line[x*4+1]; - pDst[2] = pScan_line[x*4+2]; - pDst += 3; - } - } - } - } - - return pImage_data; - } - -// BEGIN EPIC MOD - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format) - { - jpg_format = (ERGBFormatJPG)format; -// EMD EPIC MOD - jpgd::jpeg_decoder_mem_stream mem_stream(pSrc_data, src_data_size); - return decompress_jpeg_image_from_stream(&mem_stream, width, height, actual_comps, req_comps); - } - - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps) - { - jpgd::jpeg_decoder_file_stream file_stream; - if (!file_stream.open(pSrc_filename)) - return NULL; - return decompress_jpeg_image_from_stream(&file_stream, width, height, actual_comps, req_comps); - } - -} // namespace jpgd diff --git 
a/spaces/fb700/chatglm-fitness-RLHF/speaker_encoder/train.py b/spaces/fb700/chatglm-fitness-RLHF/speaker_encoder/train.py deleted file mode 100644 index 2e9485afbeead6a063b5ef69a85f05757d6c91ff..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/speaker_encoder/train.py +++ /dev/null @@ -1,125 +0,0 @@ -from speaker_encoder.visualizations import Visualizations -from speaker_encoder.data_objects import SpeakerVerificationDataLoader, SpeakerVerificationDataset -from speaker_encoder.params_model import * -from speaker_encoder.model import SpeakerEncoder -from utils.profiler import Profiler -from pathlib import Path -import torch - -def sync(device: torch.device): - # FIXME - return - # For correct profiling (cuda operations are async) - if device.type == "cuda": - torch.cuda.synchronize(device) - -def train(run_id: str, clean_data_root: Path, models_dir: Path, umap_every: int, save_every: int, - backup_every: int, vis_every: int, force_restart: bool, visdom_server: str, - no_visdom: bool): - # Create a dataset and a dataloader - dataset = SpeakerVerificationDataset(clean_data_root) - loader = SpeakerVerificationDataLoader( - dataset, - speakers_per_batch, # 64 - utterances_per_speaker, # 10 - num_workers=8, - ) - - # Setup the device on which to run the forward pass and the loss. These can be different, - # because the forward pass is faster on the GPU whereas the loss is often (depending on your - # hyperparameters) faster on the CPU. - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - # FIXME: currently, the gradient is None if loss_device is cuda - loss_device = torch.device("cpu") - - # Create the model and the optimizer - model = SpeakerEncoder(device, loss_device) - optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate_init) - init_step = 1 - - # Configure file path for the model - state_fpath = models_dir.joinpath(run_id + ".pt") - backup_dir = models_dir.joinpath(run_id + "_backups") - - # Load any existing model - if not force_restart: - if state_fpath.exists(): - print("Found existing model \"%s\", loading it and resuming training." % run_id) - checkpoint = torch.load(state_fpath) - init_step = checkpoint["step"] - model.load_state_dict(checkpoint["model_state"]) - optimizer.load_state_dict(checkpoint["optimizer_state"]) - optimizer.param_groups[0]["lr"] = learning_rate_init - else: - print("No model \"%s\" found, starting training from scratch." 
% run_id) - else: - print("Starting the training from scratch.") - model.train() - - # Initialize the visualization environment - vis = Visualizations(run_id, vis_every, server=visdom_server, disabled=no_visdom) - vis.log_dataset(dataset) - vis.log_params() - device_name = str(torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU") - vis.log_implementation({"Device": device_name}) - - # Training loop - profiler = Profiler(summarize_every=10, disabled=False) - for step, speaker_batch in enumerate(loader, init_step): - profiler.tick("Blocking, waiting for batch (threaded)") - - # Forward pass - inputs = torch.from_numpy(speaker_batch.data).to(device) - sync(device) - profiler.tick("Data to %s" % device) - embeds = model(inputs) - sync(device) - profiler.tick("Forward pass") - embeds_loss = embeds.view((speakers_per_batch, utterances_per_speaker, -1)).to(loss_device) - loss, eer = model.loss(embeds_loss) - sync(loss_device) - profiler.tick("Loss") - - # Backward pass - model.zero_grad() - loss.backward() - profiler.tick("Backward pass") - model.do_gradient_ops() - optimizer.step() - profiler.tick("Parameter update") - - # Update visualizations - # learning_rate = optimizer.param_groups[0]["lr"] - vis.update(loss.item(), eer, step) - - # Draw projections and save them to the backup folder - if umap_every != 0 and step % umap_every == 0: - print("Drawing and saving projections (step %d)" % step) - backup_dir.mkdir(exist_ok=True) - projection_fpath = backup_dir.joinpath("%s_umap_%06d.png" % (run_id, step)) - embeds = embeds.detach().cpu().numpy() - vis.draw_projections(embeds, utterances_per_speaker, step, projection_fpath) - vis.save() - - # Overwrite the latest version of the model - if save_every != 0 and step % save_every == 0: - print("Saving the model (step %d)" % step) - torch.save({ - "step": step + 1, - "model_state": model.state_dict(), - "optimizer_state": optimizer.state_dict(), - }, state_fpath) - - # Make a backup - if backup_every != 0 and step % backup_every == 0: - print("Making a backup (step %d)" % step) - backup_dir.mkdir(exist_ok=True) - backup_fpath = backup_dir.joinpath("%s_bak_%06d.pt" % (run_id, step)) - torch.save({ - "step": step + 1, - "model_state": model.state_dict(), - "optimizer_state": optimizer.state_dict(), - }, backup_fpath) - - profiler.tick("Extras (visualizations, saving)") - diff --git a/spaces/fclong/summary/fengshen/examples/finetune_taiyi_stable_diffusion/evaluate.sh b/spaces/fclong/summary/fengshen/examples/finetune_taiyi_stable_diffusion/evaluate.sh deleted file mode 100644 index 8b7d5412f7bd75cb0700cca0699e029a022db7a7..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/finetune_taiyi_stable_diffusion/evaluate.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=evaluate_model # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks-per-node=1 # number of tasks to run per node -#SBATCH --cpus-per-task=5 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:1 # number of gpus per node -#SBATCH -o inference_log/%x-%j.log # output and error log file names (%x for job id) -#SBATCH -p batch -#SBATCH --qos=ai4cogsys - -export SCRIPT_PATH=./evaluate_model.py - -MODEL_PATH='' - -srun python $SCRIPT_PATH $MODEL_PATH \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/examples/pretrain_erlangshen_deberta_v2/pretrain_deberta.py b/spaces/fclong/summary/fengshen/examples/pretrain_erlangshen_deberta_v2/pretrain_deberta.py deleted file 
mode 100644 index e6bd2f81781c5bfcdd55aa1514104f8dec5d8f50..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/pretrain_erlangshen_deberta_v2/pretrain_deberta.py +++ /dev/null @@ -1,227 +0,0 @@ -from dataclasses import dataclass -from transformers import ( - DebertaV2Config, - DebertaV2ForMaskedLM, - AutoTokenizer, -) -from pytorch_lightning import ( - LightningModule, - Trainer, -) -from pytorch_lightning.callbacks import ( - LearningRateMonitor, -) -import argparse -import torch -import os -import numpy as np -from fengshen.data.universal_datamodule import UniversalDataModule -from fengshen.data.data_utils.truncate_utils import truncate_segments -from fengshen.data.data_utils.token_type_utils import create_tokens_and_tokentypes -from fengshen.data.data_utils.mask_utils import create_masked_lm_predictions -from fengshen.models.model_utils import ( - add_module_args, - configure_optimizers, - get_total_steps, -) -from fengshen.utils.universal_checkpoint import UniversalCheckpoint -from torch.utils.data._utils.collate import default_collate - -SHOW_DATA = False - - -@dataclass -class DeBERTaV2Collator: - ''' - 由input处理成samples,也就是最终模型的输入 - 其中主要处理逻辑在__call__里 - 包含Mask任务,使用Whole Word Mask - ''' - tokenizer: None # 分词 - max_seq_length: 512 - masked_lm_prob: 0.15 - content_key: str = 'text' - # 一些预处理操作 - - def setup(self): - self.np_rng = np.random.RandomState(seed=42) - inv_vocab = {v: k for k, v in self.tokenizer.vocab.items()} - self.vocab_id_list = list(inv_vocab.keys()) - self.vocab_id_to_token_dict = inv_vocab - import jieba_fast - self.zh_tokenizer = jieba_fast.lcut - - def __call__(self, samples): - ''' - samples: 一个sample长这样{"text": "hello world"} - ''' - model_inputs = [] - for s in samples: - tokenized_sentences = self.tokenizer.convert_tokens_to_ids( - self.tokenizer.tokenize(s[self.content_key])) - if len(tokenized_sentences) == 0: - print('find empty sentence') - continue - tokens_a = tokenized_sentences - # max_seq_length - 3因为还需要拼上[CLS] [SEP] [SEP] - if len(tokens_a) == 0: - continue - _ = truncate_segments(tokens_a, [], len(tokens_a), - 0, self.max_seq_length-3, self.np_rng) - # Build tokens and toketypes. - tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, [], - self.tokenizer.cls_token_id, self.tokenizer.sep_token_id) - # Masking. - max_predictions_per_seq = self.masked_lm_prob * len(tokens) - (tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions( - tokens, self.vocab_id_list, self.vocab_id_to_token_dict, self.masked_lm_prob, - self.tokenizer.cls_token_id, self.tokenizer.sep_token_id, self.tokenizer.mask_token_id, - max_predictions_per_seq, self.np_rng, - masking_style='bert', - zh_tokenizer=self.zh_tokenizer) - - # Some checks. - num_tokens = len(tokens) - padding_length = self.max_seq_length - num_tokens - assert padding_length >= 0 - assert len(tokentypes) == num_tokens - assert len(masked_positions) == len(masked_labels) - - # Tokens and token types. - filler = [self.tokenizer.pad_token_id] * padding_length - tokens_np = np.array(tokens + filler, dtype=np.int64) - tokentypes_np = np.array(tokentypes + filler, dtype=np.int64) - - # Padding mask. - padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, - dtype=np.int64) - - # Lables and loss mask. 
- labels = [-100] * self.max_seq_length - for i in range(len(masked_positions)): - assert masked_positions[i] < num_tokens - labels[masked_positions[i]] = masked_labels[i] - labels_np = np.array(labels, dtype=np.int64) - model_inputs.append( - { - 'input_ids': tokens_np, - 'attention_mask': padding_mask_np, - 'token_type_ids': tokentypes_np, - 'labels': labels_np, - } - ) - return default_collate(model_inputs) - - -class ErlangshenDeBERTaV2(LightningModule): - @staticmethod - def add_module_specific_args(parent_parser): - parser = parent_parser.add_argument_group('Erlangshen Bert') - parser.add_argument('--masked_lm_prob', type=float, default=0.15) - parser.add_argument('--max_seq_length', type=int, default=512) - parser.add_argument('--sample_content_key', type=str, default='text') - return parent_parser - - def __init__(self, args, tokenizer, **kwargs) -> None: - super().__init__() - self.save_hyperparameters(args) - config = DebertaV2Config.from_pretrained(args.model_path) - self.config = config - self.tokenizer = tokenizer - self.model = DebertaV2ForMaskedLM(config) - - def setup(self, stage) -> None: - if stage == 'fit': - self.total_steps = get_total_steps(self.trainer, self.hparams) - print('Total steps: {}' .format(self.total_steps)) - - def configure_optimizers(self): - return configure_optimizers(self) - - def forward(self, **batch): - return self.model(**batch) - - def detokenize(self, token_ids): - toks = self.tokenizer.convert_ids_to_tokens(token_ids) - return self.tokenizer.convert_tokens_to_string(toks) - - def comput_metrix(self, logits, labels): - y_pred = torch.argmax(logits, dim=-1) - y_pred = y_pred.view(size=(-1,)) - y_true = labels.view(size=(-1,)).float() - corr = torch.eq(y_pred, y_true) - acc = torch.sum(corr.float())/labels.shape[0] - return acc - - def training_step(self, batch, batch_idx): - if self.trainer.global_rank == 0: - global SHOW_DATA - if not SHOW_DATA: - print(self.config) - print(self.model) - SHOW_DATA = True - print('source: {}'.format(batch['input_ids'][0])) - print('target: {}'.format(batch['labels'][0])) - print('source: {}'.format(self.detokenize(batch['input_ids'][0]))) - label_idx = batch['labels'][0] != -100 - print('target: {}'.format(self.detokenize( - batch['labels'][0][label_idx]))) - output = self(**batch) - self.log('train_loss', output.loss, sync_dist=True) - label_idx = batch['labels'] != -100 - acc = self.comput_metrix( - output.logits[label_idx].view(-1, output.logits.size(-1)), batch['labels'][label_idx]) - self.log('train_acc', acc, sync_dist=True) - return output.loss - - def validation_step(self, batch, batch_idx): - output = self(**batch) - self.log('val_loss', output.loss, sync_dist=True) - return output.loss - - def on_load_checkpoint(self, checkpoint) -> None: - # 兼容低版本lightning,低版本lightning从ckpt起来时steps数会被重置为0 - global_step_offset = checkpoint["global_step"] - if 'global_samples' in checkpoint: - self.consumed_samples = checkpoint['global_samples'] - self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset - - -if __name__ == '__main__': - args_parser = argparse.ArgumentParser() - args_parser = add_module_args(args_parser) - args_parser = UniversalDataModule.add_data_specific_args(args_parser) - args_parser = Trainer.add_argparse_args(args_parser) - args_parser = ErlangshenDeBERTaV2.add_module_specific_args(args_parser) - args_parser = UniversalCheckpoint.add_argparse_args(args_parser) - args = args_parser.parse_args() - - tokenizer = AutoTokenizer.from_pretrained(args.model_path) - collate_fn = 
DeBERTaV2Collator( - tokenizer=tokenizer, - max_seq_length=args.max_seq_length, - masked_lm_prob=args.masked_lm_prob, - content_key=args.sample_content_key, - ) - collate_fn.setup() - data_module = UniversalDataModule(tokenizer=tokenizer, args=args, collate_fn=collate_fn) - print('data load complete') - - model = ErlangshenDeBERTaV2(args, tokenizer=tokenizer) - print('model load complete') - - lr_monitor = LearningRateMonitor(logging_interval='step') - checkpoint_callback = UniversalCheckpoint(args) - - # 做兼容,如果目录不存在的话把这个参数去掉,不然会报错 - if args.load_ckpt_path is not None and \ - not os.path.exists(args.load_ckpt_path): - print('--------warning no checkpoint found--------, remove args') - args.load_ckpt_path = None - - trainer = Trainer.from_argparse_args(args, - callbacks=[ - lr_monitor, - checkpoint_callback]) - - trainer.fit(model, data_module, ckpt_path=args.load_ckpt_path) diff --git a/spaces/fclong/summary/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py b/spaces/fclong/summary/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py deleted file mode 100644 index 2aeef8c860864d138b0c970baca72a568bf51a19..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py +++ /dev/null @@ -1,37 +0,0 @@ -import time -from builtins import print -import argparse - -import torch -# os.environ["CUDA_VISIBLE_DEVICES"] = '3' - - -def get_time_str(): - return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - -def main(): - total_parser = argparse.ArgumentParser("Pretrain Unsupervise.") - total_parser.add_argument('--ckpt_path', default=None, type=str) - total_parser.add_argument('--bin_path', default=None, type=str) - total_parser.add_argument('--rm_prefix', default=None, type=str) - # * Args for base model - args = total_parser.parse_args() - print('Argument parse success.') - state_dict = torch.load(args.ckpt_path)['module'] - new_state_dict = {} - - if args.rm_prefix is not None: - prefix_len = len(args.rm_prefix) - for k, v in state_dict.items(): - if k[:prefix_len] == args.rm_prefix: - new_state_dict[k[prefix_len:]] = v - else: - new_state_dict[k] = v - else: - new_state_dict = state_dict - torch.save(new_state_dict, args.bin_path) - - -if __name__ == '__main__': - main() diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bubble Shooter The Ultimate Puzzle Game for Android - Download APK Now.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bubble Shooter The Ultimate Puzzle Game for Android - Download APK Now.md deleted file mode 100644 index 1503692d07578c292ebbfaa575bb16bd96c41599..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bubble Shooter The Ultimate Puzzle Game for Android - Download APK Now.md +++ /dev/null @@ -1,64 +0,0 @@ -
        -

        Bubble Shooter APK: A Fun and Addictive Puzzle Game for Android

        -

Do you love playing puzzle games on your Android device? Are you looking for a fun and addictive game that can keep you entertained for hours? If yes, then you should try bubble shooter APK, one of the most popular bubble-shooting puzzle games, which you can play anytime and anywhere, even without an internet connection. In this article, we will tell you everything you need to know about bubble shooter APK, including what it is, how to download and install it, how to play it, and why you should play it. Let's get started!

        -

        What is Bubble Shooter APK?

        -

        Bubble shooter APK is an Android game that belongs to the genre of puzzle games. It is based on the classic arcade game of shooting bubbles with a cannon and matching three or more bubbles of the same color to pop them and clear the board. Bubble shooter APK is developed by Bubble Shooter, a company that specializes in creating fun and challenging bubble shooting games for various platforms.
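To make that matching rule concrete for technically minded readers, here is a minimal Python sketch of the mechanic described above: it flood-fills a simple rectangular grid to find connected bubbles of the same color and pops the group once it reaches three or more. The board representation, color encoding, and function name are illustrative assumptions, not code from the actual app (which uses a staggered hexagonal grid and also drops bubbles left hanging after a pop).

```python
from collections import deque

def pop_group(board, row, col):
    """Flood-fill from (row, col); clear the group if it has 3+ same-color bubbles.

    `board` is a list of lists where each cell holds a color string or None.
    Illustrative sketch of the classic bubble-shooter rule, not the app's code.
    """
    color = board[row][col]
    if color is None:
        return 0
    seen = {(row, col)}
    queue = deque([(row, col)])
    while queue:
        r, c = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if (0 <= nr < len(board) and 0 <= nc < len(board[0])
                    and (nr, nc) not in seen and board[nr][nc] == color):
                seen.add((nr, nc))
                queue.append((nr, nc))
    if len(seen) >= 3:              # the "match three or more" rule
        for r, c in seen:
            board[r][c] = None      # pop the whole connected group
        return len(seen)
    return 0
```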

        -

        bubble shooter apk


        DOWNLOAD 🌟 https://gohhs.com/2uPuvi



        -

        The history of bubble shooter games

        -

        Bubble shooter games have a long history that dates back to 1994, when Puzzle Bobble, also known as Bust-a-Move, was released by Taito Corporation. It was a spin-off of the popular platform game Bubble Bobble, which featured two cute dragons named Bub and Bob who could blow bubbles to trap enemies. Puzzle Bobble was a huge success and spawned many sequels and clones over the years. Some of the most famous ones are Snood, Woobies, Zuma, Luxor, Bubble Witch Saga, and Bubble Shooter.

        -

        The features of bubble shooter APK

        -

Bubble shooter APK is one of the best bubble shooting games available for Android devices. It has many features that make it fun and addictive, such as:

- Over 3000 levels of varying difficulty and themes, from easy to hard, from forest to desert, from candy to jewels.
- Colorful and high-quality graphics and animations that create a lively and immersive atmosphere.
- Smooth and responsive gameplay that is easy to control and suitable for all ages and skills.
- Various power-ups and boosters that can help you pop more bubbles and score higher, such as bombs, fireballs, rainbow bubbles, and more.
- Daily rewards and bonuses that you can collect by playing the game regularly and completing challenges.
- Offline mode that allows you to play the game without an internet connection, so you can enjoy it anytime and anywhere.
- Leaderboards and achievements that you can share with your friends and other players around the world, and see who is the best bubble shooter.

        How to download and install bubble shooter APK?

        -

        If you want to play bubble shooter APK on your Android device, you need to download and install it first. Here are the requirements and steps for doing so:

        -

        The requirements for bubble shooter APK

        -

To download and install bubble shooter APK, you need to have:

- An Android device that runs on Android 4.1 or higher.
- At least 50 MB of free storage space on your device or SD card.
- A stable internet connection to download the APK file.

        The steps to download and install bubble shooter APK

        -

To download and install bubble shooter APK, you need to follow these steps:

1. Go to this link on your device's browser to access the official website of bubble shooter APK.
2. Tap on the green "Download" button to start downloading the APK file to your device.
3. Once the download is complete, locate the APK file on your device's file manager and tap on it to open it.
4. You may need to enable "Unknown Sources" in your device's settings to allow the installation of apps from sources other than Google Play Store.
5. Follow the instructions on the screen to install the app on your device.
6. Once the installation is done, you can launch the app from your app drawer or home screen and start playing bubble shooter APK.

Beyond installation, the game rewards you with stars and trophies that you can use to unlock more levels and themes, and with fun and excitement that you can share with your friends and other players online.

        Conclusion

        -

Bubble shooter APK is a fun and addictive puzzle game for Android devices that you can play anytime and anywhere, even without an internet connection. It is based on the classic arcade game of shooting bubbles with a cannon and matching three or more bubbles of the same color to pop them and clear the board. It has over 3000 levels of varying difficulty and themes, colorful and high-quality graphics and animations, smooth and responsive gameplay, various power-ups and boosters, daily rewards and bonuses, offline mode, leaderboards and achievements, and more. It can also improve your concentration, focus, problem-solving, logical thinking, creativity, imagination, memory, and attention, while providing stress relief and building patience, perseverance, and a sense of happiness and satisfaction. It is a game that can challenge you and reward you at the same time. If you love playing puzzle games on your Android device, you should definitely try bubble shooter APK. You will not regret it!

        -


        -

        Summary of the main points

        -

Here are the main points of this article:

- Bubble shooter APK is an Android game that belongs to the genre of puzzle games.
- It is based on the classic arcade game of shooting bubbles with a cannon and matching three or more bubbles of the same color to pop them and clear the board.
- It has many features that make it fun and addictive, such as over 3000 levels, colorful graphics, smooth gameplay, power-ups, rewards, offline mode, leaderboards, etc.
- It can be downloaded and installed easily from the official website of bubble shooter APK.
- It can be played easily by following the basic rules of aiming, shooting, matching, and popping bubbles.
- It can bring many benefits to the players, such as improving their concentration, focus, problem-solving, logical thinking, creativity, imagination, stress relief, happiness, satisfaction, patience, perseverance, memory, attention, etc.
- It can also challenge and reward the players with coins, gems, stars, trophies, fun, and excitement.

        FAQs about bubble shooter APK

        -

        Here are some frequently asked questions about bubble shooter APK:

        -
1. Is bubble shooter APK free to play?

   Yes, bubble shooter APK is free to play. However, it may contain some in-app purchases that can enhance your gaming experience.

2. Is bubble shooter APK safe to download and install?

   Yes, bubble shooter APK is safe to download and install. It does not contain any viruses or malware that can harm your device or data.

3. How can I update bubble shooter APK?

   You can update bubble shooter APK by visiting the official website of bubble shooter APK and downloading the latest version of the APK file. You can also enable the auto-update feature in your device's settings to get the latest updates automatically.

4. How can I contact the developer of bubble shooter APK?

   You can contact the developer of bubble shooter APK by sending an email to support@ilyon.net. You can also visit their Facebook page or their website for more information.

5. How can I rate and review bubble shooter APK?

   You can rate and review bubble shooter APK by visiting the Google Play Store page of bubble shooter APK, tapping on the stars, and writing your feedback. You can also share your thoughts and opinions with other players in the comments section.

        -
        -
        \ No newline at end of file diff --git a/spaces/fffiloni/Music_Source_Separation/scripts/apply-black.sh b/spaces/fffiloni/Music_Source_Separation/scripts/apply-black.sh deleted file mode 100644 index db35f6dd4af7f573770b8614f6dd3448a41909d9..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Music_Source_Separation/scripts/apply-black.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash -python3 -m black bytesep - diff --git a/spaces/fffiloni/langchain-chat-with-pdf-openai/README.md b/spaces/fffiloni/langchain-chat-with-pdf-openai/README.md deleted file mode 100644 index bed784f08b0adb68a7d866fc274068bd10b115c9..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/langchain-chat-with-pdf-openai/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chat with PDF • OpenAI -emoji: 📄🤖 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.28.2 -app_file: app.py -pinned: false -duplicated_from: fffiloni/langchain-chat-with-pdf ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/flatindo/scaler/realesrgan/version.py b/spaces/flatindo/scaler/realesrgan/version.py deleted file mode 100644 index f5a23197e4dac473f971675a1555bb02bcfa56c5..0000000000000000000000000000000000000000 --- a/spaces/flatindo/scaler/realesrgan/version.py +++ /dev/null @@ -1,5 +0,0 @@ -# GENERATED VERSION FILE -# TIME: Fri Jun 2 00:17:29 2023 -__version__ = '0.3.0' -__gitsha__ = '5ca1078' -version_info = (0, 3, 0) diff --git a/spaces/freddyaboulton/latent-diffusion-seed/app.py b/spaces/freddyaboulton/latent-diffusion-seed/app.py deleted file mode 100644 index c62b994766a16c8f7e8be515929f6db8994407c0..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/latent-diffusion-seed/app.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 -import queue -import gradio as gr -import random -import torch -from collections import defaultdict -from diffusers import DiffusionPipeline -from functools import partial -from itertools import zip_longest -from typing import List -from PIL import Image - -SELECT_LABEL = "Select as seed" - -MODEL_ID = "CompVis/ldm-text2im-large-256" -STEPS = 5 # while running on CPU -ETA = 0.3 -GUIDANCE_SCALE = 6 - -ldm = DiffusionPipeline.from_pretrained(MODEL_ID) - -import torch -print(f"cuda: {torch.cuda.is_available()}") - -with gr.Blocks(css=".container { max-width: 800px; margin: auto; }") as demo: - state = gr.Variable({ - 'selected': -1, - 'seeds': [random.randint(0, 2 ** 32 - 1) for _ in range(6)] - }) - - def infer_seeded_image(prompt, seed): - print(f"Prompt: {prompt}, seed: {seed}") - images, _ = infer_grid(prompt, n=1, seeds=[seed]) - return images[0] - - def infer_grid(prompt, n=6, seeds=[]): - # Unfortunately we have to iterate instead of requesting all images at once, - # because we have no way to get the intermediate generation seeds. 
- result = defaultdict(list) - for _, seed in zip_longest(range(n), seeds, fillvalue=None): - seed = random.randint(0, 2**32 - 1) if seed is None else seed - _ = torch.manual_seed(seed) - with torch.autocast("cuda"): - images = ldm( - [prompt], - num_inference_steps=STEPS, - eta=ETA, - guidance_scale=GUIDANCE_SCALE - )["sample"] - result["images"].append(images[0]) - result["seeds"].append(seed) - return result["images"], result["seeds"] - - def infer(prompt, state): - """ - Outputs: - - Grid images (list) - - Seeded Image (Image or None) - - Grid Box with updated visibility - - Seeded Box with updated visibility - """ - grid_images = [None] * 6 - image_with_seed = None - visible = (False, False) - - if (seed_index := state["selected"]) > -1: - seed = state["seeds"][seed_index] - image_with_seed = infer_seeded_image(prompt, seed) - visible = (False, True) - else: - grid_images, seeds = infer_grid(prompt) - state["seeds"] = seeds - visible = (True, False) - - boxes = [gr.Box.update(visible=v) for v in visible] - return grid_images + [image_with_seed] + boxes + [state] - - def update_state(selected_index: int, value, state): - if value == '': - others_value = gr.components._Keywords.NO_VALUE - else: - others_value = '' - state["selected"] = selected_index - return [gr.Radio.update(value=others_value) for _ in range(5)] + [state] - - def clear_seed(state): - """Update state of Radio buttons, grid, seeded_box""" - state["selected"] = -1 - return [''] * 6 + [gr.Box.update(visible=True), gr.Box.update(visible=False)] + [state] - - def image_block(): - return gr.Image( - interactive=False, show_label=False - ).style( - # border = (True, True, False, True), - rounded = (True, True, False, False), - ) - - def radio_block(): - radio = gr.Radio( - choices=[SELECT_LABEL], interactive=True, show_label=False, - ).style( - # border = (False, True, True, True), - # rounded = (False, False, True, True) - container=False - ) - return radio - - gr.Markdown( - """ -

        Latent Diffusion Demo

        -

        Type anything to generate a few images that represent your prompt. - Select one of the results to use as a seed for the next generation: - you can try variations of your prompt starting from the same state and see how it changes. - For example, Labrador in the style of Vermeer could be tweaked to - Labrador in the style of Picasso or Lynx in the style of Van Gogh. - If your prompts are similar, the tweaked result should also have a similar structure - but different details or style.

        - """ - ) - with gr.Group(): - with gr.Box(): - with gr.Row().style(mobile_collapse=False, equal_height=True): - text = gr.Textbox( - label="Enter your prompt", show_label=False, max_lines=1 - ).style( - border=(True, False, True, True), - # margin=False, - rounded=(True, False, False, True), - container=False, - ) - btn = gr.Button("Run").style( - margin=False, - rounded=(False, True, True, False), - ) - - ## Can we create a Component with these, so it can participate as an output? - with (grid := gr.Box()): - with gr.Row(): - with gr.Box().style(border=None): - image1 = image_block() - select1 = radio_block() - with gr.Box().style(border=None): - image2 = image_block() - select2 = radio_block() - with gr.Box().style(border=None): - image3 = image_block() - select3 = radio_block() - with gr.Row(): - with gr.Box().style(border=None): - image4 = image_block() - select4 = radio_block() - with gr.Box().style(border=None): - image5 = image_block() - select5 = radio_block() - with gr.Box().style(border=None): - image6 = image_block() - select6 = radio_block() - - images = [image1, image2, image3, image4, image5, image6] - selectors = [select1, select2, select3, select4, select5, select6] - - for i, radio in enumerate(selectors): - others = list(filter(lambda s: s != radio, selectors)) - radio.change( - partial(update_state, i), - inputs=[radio, state], - outputs=others + [state], - queue=False - ) - - with (seeded_box := gr.Box()): - seeded_image = image_block() - clear_seed_button = gr.Button("Return to Grid") - seeded_box.visible = False - clear_seed_button.click( - clear_seed, - inputs=[state], - outputs=selectors + [grid, seeded_box] + [state] - ) - - all_images = images + [seeded_image] - boxes = [grid, seeded_box] - infer_outputs = all_images + boxes + [state] - - text.submit( - infer, - inputs=[text, state], - outputs=infer_outputs - ) - btn.click( - infer, - inputs=[text, state], - outputs=infer_outputs - ) - -demo.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/fun-research/FC-CLIP/README.md b/spaces/fun-research/FC-CLIP/README.md deleted file mode 100644 index 0471313ea84b6facb818349fbeca03eba84445f0..0000000000000000000000000000000000000000 --- a/spaces/fun-research/FC-CLIP/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: FC CLIP -emoji: 🏢 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/fxmikau/o4gpt/theme_dropdown.py b/spaces/fxmikau/o4gpt/theme_dropdown.py deleted file mode 100644 index 6235388fd00549553df44028f3ccf03e946994ea..0000000000000000000000000000000000000000 --- a/spaces/fxmikau/o4gpt/theme_dropdown.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import pathlib - -from gradio.themes.utils import ThemeAsset - - -def create_theme_dropdown(): - import gradio as gr - - asset_path = pathlib.Path(__file__).parent / "themes" - themes = [] - for theme_asset in os.listdir(str(asset_path)): - themes.append( - (ThemeAsset(theme_asset), gr.Theme.load(str(asset_path / theme_asset))) - ) - - def make_else_if(theme_asset): - return f""" - else if (theme == '{str(theme_asset[0].version)}') {{ - var theme_css = `{theme_asset[1]._get_theme_css()}` - }}""" - - head, tail = themes[0], themes[1:] - if_statement = f""" - if (theme == "{str(head[0].version)}") {{ - var theme_css = `{head[1]._get_theme_css()}` - }} {" ".join(make_else_if(t) for t in tail)} - """ - - latest_to_oldest 
= sorted([t[0] for t in themes], key=lambda asset: asset.version)[ - ::-1 - ] - latest_to_oldest = [str(t.version) for t in latest_to_oldest] - - component = gr.Dropdown( - choices=latest_to_oldest, - value=latest_to_oldest[0], - render=False, - label="Select Version", - ).style(container=False) - - return ( - component, - f""" - (theme) => {{ - if (!document.querySelector('.theme-css')) {{ - var theme_elem = document.createElement('style'); - theme_elem.classList.add('theme-css'); - document.head.appendChild(theme_elem); - }} else {{ - var theme_elem = document.querySelector('.theme-css'); - }} - {if_statement} - theme_elem.innerHTML = theme_css; - }} - """, - ) diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/alexnet.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/alexnet.py deleted file mode 100644 index 89e36b8c7851f895d9ae7f07149f0e707456aab0..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/alexnet.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import logging - -import torch.nn as nn - - -class AlexNet(nn.Module): - """AlexNet backbone. - - Args: - num_classes (int): number of classes for classification. - """ - - def __init__(self, num_classes=-1): - super(AlexNet, self).__init__() - self.num_classes = num_classes - self.features = nn.Sequential( - nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - nn.Conv2d(64, 192, kernel_size=5, padding=2), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - nn.Conv2d(192, 384, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(384, 256, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(256, 256, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.MaxPool2d(kernel_size=3, stride=2), - ) - if self.num_classes > 0: - self.classifier = nn.Sequential( - nn.Dropout(), - nn.Linear(256 * 6 * 6, 4096), - nn.ReLU(inplace=True), - nn.Dropout(), - nn.Linear(4096, 4096), - nn.ReLU(inplace=True), - nn.Linear(4096, num_classes), - ) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - from ..runner import load_checkpoint - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - # use default initializer - pass - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - - x = self.features(x) - if self.num_classes > 0: - x = x.view(x.size(0), 256 * 6 * 6) - x = self.classifier(x) - - return x diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/utils/version_utils.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/utils/version_utils.py deleted file mode 100644 index 963c45a2e8a86a88413ab6c18c22481fb9831985..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/utils/version_utils.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import subprocess -import warnings - -from packaging.version import parse - - -def digit_version(version_str: str, length: int = 4): - """Convert a version string into a tuple of integers. - - This method is usually used for comparing two versions. For pre-release - versions: alpha < beta < rc. - - Args: - version_str (str): The version string. 
- length (int): The maximum number of version levels. Default: 4. - - Returns: - tuple[int]: The version info in digits (integers). - """ - assert 'parrots' not in version_str - version = parse(version_str) - assert version.release, f'failed to parse version {version_str}' - release = list(version.release) - release = release[:length] - if len(release) < length: - release = release + [0] * (length - len(release)) - if version.is_prerelease: - mapping = {'a': -3, 'b': -2, 'rc': -1} - val = -4 - # version.pre can be None - if version.pre: - if version.pre[0] not in mapping: - warnings.warn(f'unknown prerelease version {version.pre[0]}, ' - 'version checking may go wrong') - else: - val = mapping[version.pre[0]] - release.extend([val, version.pre[-1]]) - else: - release.extend([val, 0]) - - elif version.is_postrelease: - release.extend([1, version.post]) - else: - release.extend([0, 0]) - return tuple(release) - - -def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen( - cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - -def get_git_hash(fallback='unknown', digits=None): - """Get the git hash of the current repo. - - Args: - fallback (str, optional): The fallback string when git hash is - unavailable. Defaults to 'unknown'. - digits (int, optional): kept digits of the hash. Defaults to None, - meaning all digits are kept. - - Returns: - str: Git commit hash. - """ - - if digits is not None and not isinstance(digits, int): - raise TypeError('digits must be None or an integer') - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - if digits is not None: - sha = sha[:digits] - except OSError: - sha = fallback - - return sha diff --git a/spaces/gradio-discord-bots/Llama-2-70b-chat-hf/app.py b/spaces/gradio-discord-bots/Llama-2-70b-chat-hf/app.py deleted file mode 100644 index fcd0f2d669537ced8dd762457d5fb431f20f0e95..0000000000000000000000000000000000000000 --- a/spaces/gradio-discord-bots/Llama-2-70b-chat-hf/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import gradio as gr - -with gr.Blocks() as demo: - gr.Markdown(""" - # Llama-2-70b-chat-hf Discord Bot Powered by Gradio and Hugging Face Endpoints - - Make sure you read the 'Special Consideration' section below first! 🦙 - - ### First install the `gradio_client` - - ```bash - pip install gradio_client - ``` - - ### Then deploy to discord in one line! ⚡️ - - ```python - grc.Client("ysharma/Explore_llamav2_with_TGI").deploy_discord(to_id="llama2-70b-discord-bot") - ``` - - """) - with gr.Accordion(label="Special Considerations", open=False): - gr.Markdown(""" - This discord bot will use a FREE Inference Endpoint provided by Hugging Face. - Hugging Face does not commit to paying for this endpoint in perpetuity so there is no guarantee your bot will always work. - If you would like more control over the infrastructure backing the llama 70b model, consider deploying your own inference endpoint. 
- """ - ) - gr.Markdown(""" - Note: As a derivate work of [Llama-2-70b-chat](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, this demo is governed by the original [license](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/USE_POLICY.md) - """) - - -demo.queue(concurrency_count=70).launch() \ No newline at end of file diff --git a/spaces/gradio-discord-bots/llama-2-13b-chat-transformers/model.py b/spaces/gradio-discord-bots/llama-2-13b-chat-transformers/model.py deleted file mode 100644 index b3b9d3ed79f8ef37257ab60da55f23c05e3f13ba..0000000000000000000000000000000000000000 --- a/spaces/gradio-discord-bots/llama-2-13b-chat-transformers/model.py +++ /dev/null @@ -1,82 +0,0 @@ -from threading import Thread -import os -import torch -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer - -model_id = 'meta-llama/Llama-2-13b-chat-hf' - -is_spaces = True if "SPACE_ID" in os.environ else False -if is_spaces : - is_shared_ui = True if "gradio-discord-bots/llama-2-13b-chat-transformers" in os.environ['SPACE_ID'] else False -else: - is_shared_ui = False -is_gpu_associated = torch.cuda.is_available() - -if torch.cuda.is_available() and not is_shared_ui: - config = AutoConfig.from_pretrained(model_id) - config.pretraining_tp = 1 - model = AutoModelForCausalLM.from_pretrained( - model_id, - config=config, - torch_dtype=torch.float16, - load_in_4bit=True, - device_map='auto' - ) - tokenizer = AutoTokenizer.from_pretrained(model_id) -else: - model = None - tokenizer = None - - -def get_prompt(message: str, chat_history: list[tuple[str, str]], - system_prompt: str) -> str: - texts = [f'[INST] <>\n{system_prompt}\n<>\n\n'] - # The first user input is _not_ stripped - do_strip = False - for user_input, response in chat_history: - user_input = user_input.strip() if do_strip else user_input - do_strip = True - texts.append(f'{user_input} [/INST] {response.strip()} [INST] ') - message = message.strip() if do_strip else message - texts.append(f'{message} [/INST]') - return ''.join(texts) - - -def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int: - prompt = get_prompt(message, chat_history, system_prompt) - input_ids = tokenizer([prompt], return_tensors='np', add_special_tokens=False)['input_ids'] - return input_ids.shape[-1] - - -def run(message: str, - chat_history: list[tuple[str, str]], - system_prompt: str, - max_new_tokens: int = 1024, - temperature: float = 0.8, - top_p: float = 0.95, - top_k: int = 50) -> str: - prompt = get_prompt(message, chat_history, system_prompt) - inputs = tokenizer([prompt], return_tensors='pt', add_special_tokens=False).to('cuda') - - streamer = TextIteratorStreamer(tokenizer, - timeout=10., - skip_prompt=True, - skip_special_tokens=True) - generate_kwargs = dict( - inputs, - streamer=streamer, - max_new_tokens=max_new_tokens, - do_sample=True, - top_p=top_p, - top_k=top_k, - temperature=temperature, - num_beams=1, - ) - t = Thread(target=model.generate, kwargs=generate_kwargs) - t.start() - - outputs = [] - for text in streamer: - outputs.append(text) - - return "".join(outputs) diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/samples/torch/__init__.py b/spaces/gwang-kim/DATID-3D/pose_estimation/nvdiffrast/samples/torch/__init__.py deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/h2oai/wave-tour/examples/background_progress.py b/spaces/h2oai/wave-tour/examples/background_progress.py deleted file mode 100644 index f4afb75bcfc416c0e5470a467b57808058f570a3..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/background_progress.py +++ /dev/null @@ -1,71 +0,0 @@ -# Background Tasks / Progress -# Execute background functions while incrementing a #progress bar. -# #background_tasks -# --- -import asyncio -import time -import concurrent.futures -from threading import Event -from h2o_wave import main, app, Q, ui - - -# This takes a lot of time (compute heavy). -def blocking_function(q: Q, loop: asyncio.AbstractEventLoop): - count = 0 - total = 10 - future = None - while count < total: - # Check if cancelled. - if q.client.event.is_set(): - asyncio.ensure_future(show_cancel(q), loop=loop) - return - # This blocks the main thread and prevents any other execution. - # This would be the compute in the real world. - time.sleep(1) - count += 1 - # If future is not done yet, skip the update to keep the correct order. - if not future or future.done(): - # Assume you are able to emit some kind of progress. - future = asyncio.ensure_future(update_ui(q, count / total), loop=loop) - - -async def show_cancel(q: Q): - q.page['form'].progress.caption = 'Cancelled' - await q.page.save() - - -async def update_ui(q: Q, value: int): - q.page['form'].progress.value = value - await q.page.save() - - -@app('/demo') -async def serve(q: Q): - # Unimportant, draw initial UI. - if not q.client.initialized: - q.page['form'] = ui.form_card(box='1 1 3 2', items=[ - ui.inline([ - ui.button(name='start_job', label='Start job'), - ui.button(name='cancel', label='Cancel') - ]), - ui.progress(name='progress', label='Progress', value=0), - ]) - q.client.initialized = True - - # Handle start job button click. - if q.args.start_job: - # Do not run like this - will block the whole thread - freeze the app. - # blocking_function(q, loop) - - # Get the current event loop - will be used for - # running async functions within the blocking. - loop = asyncio.get_event_loop() - # Create an event to use for cancellation. - q.client.event = Event() - with concurrent.futures.ThreadPoolExecutor() as pool: - await q.exec(pool, blocking_function, q, loop) - - if q.args.cancel: - q.client.event.set() - - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/date_picker_trigger.py b/spaces/h2oai/wave-tour/examples/date_picker_trigger.py deleted file mode 100644 index 923fe1e79cbc34f6dd3a265ea8c8034e2a09f535..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/date_picker_trigger.py +++ /dev/null @@ -1,23 +0,0 @@ -# Form / Date Picker / Trigger -# To handle live changes to a date picker, enable the `trigger` attribute. 
-# #form #date_picker #trigger -# --- -from typing import Optional -from h2o_wave import main, app, Q, ui - - -def get_form_items(value: Optional[str]): - return [ - ui.text(f'date_trigger={value}'), - ui.date_picker(name='date_trigger', label='Pick a date', trigger=True), - ] - - -@app('/demo') -async def serve(q: Q): - if not q.client.initialized: - q.page['example'] = ui.form_card(box='1 1 4 5', items=get_form_items(None)) - q.client.initialized = True - else: - q.page['example'].items = get_form_items(q.args.date_trigger) - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/tab.py b/spaces/h2oai/wave-tour/examples/tab.py deleted file mode 100644 index 727566853dda58bd15442b6fcbcd40fd465a8b63..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/tab.py +++ /dev/null @@ -1,47 +0,0 @@ -# Tab -# This example demonstrates how you can observe and handle changes to the browser's -# [location hash](https://developer.mozilla.org/en-US/docs/Web/API/Location/hash). -# -# The location hash can be accessed using `q.args['#']`. -# #routing #tab -# --- -from h2o_wave import main, app, Q, ui - - -@app('/demo') -async def serve(q: Q): - content = 'Welcome to our store!' - - location = q.args['#'] - if location: - if location == 'menu/spam': - content = "Sorry, we're out of spam!" - elif location == 'menu/ham': - content = "Sorry, we're out of ham!" - elif location == 'menu/eggs': - content = "Sorry, we're out of eggs!" - elif location == 'about': - content = 'Everything here is gluten-free!' - - if not q.client.initialized: - q.page['nav'] = ui.tab_card( - box='1 1 4 1', - items=[ - ui.tab(name='#menu/spam', label='Spam'), - ui.tab(name='#menu/ham', label='Ham'), - ui.tab(name='#menu/eggs', label='Eggs'), - ui.tab(name='#about', label='About'), - ], - value=f'#{location}' if location else None, - ) - q.page['blurb'] = ui.markdown_card( - box='1 2 4 2', - title='Store', - content=content, - ) - q.client.initialized = True - elif location: - blurb = q.page['blurb'] - blurb.content = content - - await q.page.save() diff --git a/spaces/hamacojr/SAM-CAT-Seg/cat_seg/data/datasets/register_pascal_59.py b/spaces/hamacojr/SAM-CAT-Seg/cat_seg/data/datasets/register_pascal_59.py deleted file mode 100644 index ff49702fc898ecf38420985d143c70f71169b91a..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/SAM-CAT-Seg/cat_seg/data/datasets/register_pascal_59.py +++ /dev/null @@ -1,81 +0,0 @@ -import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets import load_sem_seg -import copy - - -stuff_colors = [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192], - [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], - [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224], - [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], - [0, 32, 0], [0, 128, 128], [64, 128, 160], [128, 160, 0], - [0, 128, 0], [192, 128, 32], [128, 96, 128], [0, 0, 128], - [64, 0, 32], [0, 224, 128], [128, 0, 0], [192, 0, 160], - [0, 96, 128], [128, 128, 128], [64, 0, 160], [128, 224, 128], - [128, 128, 64], [192, 0, 32], [128, 96, 0], [128, 0, 192], - [0, 128, 32], [64, 224, 0], [0, 0, 64], [128, 128, 160], - [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], - [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192], - [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128], - [64, 96, 0], [0, 128, 192], [0, 128, 160], [192, 224, 0], - [0, 128, 64], [128, 128, 32], [192, 32, 128], [0, 64, 192], - [0, 0, 32], [64, 160, 128], [128, 
64, 64], [128, 0, 160], - [128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], - [0, 0, 230], [119, 11, 32], - [64, 128, 64], [128, 192, 32], [192, 32, 192], [64, 64, 192], - [0, 64, 32], [64, 160, 192], [192, 64, 64], [128, 64, 160], - [64, 32, 192], [192, 192, 192], [0, 64, 160], [192, 160, 192], - [192, 192, 0], [128, 64, 96], [192, 32, 64], [192, 64, 128], - [64, 192, 96], [64, 160, 64], [64, 64, 0]] - -def _get_pascal_context_59_meta(): - #context_classes = ["aeroplane", "bag", "bed", "bedclothes", "bench", "bicycle", "bird", "boat", "book", "bottle", "building", "bus", "cabinet", "car", "cat", "ceiling", "chair", "cloth", "computer", "cow", "cup", "curtain", "dog", "door", "fence", "floor", "flower", "food", "grass", "ground", "horse", "keyboard", "light", "motorbike", "mountain", "mouse", "person", "plate", "platform", "pottedplant", "road", "rock", "sheep", "shelves", "sidewalk", "sign", "sky", "snow", "sofa", "diningtable", "track", "train", "tree", "truck", "tvmonitor", "wall", "water", "window", "wood"]#, "background"] - context_classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor", "bag", "bed", "bench", "book", "building", "cabinet", "ceiling", "cloth", "computer", "cup", "door", "fence", "floor", "flower", "food", "grass", "ground", "keyboard", "light", "mountain", "mouse", "curtain", "platform", "sign", "plate", "road", "rock", "shelves", "sidewalk", "sky", "snow", "bedclothes", "track", "tree", "truck", "wall", "water", "window", "wood"] - context_colors = [stuff_colors[i % len(stuff_colors)] for i in range(len(context_classes))] - ret = { - "stuff_colors" : context_colors, - "stuff_classes" : context_classes, - } - return ret - -def register_pascal_context_59(root): - root = os.path.join(root, "VOCdevkit", "VOC2010") - meta = _get_pascal_context_59_meta() - for name, image_dirname, sem_seg_dirname in [ - ("test", "JPEGImages", "annotations_detectron2/pc59_val"), - ]: - image_dir = os.path.join(root, image_dirname) - gt_dir = os.path.join(root, sem_seg_dirname) - name = f"context_59_{name}_sem_seg" - DatasetCatalog.register(name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext='png', image_ext='jpg')) - MetadataCatalog.get(name).set(image_root=image_dir, seg_seg_root=gt_dir, evaluator_type="sem_seg", ignore_label=255, **meta,) - -def _get_pascal_context_459_meta(): - context_459_classes = ["accordion", "aeroplane", "airconditioner", "antenna", "artillery", "ashtray", "atrium", "babycarriage", "bag", "ball", "balloon", "bambooweaving", "barrel", "baseballbat", "basket", "basketballbackboard", "bathtub", "bed", "bedclothes", "beer", "bell", "bench", "bicycle", "binoculars", "bird", "birdcage", "birdfeeder", "birdnest", "blackboard", "board", "boat", "bone", "book", "bottle", "bottleopener", "bowl", "box", "bracelet", "brick", "bridge", "broom", "brush", "bucket", "building", "bus", "cabinet", "cabinetdoor", "cage", "cake", "calculator", "calendar", "camel", "camera", "cameralens", "can", "candle", "candleholder", "cap", "car", "card", "cart", "case", "casetterecorder", "cashregister", "cat", "cd", "cdplayer", "ceiling", "cellphone", "cello", "chain", "chair", "chessboard", "chicken", "chopstick", "clip", 
"clippers", "clock", "closet", "cloth", "clothestree", "coffee", "coffeemachine", "comb", "computer", "concrete", "cone", "container", "controlbooth", "controller", "cooker", "copyingmachine", "coral", "cork", "corkscrew", "counter", "court", "cow", "crabstick", "crane", "crate", "cross", "crutch", "cup", "curtain", "cushion", "cuttingboard", "dais", "disc", "disccase", "dishwasher", "dock", "dog", "dolphin", "door", "drainer", "dray", "drinkdispenser", "drinkingmachine", "drop", "drug", "drum", "drumkit", "duck", "dumbbell", "earphone", "earrings", "egg", "electricfan", "electriciron", "electricpot", "electricsaw", "electronickeyboard", "engine", "envelope", "equipment", "escalator", "exhibitionbooth", "extinguisher", "eyeglass", "fan", "faucet", "faxmachine", "fence", "ferriswheel", "fireextinguisher", "firehydrant", "fireplace", "fish", "fishtank", "fishbowl", "fishingnet", "fishingpole", "flag", "flagstaff", "flame", "flashlight", "floor", "flower", "fly", "foam", "food", "footbridge", "forceps", "fork", "forklift", "fountain", "fox", "frame", "fridge", "frog", "fruit", "funnel", "furnace", "gamecontroller", "gamemachine", "gascylinder", "gashood", "gasstove", "giftbox", "glass", "glassmarble", "globe", "glove", "goal", "grandstand", "grass", "gravestone", "ground", "guardrail", "guitar", "gun", "hammer", "handcart", "handle", "handrail", "hanger", "harddiskdrive", "hat", "hay", "headphone", "heater", "helicopter", "helmet", "holder", "hook", "horse", "horse-drawncarriage", "hot-airballoon", "hydrovalve", "ice", "inflatorpump", "ipod", "iron", "ironingboard", "jar", "kart", "kettle", "key", "keyboard", "kitchenrange", "kite", "knife", "knifeblock", "ladder", "laddertruck", "ladle", "laptop", "leaves", "lid", "lifebuoy", "light", "lightbulb", "lighter", "line", "lion", "lobster", "lock", "machine", "mailbox", "mannequin", "map", "mask", "mat", "matchbook", "mattress", "menu", "metal", "meterbox", "microphone", "microwave", "mirror", "missile", "model", "money", "monkey", "mop", "motorbike", "mountain", "mouse", "mousepad", "musicalinstrument", "napkin", "net", "newspaper", "oar", "ornament", "outlet", "oven", "oxygenbottle", "pack", "pan", "paper", "paperbox", "papercutter", "parachute", "parasol", "parterre", "patio", "pelage", "pen", "pencontainer", "pencil", "person", "photo", "piano", "picture", "pig", "pillar", "pillow", "pipe", "pitcher", "plant", "plastic", "plate", "platform", "player", "playground", "pliers", "plume", "poker", "pokerchip", "pole", "pooltable", "postcard", "poster", "pot", "pottedplant", "printer", "projector", "pumpkin", "rabbit", "racket", "radiator", "radio", "rail", "rake", "ramp", "rangehood", "receiver", "recorder", "recreationalmachines", "remotecontrol", "road", "robot", "rock", "rocket", "rockinghorse", "rope", "rug", "ruler", "runway", "saddle", "sand", "saw", "scale", "scanner", "scissors", "scoop", "screen", "screwdriver", "sculpture", "scythe", "sewer", "sewingmachine", "shed", "sheep", "shell", "shelves", "shoe", "shoppingcart", "shovel", "sidecar", "sidewalk", "sign", "signallight", "sink", "skateboard", "ski", "sky", "sled", "slippers", "smoke", "snail", "snake", "snow", "snowmobiles", "sofa", "spanner", "spatula", "speaker", "speedbump", "spicecontainer", "spoon", "sprayer", "squirrel", "stage", "stair", "stapler", "stick", "stickynote", "stone", "stool", "stove", "straw", "stretcher", "sun", "sunglass", "sunshade", "surveillancecamera", "swan", "sweeper", "swimring", "swimmingpool", "swing", "switch", "table", "tableware", "tank", "tap", 
"tape", "tarp", "telephone", "telephonebooth", "tent", "tire", "toaster", "toilet", "tong", "tool", "toothbrush", "towel", "toy", "toycar", "track", "train", "trampoline", "trashbin", "tray", "tree", "tricycle", "tripod", "trophy", "truck", "tube", "turtle", "tvmonitor", "tweezers", "typewriter", "umbrella", "unknown", "vacuumcleaner", "vendingmachine", "videocamera", "videogameconsole", "videoplayer", "videotape", "violin", "wakeboard", "wall", "wallet", "wardrobe", "washingmachine", "watch", "water", "waterdispenser", "waterpipe", "waterskateboard", "watermelon", "whale", "wharf", "wheel", "wheelchair", "window", "windowblinds", "wineglass", "wire", "wood", "wool"] - context_colors = [stuff_colors[i % len(stuff_colors)] for i in range(len(context_459_classes))] - ret = { - "stuff_colors" : context_colors, - "stuff_classes" : context_459_classes, - } - return ret - -def register_pascal_context_459(root): - root = os.path.join(root, "VOCdevkit", "VOC2010") - meta = _get_pascal_context_459_meta() - for name, image_dirname, sem_seg_dirname in [ - ("test", "JPEGImages", "annotations_detectron2/pc459_val"), - ]: - image_dir = os.path.join(root, image_dirname) - gt_dir = os.path.join(root, sem_seg_dirname) - name = f"context_459_{name}_sem_seg" - DatasetCatalog.register(name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext='tif', image_ext='jpg')) - MetadataCatalog.get(name).set(image_root=image_dir, seg_seg_root=gt_dir, evaluator_type="sem_seg", ignore_label=459, **meta,) - - -_root = os.getenv("DETECTRON2_DATASETS", "datasets") -register_pascal_context_59(_root) -register_pascal_context_459(_root) \ No newline at end of file diff --git a/spaces/hamza50/rhymethyme/app.py b/spaces/hamza50/rhymethyme/app.py deleted file mode 100644 index ccce9a67d48fe32a52c1cadff2b336003b84f4ba..0000000000000000000000000000000000000000 --- a/spaces/hamza50/rhymethyme/app.py +++ /dev/null @@ -1,54 +0,0 @@ - -import streamlit as st -import openai - -#streamlit run main.py -# import os -# os.environ.getattribute("openai.api_key") - -header = st.container() -about = st.container() -model_output = st.container() - -with header: - st.title('RHYME THYME') - #st.text('This this bot-bop-shop uses Artificail Ingredients \ - #(AI) to produce natural flavors.') - -with model_output: - st.header('Half-baked Takes:') - sel_col, disp_col = st.columns(2) - - sel_col.subheader('Input ideas') - - #input_poem = sel_col.selectbox('What type of poem do you want to write?', options=['limerick', 'haiku', 'sonnet', 'acrostic', 'villanelle', 'ode', 'elegy', 'ballad', 'couplet', 'tercet', 'quatrain', 'cinquan', 'sestet']) - - input_word1 = sel_col.text_input('Input a word to use in the poem:') - - input_word2 = sel_col.text_input('Input another word to use in the poem:') - - - # This is the NLP model - - - - def generate_poem(input_word1, input_word2): - # Use GPT-3 to generate a limerick based on the user's input word - prompt = f"Write a limerick in AABBA rhyme scheme about {input_word1} and '{input_word2}':" - completions = openai.Completion.create( - engine="text-davinci-002", - prompt=prompt, - max_tokens=1024, - n=1, - stop=None, - temperature=0.5, - ) - - # Return the generated limerick - return completions.choices[0].text - - limerick = generate_poem(input_word1.lower(), input_word2.lower()) - - # This is where the results are displayed on the app - disp_col.subheader('Limerick output') - disp_col.write(limerick) diff --git 
a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/configs/Detectron1-Comparisons/README.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/configs/Detectron1-Comparisons/README.md deleted file mode 100644 index a90ed9e433a00b8b9f43961d7a2696d5b9013127..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/configs/Detectron1-Comparisons/README.md +++ /dev/null @@ -1,83 +0,0 @@ - -Detectron2 model zoo's experimental settings and a few implementation details are different from Detectron. - -The differences in implementation details are shared in -[Compatibility with Other Libraries](../../docs/notes/compatibility.md). - -The differences in model zoo's experimental settings include: -* Use scale augmentation during training. This improves AP with lower training cost. -* Use L1 loss instead of smooth L1 loss for simplicity. This sometimes improves box AP but may - affect other AP. -* Use `POOLER_SAMPLING_RATIO=0` instead of 2. This does not significantly affect AP. -* Use `ROIAlignV2`. This does not significantly affect AP. - -In this directory, we provide a few configs that __do not__ have the above changes. -They mimic Detectron's behavior as close as possible, -and provide a fair comparison of accuracy and speed against Detectron. - - - - -
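To see how these Detectron1-style settings map onto concrete options, the sketch below overrides the relevant keys with detectron2's config API. The config filename and the specific values (a single 800-pixel training scale, sampling ratio 2, plain ROIAlign, and non-zero smooth-L1 betas) are assumptions chosen to mirror the bullet points above, not a copy of the shipped YAML files.

```python
from detectron2.config import get_cfg

cfg = get_cfg()
# Hypothetical path; the actual YAML files live next to this README.
cfg.merge_from_file("configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml")

# Detectron1-style overrides mirrored by these configs (assumed values):
cfg.INPUT.MIN_SIZE_TRAIN = (800,)                   # no scale augmentation
cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 2    # instead of 0
cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlign"     # instead of ROIAlignV2
cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 1.0         # smooth L1 instead of L1 loss
cfg.MODEL.RPN.SMOOTH_L1_BETA = 0.1111               # Detectron1's RPN beta
```

Loading one of the provided configs directly should already include equivalent overrides; the explicit assignments are shown only to make the differences visible.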
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | kp. AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| Faster R-CNN | 1x | 0.219 | 0.038 | 3.1 | 36.9 | | | 137781054 | model \| metrics |
| Keypoint R-CNN | 1x | 0.313 | 0.071 | 5.0 | 53.1 | | 64.2 | 137781195 | model \| metrics |
| Mask R-CNN | 1x | 0.273 | 0.043 | 3.4 | 37.8 | 34.9 | | 137781281 | model \| metrics |
    - -## Comparisons: - -* Faster R-CNN: Detectron's AP is 36.7, similar to ours. -* Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a Detectron's - [bug](https://github.com/facebookresearch/Detectron/issues/459) lead to a drop in box AP, and can be - compensated back by some parameter tuning. -* Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. We're 1 AP better in mask AP, due to more correct implementation. - -For speed comparison, see [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html). diff --git a/spaces/hebert2099/MusicGen/audiocraft/modules/rope.py b/spaces/hebert2099/MusicGen/audiocraft/modules/rope.py deleted file mode 100644 index 4b8c70b9aba28eeb53d12ddc3de8852492847808..0000000000000000000000000000000000000000 --- a/spaces/hebert2099/MusicGen/audiocraft/modules/rope.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -from torch import nn -import torch - - -class XPos(nn.Module): - """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1). - This applies an exponential decay to the RoPE rotation matrix. - - Args: - dim (int): Embedding dimension. - smoothing (float): Smoothing factor applied to the decay rates. - base_scale (int): Base decay rate, given in terms of scaling time. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. - """ - def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512, - device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - self.base_scale = base_scale - - half_dim = dim // 2 - adim = torch.arange(half_dim, device=device, dtype=dtype) - decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing) - self.register_buffer("decay_rates", decay_rates) - self.decay: tp.Optional[torch.Tensor] = None - - def get_decay(self, start: int, end: int): - """Create complex decay tensor, cache values for fast computation. - """ - if self.decay is None or end > self.decay.shape[0]: - assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype) - power = idx / self.base_scale - scale = self.decay_rates ** power.unsqueeze(-1) - self.decay = torch.polar(scale, torch.zeros_like(scale)) - return self.decay[start:end] # [T, C/2] - - -class RotaryEmbedding(nn.Module): - """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864). - - Args: - dim (int): Embedding dimension (twice the number of frequencies). - max_period (float): Maximum period of the rotation frequencies. - xpos (bool): Use xPos, applies an exponential decay to rotation matrix. - scale (float): Scale of positional embedding, set to 0 to deactivate. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. 
- """ - def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False, - scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - self.scale = scale - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - - adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)] - frequencies = 1.0 / (max_period ** (adim / dim)) - self.register_buffer("frequencies", frequencies) - self.rotation: tp.Optional[torch.Tensor] = None - - self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None - - def get_rotation(self, start: int, end: int): - """Create complex rotation tensor, cache values for fast computation. - """ - if self.rotation is None or end > self.rotation.shape[0]: - assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype) - angles = torch.outer(idx, self.frequencies) - self.rotation = torch.polar(torch.ones_like(angles), angles) - return self.rotation[start:end] - - def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False): - """Apply rope rotation to query or key tensor. - """ - T = x.shape[1] - rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2) - - if self.xpos: - decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2) - else: - decay = 1.0 - - if invert_decay: - decay = decay ** -1 - - x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2)) - scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale) - x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2) - - return x_out.type_as(x) - - def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0): - """ Apply rope rotation to both query and key tensors. - Supports streaming mode, in which query and key are not expected to have the same shape. - In streaming mode, key will be of legnth [P + C] with P the cached past timesteps, but - query will be [C] (typically C == 1). - - Args: - query (torch.Tensor): Query to rotate. - key (torch.Tensor): Key to rotate. - start (int): Start index of the sequence for time offset. 
- """ - query_timesteps = query.shape[1] - key_timesteps = key.shape[1] - streaming_offset = key_timesteps - query_timesteps - - query_out = self.rotate(query, start + streaming_offset) - key_out = self.rotate(key, start, invert_decay=True) - - return query_out, key_out diff --git a/spaces/hezhaoqia/vits-simple-api/vits/commons.py b/spaces/hezhaoqia/vits-simple-api/vits/commons.py deleted file mode 100644 index bda0a67534ac34bd02dc28b845619b2433a40df6..0000000000000000000000000000000000000000 --- a/spaces/hezhaoqia/vits-simple-api/vits/commons.py +++ /dev/null @@ -1,96 +0,0 @@ -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path diff --git a/spaces/hhhyrhe/vits-uma-genshin-honkai/app.py b/spaces/hhhyrhe/vits-uma-genshin-honkai/app.py deleted file mode 100644 index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000 --- a/spaces/hhhyrhe/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor -import torch - -hps_ms = 
utils.get_hparams_from_file(r'./model/config.json') -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model).to(device) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 500: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
    VITS语音在线合成demo\n" - "
    主要有赛马娘,原神中文,原神日语,崩坏3的音色
    " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/Disk Drill 3.8.947 Pro Crack Activation Code For Mac.md b/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/Disk Drill 3.8.947 Pro Crack Activation Code For Mac.md deleted file mode 100644 index db0791d70b18dd14bff9a87cedaeaf5a9df01086..0000000000000000000000000000000000000000 --- a/spaces/hilmyblaze/WebUI-Counterfeit-V2.5/Disk Drill 3.8.947 Pro Crack Activation Code For Mac.md +++ /dev/null @@ -1,102 +0,0 @@ -## Disk Drill 3.8.947 Pro Crack Activation Code For Mac - - - - - - - - - -**Click Here ✔ [https://bionallopi.blogspot.com/?file=2txmmR](https://bionallopi.blogspot.com/?file=2txmmR)** - - - - - - - - - - - - - -# Disk Drill 3.8.947 Pro Crack Activation Code For Mac: Is It Worth It? - - - -Disk Drill 3.8.947 Pro is a Mac data recovery software that claims to recover deleted or lost files from any storage device, including iPhone and Android devices. It also offers some useful tools for disk management, such as duplicate file finder, disk cleanup, space visualization, bootable data recovery drive, disk health monitoring, and more. - - - -But is Disk Drill 3.8.947 Pro worth its full price of $89? Or are you better off looking for a free Disk Drill activation code, crack, serial key, or keygen on the internet? - - - -In this article, we will review the features and performance of Disk Drill 3.8.947 Pro, and compare it with some of the alternatives available on the market. We will also discuss the risks and drawbacks of using a cracked or pirated version of Disk Drill, and how you can get a legal license for Disk Drill Pro at a reduced price. - - - -## Features and Performance of Disk Drill 3.8.947 Pro - - - -Disk Drill 3.8.947 Pro is designed natively for Mac OS and supports most storage devices, file types, and file systems. 
It can recover data from internal and external hard drives, USB flash drives, memory cards, cameras, iPods, iPhones, iPads, Android devices, and more. - - - -Disk Drill 3.8.947 Pro uses several scanning methods to find and recover your data. You can choose between Quick Scan, which is fast but may not find all the files; Deep Scan, which is thorough but may take longer; or Partition Search, which can recover entire partitions that have been lost or reformatted. - - - -Disk Drill 3.8.947 Pro also has a feature called Recovery Vault, which protects your files from accidental deletion by keeping a backup of their metadata. You can enable Recovery Vault for any disk or folder on your Mac, and it will automatically save the information about deleted files in a hidden database. This way, you can easily restore them with Disk Drill later. - - - -Another feature of Disk Drill 3.8.947 Pro is Data Protection, which allows you to create a byte-to-byte backup of any disk or partition on your Mac. You can save this backup as a DMG image file on another storage device or upload it to a cloud service like Dropbox or Google Drive. You can then use this backup to restore your data in case of a disk failure or corruption. - - - -Disk Drill 3.8.947 Pro also offers some other tools for disk management, such as: - - - -- Duplicate File Finder: This tool scans your disk for duplicate files and lets you delete them to free up space. - -- Disk Cleanup: This tool analyzes your disk space usage and helps you remove unnecessary files and folders. - -- Space Visualization: This tool shows you a graphical representation of your disk space usage and lets you explore your files and folders. - -- Bootable Data Recovery Drive: This tool lets you create a bootable USB drive with Disk Drill on it, so you can recover data from a Mac that won't boot up. - -- Disk Health Monitoring: This tool monitors your disk's SMART status and alerts you if there are any signs of disk failure or degradation. - - - -Disk Drill 3.8.947 Pro has a simple and intuitive user interface that makes it easy to use for anyone. You can scan and recover your data in just a few clicks, and preview the files before restoring them. You can also filter the results by file type, size, date, or name. - - - -Disk Drill 3.8.947 Pro has a high success rate of data recovery and can recover up to 500 MB of data for free in the trial version. However, if you want to recover more than that, you will need to purchase a license for $89. - - - -## Alternatives to Disk Drill 3.8.947 Pro - - - -If you are looking for other options to recover your data on Mac, there are some alternatives to Disk Drill 3.8.947 Pro that you can try: - - - -- Data Rescue 6: This 1b8d091108 - - - - - - - - - diff --git a/spaces/hussain-shk/IndiSent/subword-nmt/learn_bpe.py b/spaces/hussain-shk/IndiSent/subword-nmt/learn_bpe.py deleted file mode 100644 index 7b01f046fa6b3fd8ba64b7658c23b6f80a4e6ba3..0000000000000000000000000000000000000000 --- a/spaces/hussain-shk/IndiSent/subword-nmt/learn_bpe.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Author: Rico Sennrich - -"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text. -Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary -of a text to a configurable number of symbols, with only a small increase in the number of tokens. - -Reference: -Rico Sennrich, Barry Haddow and Alexandra Birch (2016). 
Neural Machine Translation of Rare Words with Subword Units. -Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany. -""" - -from __future__ import unicode_literals - -import os -import sys -import inspect -import codecs -import re -import copy -import argparse -import warnings -import tempfile -from multiprocessing import Pool, cpu_count -from collections import defaultdict, Counter - -try: - from tqdm import tqdm -except ImportError: - def tqdm(iterator, *args, **kwargs): - return iterator - -# hack for python2/3 compatibility -from io import open -argparse.open = open - -def create_parser(subparsers=None): - - if subparsers: - parser = subparsers.add_parser('learn-bpe', - formatter_class=argparse.RawDescriptionHelpFormatter, - description="learn BPE-based word segmentation") - else: - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description="learn BPE-based word segmentation") - - parser.add_argument( - '--input', '-i', type=argparse.FileType('r'), default=sys.stdin, - metavar='PATH', - help="Input text (default: standard input).") - - parser.add_argument( - '--output', '-o', type=argparse.FileType('w'), default=sys.stdout, - metavar='PATH', - help="Output file for BPE codes (default: standard output)") - parser.add_argument( - '--symbols', '-s', type=int, default=10000, - help="Create this many new symbols (each representing a character n-gram) (default: %(default)s)") - parser.add_argument( - '--min-frequency', type=int, default=2, metavar='FREQ', - help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)') - parser.add_argument('--dict-input', action="store_true", - help="If set, input file is interpreted as a dictionary where each line contains a word-count pair") - parser.add_argument( - '--total-symbols', '-t', action="store_true", - help="subtract number of characters from the symbols to be generated (so that '--symbols' becomes an estimate for the total number of symbols needed to encode text).") - parser.add_argument( - '--num-workers', type=int, default=1, - help="Number of processors to process texts, only supported in Python3. If -1, set `multiprocessing.cpu_count()`. (default: %(default)s)") - parser.add_argument( - '--verbose', '-v', action="store_true", - help="verbose mode.") - - return parser - -def get_vocabulary(fobj, is_dict=False, num_workers=1): - """Read text and return dictionary that encodes vocabulary - """ - vocab = Counter() - if is_dict: - for i, line in enumerate(fobj): - try: - word, count = line.strip('\r\n ').split(' ') - except: - print('Failed reading vocabulary file at line {0}: {1}'.format(i, line)) - sys.exit(1) - vocab[word] += int(count) - elif num_workers == 1 or fobj.name == '': - if num_workers > 1: - warnings.warn("In parallel mode, the input cannot be STDIN. 
Using 1 processor instead.") - for i, line in enumerate(fobj): - for word in line.strip('\r\n ').split(' '): - if word: - vocab[word] += 1 - elif num_workers > 1: - - if sys.version_info < (3, 0): - print("Parallel mode is only supported in Python3.") - sys.exit(1) - - with open(fobj.name, encoding="utf8") as f: - size = os.fstat(f.fileno()).st_size - chunk_size = int(size / num_workers) - offsets = [0 for _ in range(num_workers + 1)] - for i in range(1, num_workers): - f.seek(chunk_size * i) - pos = f.tell() - while True: - try: - line = f.readline() - break - except UnicodeDecodeError: - pos -= 1 - f.seek(pos) - offsets[i] = f.tell() - assert 0 <= offsets[i] < 1e20, "Bad new line separator, e.g. '\\r'" - - vocab_files = [] - pool = Pool(processes=num_workers) - for i in range(num_workers): - tmp = tempfile.NamedTemporaryFile(delete=False) - tmp.close() - vocab_files.append(tmp) - pool.apply_async(_get_vocabulary, (fobj.name, tmp.name, offsets[i], offsets[i + 1])) - pool.close() - pool.join() - import pickle - for i in range(num_workers): - with open(vocab_files[i].name, 'rb') as f: - vocab += pickle.load(f) - os.remove(vocab_files[i].name) - else: - raise ValueError('`num_workers` is expected to be a positive number, but got {}.'.format(num_workers)) - return vocab - -def _get_vocabulary(infile, outfile, begin, end): - import pickle - vocab = Counter() - with open(infile, encoding="utf8") as f: - f.seek(begin) - line = f.readline() - while line: - pos = f.tell() - assert 0 <= pos < 1e20, "Bad new line separator, e.g. '\\r'" - if end > 0 and pos > end: - break - for word in line.strip('\r\n ').split(' '): - if word: - vocab[word] += 1 - line = f.readline() - with open(outfile, 'wb') as f: - pickle.dump(vocab, f) - -def update_pair_statistics(pair, changed, stats, indices): - """Minimally update the indices and frequency of symbol pairs - - if we merge a pair of symbols, only pairs that overlap with occurrences - of this pair are affected, and need to be updated. - """ - stats[pair] = 0 - indices[pair] = defaultdict(int) - first, second = pair - new_pair = first+second - for j, word, old_word, freq in changed: - - # find all instances of pair, and update frequency/indices around it - i = 0 - while True: - # find first symbol - try: - i = old_word.index(first, i) - except ValueError: - break - # if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2]) - if i < len(old_word)-1 and old_word[i+1] == second: - # assuming a symbol sequence "A B C", if "B C" is merged, reduce the frequency of "A B" - if i: - prev = old_word[i-1:i+1] - stats[prev] -= freq - indices[prev][j] -= 1 - if i < len(old_word)-2: - # assuming a symbol sequence "A B C B", if "B C" is merged, reduce the frequency of "C B". 
- # however, skip this if the sequence is A B C B C, because the frequency of "C B" will be reduced by the previous code block - if old_word[i+2] != first or i >= len(old_word)-3 or old_word[i+3] != second: - nex = old_word[i+1:i+3] - stats[nex] -= freq - indices[nex][j] -= 1 - i += 2 - else: - i += 1 - - i = 0 - while True: - try: - # find new pair - i = word.index(new_pair, i) - except ValueError: - break - # assuming a symbol sequence "A BC D", if "B C" is merged, increase the frequency of "A BC" - if i: - prev = word[i-1:i+1] - stats[prev] += freq - indices[prev][j] += 1 - # assuming a symbol sequence "A BC B", if "B C" is merged, increase the frequency of "BC B" - # however, if the sequence is A BC BC, skip this step because the count of "BC BC" will be incremented by the previous code block - if i < len(word)-1 and word[i+1] != new_pair: - nex = word[i:i+2] - stats[nex] += freq - indices[nex][j] += 1 - i += 1 - - -def get_pair_statistics(vocab): - """Count frequency of all symbol pairs, and create index""" - - # data structure of pair frequencies - stats = defaultdict(int) - - #index from pairs to words - indices = defaultdict(lambda: defaultdict(int)) - - for i, (word, freq) in enumerate(vocab): - prev_char = word[0] - for char in word[1:]: - stats[prev_char, char] += freq - indices[prev_char, char][i] += 1 - prev_char = char - - return stats, indices - - -def replace_pair(pair, vocab, indices): - """Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'""" - first, second = pair - pair_str = ''.join(pair) - pair_str = pair_str.replace('\\','\\\\') - changes = [] - pattern = re.compile(r'(?'); - # version numbering allows bckward compatibility - outfile.write('#version: 0.2\n') - - vocab = get_vocabulary(infile, is_dict, num_workers) - vocab = dict([(tuple(x[:-1])+(x[-1]+'',) ,y) for (x,y) in vocab.items()]) - sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True) - - stats, indices = get_pair_statistics(sorted_vocab) - big_stats = copy.deepcopy(stats) - - if total_symbols: - uniq_char_internal = set() - uniq_char_final = set() - for word in vocab: - for char in word[:-1]: - uniq_char_internal.add(char) - uniq_char_final.add(word[-1]) - sys.stderr.write('Number of word-internal characters: {0}\n'.format(len(uniq_char_internal))) - sys.stderr.write('Number of word-final characters: {0}\n'.format(len(uniq_char_final))) - sys.stderr.write('Reducing number of merge operations by {0}\n'.format(len(uniq_char_internal) + len(uniq_char_final))) - num_symbols -= len(uniq_char_internal) + len(uniq_char_final) - - # threshold is inspired by Zipfian assumption, but should only affect speed - threshold = max(stats.values()) / 10 - for i in tqdm(range(num_symbols)): - if stats: - most_frequent = max(stats, key=lambda x: (stats[x], x)) - - # we probably missed the best pair because of pruning; go back to full statistics - if not stats or (i and stats[most_frequent] < threshold): - prune_stats(stats, big_stats, threshold) - stats = copy.deepcopy(big_stats) - most_frequent = max(stats, key=lambda x: (stats[x], x)) - # threshold is inspired by Zipfian assumption, but should only affect speed - threshold = stats[most_frequent] * i/(i+10000.0) - prune_stats(stats, big_stats, threshold) - - if stats[most_frequent] < min_frequency: - sys.stderr.write('no pair has frequency >= {0}. 
Stopping\n'.format(min_frequency)) - break - - if verbose: - sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(i, most_frequent[0], most_frequent[1], stats[most_frequent])) - outfile.write('{0} {1}\n'.format(*most_frequent)) - changes = replace_pair(most_frequent, sorted_vocab, indices) - update_pair_statistics(most_frequent, changes, stats, indices) - stats[most_frequent] = 0 - if not i % 100: - prune_stats(stats, big_stats, threshold) - - -if __name__ == '__main__': - - currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) - newdir = os.path.join(currentdir, 'subword_nmt') - if os.path.isdir(newdir): - warnings.simplefilter('default') - warnings.warn( - "this script's location has moved to {0}. This symbolic link will be removed in a future version. Please point to the new location, or install the package and use the command 'subword-nmt'".format(newdir), - DeprecationWarning - ) - - # python 2/3 compatibility - if sys.version_info < (3, 0): - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin) - else: - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer) - - parser = create_parser() - args = parser.parse_args() - - if args.num_workers <= 0: - args.num_workers = cpu_count() - - if sys.version_info < (3, 0) and args.num_workers > 1: - args.num_workers = 1 - warnings.warn("Parallel mode is only supported in Python3. Using 1 processor instead.") - - # read/write files as UTF-8 - if args.input.name != '': - args.input = codecs.open(args.input.name, encoding='utf-8') - if args.output.name != '': - args.output = codecs.open(args.output.name, 'w', encoding='utf-8') - - learn_bpe(args.input, args.output, args.symbols, args.min_frequency, args.verbose, is_dict=args.dict_input, total_symbols=args.total_symbols, num_workers=args.num_workers) diff --git a/spaces/hysts/ViTPose_video/README.md b/spaces/hysts/ViTPose_video/README.md deleted file mode 100644 index 09cc310f9500f3b37483376f74051e346b17bae4..0000000000000000000000000000000000000000 --- a/spaces/hysts/ViTPose_video/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: ViTPose Video -emoji: 🦀 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -suggested_hardware: t4-small ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -https://arxiv.org/abs/2204.12484 diff --git a/spaces/hysts/list-of-demos/app.py b/spaces/hysts/list-of-demos/app.py deleted file mode 100644 index 21dd9f453e3d6ab7571b162a1df3a4a667123ce9..0000000000000000000000000000000000000000 --- a/spaces/hysts/list-of-demos/app.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import gradio as gr - -from constants import ( - HARDWARE_CHOICES, - OWNER_CHOICES, - SDK_CHOICES, - SLEEP_TIME_CHOICES, - STATUS_CHOICES, - VISIBILITY_CHOICES, -) -from demo_list import DemoList, get_df_from_yaml - -demo_list = DemoList(get_df_from_yaml("list.yaml")) - - -def update_status_checkboxes(choices: list[str]) -> list[str]: - if "(ALL)" in choices: - return STATUS_CHOICES - elif "(NONE)" in choices: - return [] - else: - return choices - - -def update_hardware_checkboxes(choices: list[str]) -> list[str]: - if "(ALL)" in choices: - return HARDWARE_CHOICES - elif "(NONE)" in 
choices: - return [] - else: - return choices - - -def update_sdk_checkboxes(choices: list[str]) -> list[str]: - if "(ALL)" in choices: - return SDK_CHOICES - elif "(NONE)" in choices: - return [] - else: - return choices - - -def update_sleep_time_checkboxes(choices: list[str]) -> list[str]: - if "(ALL)" in choices: - return SLEEP_TIME_CHOICES - elif "(NONE)" in choices: - return [] - else: - return choices - - -with gr.Blocks(css="style.css") as demo: - with gr.Accordion(label="Filter", open=True): - status = gr.CheckboxGroup( - label="Status", - choices=["(ALL)", "(NONE)"] + STATUS_CHOICES, - value=STATUS_CHOICES, - type="value", - ) - hardware = gr.CheckboxGroup( - label="Hardware", - choices=["(ALL)", "(NONE)"] + HARDWARE_CHOICES, - value=HARDWARE_CHOICES, - type="value", - ) - sleep_time = gr.CheckboxGroup( - label="Sleep time", - choices=["(ALL)", "(NONE)"] + SLEEP_TIME_CHOICES, - value=SLEEP_TIME_CHOICES, - type="value", - ) - multiple_replicas = gr.Checkbox(label="Multiple replicas", value=False) - sdk = gr.CheckboxGroup( - label="SDK", - choices=["(ALL)", "(NONE)"] + SDK_CHOICES, - value=SDK_CHOICES, - type="value", - ) - visibility = gr.CheckboxGroup( - label="Visibility", - choices=VISIBILITY_CHOICES, - value=VISIBILITY_CHOICES, - type="value", - ) - owner = gr.CheckboxGroup( - label="Owner", - choices=OWNER_CHOICES, - value=OWNER_CHOICES, - type="value", - ) - apply_button = gr.Button("Apply") - df = gr.Dataframe( - value=demo_list.df_prettified, - datatype=demo_list.column_datatype, - type="pandas", - interactive=False, - row_count=(0, "dynamic"), - height=1000, - elem_id="table", - ) - - status.input( - fn=update_status_checkboxes, - inputs=status, - outputs=status, - concurrency_limit=None, - show_progress=False, - api_name=False, - ) - hardware.input( - fn=update_hardware_checkboxes, - inputs=hardware, - outputs=hardware, - concurrency_limit=None, - show_progress=False, - api_name=False, - ) - sdk.input( - fn=update_sdk_checkboxes, - inputs=sdk, - outputs=sdk, - concurrency_limit=None, - show_progress=False, - api_name=False, - ) - sleep_time.input( - fn=update_sleep_time_checkboxes, - inputs=sleep_time, - outputs=sleep_time, - concurrency_limit=None, - show_progress=False, - api_name=False, - ) - apply_button.click( - fn=demo_list.filter, - inputs=[ - status, - hardware, - sleep_time, - multiple_replicas, - sdk, - visibility, - owner, - ], - outputs=df, - api_name=False, - ) - -if __name__ == "__main__": - demo.queue(api_open=False).launch() diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_conflict_r50_pfc03_filter04.py b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_conflict_r50_pfc03_filter04.py deleted file mode 100644 index a766f4154bb801b57d0f9519748b63941e349330..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_conflict_r50_pfc03_filter04.py +++ /dev/null @@ -1,28 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.margin_list = (1.0, 0.0, 0.4) -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 0.3 -config.interclass_filtering_threshold = 0.4 -config.fp16 = True -config.weight_decay = 5e-4 -config.batch_size = 128 -config.optimizer = "sgd" -config.lr = 0.1 -config.verbose = 2000 
-config.dali = False - -config.rec = "/train_tmp/WebFace12M_Conflict" -config.num_classes = 1017970 -config.num_image = 12720066 -config.num_epoch = 20 -config.warmup_epoch = config.num_epoch // 10 -config.val_targets = [] diff --git a/spaces/imperialwool/llama-cpp-api/gradio_app.py b/spaces/imperialwool/llama-cpp-api/gradio_app.py deleted file mode 100644 index 498bb004b69560579a083981fc9658f2e55ffe86..0000000000000000000000000000000000000000 --- a/spaces/imperialwool/llama-cpp-api/gradio_app.py +++ /dev/null @@ -1,120 +0,0 @@ -# Importing libraries -from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration -from llama_cpp import Llama -import gradio as gr -import psutil - -# Initing things -print("! DOWNLOADING TOKENIZER AND SETTING ALL UP !") -translator_tokenizer = M2M100Tokenizer.from_pretrained( # tokenizer for translator - "facebook/m2m100_418M", cache_dir="translator/" -) -print("! DOWNLOADING MODEL AND SETTING ALL UP !") -translator_model = M2M100ForConditionalGeneration.from_pretrained( # translator model - "facebook/m2m100_418M", cache_dir="translator/" -) -print("! SETTING MODEL IN EVALUATION MODE !") -translator_model.eval() -print("! INITING LLAMA MODEL !") -llm = Llama(model_path="./model.bin") # LLaMa model -llama_model_name = "TheBloke/dolphin-2.2.1-AshhLimaRP-Mistral-7B-GGUF" -print("! INITING DONE !") - -# Preparing things to work -translator_tokenizer.src_lang = "en" -title = "llama.cpp API" -desc = '''

    Hello, world!

    -This is a showcase of how to make your own server with a Llama 2 model.
    -I'm using a 7B model here just as an example, and this demo runs on CPU power only.
    -But you can use GPU power as well! (A minimal sketch of the underlying llama-cpp-python call is shown below.)
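For orientation, here is a minimal, self-contained sketch of the kind of llama-cpp-python call this Space wraps. The `./model.bin` path matches the code further down in this file; the prompt text and token limit are illustrative assumptions, not fixed parts of the API.

```python
# Minimal sketch: load a local GGUF model and generate a short completion.
from llama_cpp import Llama

llm = Llama(model_path="./model.bin")  # same path the Space uses; swap in your own model file
output = llm("User: Hello!\nAssistant:", max_tokens=64, stop=["User:"], echo=False)
print(output["choices"][0]["text"])
```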

    -

    How to GPU?

    -Change `CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS"` in the Dockerfile to `CMAKE_ARGS="-DLLAMA_CUBLAS=on"`. You can also try `DLLAMA_CLBLAST` or `DLLAMA_METAL`. A sketch of the equivalent rebuild step is shown below.
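As a rough illustration of what that Dockerfile change does, the sketch below rebuilds llama-cpp-python with cuBLAS support from Python. `CMAKE_ARGS` and `FORCE_CMAKE` are the environment variables the package's build process reads, but treat the exact flags as assumptions to verify against your llama-cpp-python version.

```python
# Hedged sketch: reinstall llama-cpp-python with GPU (cuBLAS) support enabled.
import os
import subprocess
import sys

env = dict(os.environ, CMAKE_ARGS="-DLLAMA_CUBLAS=on", FORCE_CMAKE="1")
subprocess.run(
    [sys.executable, "-m", "pip", "install",
     "--force-reinstall", "--no-cache-dir", "llama-cpp-python"],
    env=env,
    check=True,  # raise if the native build fails
)
```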

    -

    How to test it on your own machine?

    -You can install Docker, build the image, and run it. I made `run-docker.sh` for you. To stop the container, run `docker ps`, find the container's name, and run `docker stop _dockerContainerName_`.
    -Or you can follow the steps in the Dockerfile once by hand and try it on your own machine, outside of Docker. (A small helper that mirrors this Docker workflow is sketched below.)
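The sketch below is a hypothetical helper, not part of this Space's code; it mirrors the manual steps above and assumes the Dockerfile exposes Gradio's default port 7860.

```python
# Hypothetical helper mirroring the manual workflow: build, run, and stop the container.
import subprocess

def build_and_run(image_tag: str = "llama-cpp-api") -> str:
    subprocess.run(["docker", "build", "-t", image_tag, "."], check=True)
    run = subprocess.run(
        ["docker", "run", "-d", "-p", "7860:7860", image_tag],
        check=True, capture_output=True, text=True,
    )
    return run.stdout.strip()  # detached mode prints the container id

def stop_container(container_id: str) -> None:
    subprocess.run(["docker", "stop", container_id], check=True)
```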
    -
    ''' + f"Memory used: {psutil.virtual_memory()[2]}
    " + ''' -Powered by llama-cpp-python and Gradio.

    ''' - -''' - # Defining languages for translator (i just chose popular on my opinion languages!!!) - ru - Russian - uk - Ukranian - zh - Chinese - de - German - fr - French - hi - Hindi - it - Italian - ja - Japanese - es - Spanish - ar - Arabic -''' -languages = ["ru", "uk", "zh", "de", "fr", "hi", "it", "ja", "es", "ar"] - -# Loading prompt -with open('system.prompt', 'r', encoding='utf-8') as f: - prompt = f.read() - -def generate_answer(request: str, max_tokens: int = 256, language: str = "en", custom_prompt: str = None): - logs = f"Request: {request}\nMax tokens: {max_tokens}\nLanguage: {language}\nCustom prompt: {custom_prompt}\n" - try: - maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64 - if isinstance(custom_prompt, str) and len(custom_prompt.strip()) > 1 and custom_prompt.strip() not in ['', None, ' ']: - userPrompt = custom_prompt.replace("{prompt}", request) - else: - userPrompt = prompt.replace("{prompt}", request) - logs += f"\nFinal prompt: {userPrompt}\n" - except: - return "Not enough data! Check that you passed all needed data.", logs - - try: - # this shitty fix will be until i willnt figure out why sometimes there is empty output - counter = 1 - while counter <= 10: - logs += f"Attempt {counter} to generate answer...\n" - output = llm(userPrompt, max_tokens=maxTokens, stop=["User:"], echo=False) - text = output["choices"][0]["text"] - if len(text.strip()) > 1 and text.strip() not in ['', None, ' ']: - break - counter += 1 - logs += f"Final attempt: {counter}\n" - if len(text.strip()) > 1 and text.strip() not in ['', None, ' ']: - text = "Sorry, but something went wrong while generating answer. Try again or fix code. If you are maintainer of this space, look into logs." - - if language in languages and language != "en": - logs += f"\nTranslating from en to {language}" - encoded_input = translator_tokenizer(text, return_tensors="pt") - generated_tokens = translator_model.generate( - **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language) - ) - translated_text = translator_tokenizer.batch_decode( - generated_tokens, skip_special_tokens=True - )[0] - logs += f"\nTranslated: {translated_text}\nOriginal: {text}" - return translated_text, logs - logs += f"\nOriginal: {text}" - return text, logs - except Exception as e: - print(e) - return "Oops! Internal server error. Check the logs of space/instance.", logs - print("\n\n\n") - -print("! LOAD GRADIO INTERFACE !") -demo = gr.Interface( - fn=generate_answer, - inputs=[ - gr.components.Textbox(label="Input"), - gr.components.Number(value=256), - gr.components.Dropdown(label="Target Language", value="en", choices=["en"]+languages), - gr.components.Textbox(label="Custom system prompt"), - ], - outputs=[ - gr.components.Textbox(label="Output"), - gr.components.Textbox(label="Logs") - ], - title=title, - description=desc, - allow_flagging='never' -) -demo.queue() -print("! 
LAUNCHING GRADIO !") -demo.launch(server_name="0.0.0.0") \ No newline at end of file diff --git a/spaces/imseldrith/DeepFakeAI/DeepFakeAI/uis/components/face_selector.py b/spaces/imseldrith/DeepFakeAI/DeepFakeAI/uis/components/face_selector.py deleted file mode 100644 index b6f4c66e07c46ce0f961acbd99289e421cd4e619..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/DeepFakeAI/DeepFakeAI/uis/components/face_selector.py +++ /dev/null @@ -1,133 +0,0 @@ -from typing import List, Optional, Tuple, Any, Dict -from time import sleep - -import cv2 -import gradio - -import DeepFakeAI.choices -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.capturer import get_video_frame -from DeepFakeAI.face_analyser import get_many_faces -from DeepFakeAI.face_reference import clear_face_reference -from DeepFakeAI.typing import Frame, FaceRecognition -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import ComponentName, Update -from DeepFakeAI.utilities import is_image, is_video - -FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None -REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None -REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global FACE_RECOGNITION_DROPDOWN - global REFERENCE_FACE_POSITION_GALLERY - global REFERENCE_FACE_DISTANCE_SLIDER - - with gradio.Box(): - reference_face_gallery_args: Dict[str, Any] = { - 'label': wording.get('reference_face_gallery_label'), - 'height': 120, - 'object_fit': 'cover', - 'columns': 10, - 'allow_preview': False, - 'visible': 'reference' in DeepFakeAI.globals.face_recognition - } - if is_image(DeepFakeAI.globals.target_path): - reference_frame = cv2.imread(DeepFakeAI.globals.target_path) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - FACE_RECOGNITION_DROPDOWN = gradio.Dropdown( - label = wording.get('face_recognition_dropdown_label'), - choices = DeepFakeAI.choices.face_recognition, - value = DeepFakeAI.globals.face_recognition - ) - REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) - REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( - label = wording.get('reference_face_distance_slider_label'), - value = DeepFakeAI.globals.reference_face_distance, - maximum = 3, - step = 0.05, - visible = 'reference' in DeepFakeAI.globals.face_recognition - ) - ui.register_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN) - ui.register_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) - ui.register_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) - - -def listen() -> None: - FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) - REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position) - REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) - update_component_names : List[ComponentName] =\ - [ - 'target_file', - 'preview_frame_slider' - ] - for component_name in update_component_names: - component = ui.get_component(component_name) - if component: - component.change(update_face_reference_position, 
outputs = REFERENCE_FACE_POSITION_GALLERY) - select_component_names : List[ComponentName] =\ - [ - 'face_analyser_direction_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown' - ] - for component_name in select_component_names: - component = ui.get_component(component_name) - if component: - component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) - - -def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]: - if face_recognition == 'reference': - DeepFakeAI.globals.face_recognition = face_recognition - return gradio.update(visible = True), gradio.update(visible = True) - if face_recognition == 'many': - DeepFakeAI.globals.face_recognition = face_recognition - return gradio.update(visible = False), gradio.update(visible = False) - - -def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update: - clear_face_reference() - return update_face_reference_position(event.index) - - -def update_face_reference_position(reference_face_position : int = 0) -> Update: - sleep(0.2) - gallery_frames = [] - DeepFakeAI.globals.reference_face_position = reference_face_position - if is_image(DeepFakeAI.globals.target_path): - reference_frame = cv2.imread(DeepFakeAI.globals.target_path) - gallery_frames = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - gallery_frames = extract_gallery_frames(reference_frame) - if gallery_frames: - return gradio.update(value = gallery_frames) - return gradio.update(value = None) - - -def update_reference_face_distance(reference_face_distance : float) -> Update: - DeepFakeAI.globals.reference_face_distance = reference_face_distance - return gradio.update(value = reference_face_distance) - - -def extract_gallery_frames(reference_frame : Frame) -> List[Frame]: - crop_frames = [] - faces = get_many_faces(reference_frame) - for face in faces: - start_x, start_y, end_x, end_y = map(int, face['bbox']) - padding_x = int((end_x - start_x) * 0.25) - padding_y = int((end_y - start_y) * 0.25) - start_x = max(0, start_x - padding_x) - start_y = max(0, start_y - padding_y) - end_x = max(0, end_x + padding_x) - end_y = max(0, end_y + padding_y) - crop_frame = reference_frame[start_y:end_y, start_x:end_x] - crop_frames.append(ui.normalize_frame(crop_frame)) - return crop_frames diff --git a/spaces/inamXcontru/PoeticTTS/Bulanglang na Upo with Chicken A Simple and Savory Dish from Batangas.md b/spaces/inamXcontru/PoeticTTS/Bulanglang na Upo with Chicken A Simple and Savory Dish from Batangas.md deleted file mode 100644 index 85b22ff51018a66cfd335c8652c39204c5ff97ed..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Bulanglang na Upo with Chicken A Simple and Savory Dish from Batangas.md +++ /dev/null @@ -1,13 +0,0 @@ -
    -

    Bulanglang is a healthy dish consisting of different types of vegetables. There are different versions of Bulanglang; this particular recipe is the version from the province of Batangas. Making Bulanglang is quick and easy; all you need to do is boil water or rice washing (water used to clean rice) and put in the vegetables according to their cooking time. Thick and hard vegetables such as calabaza squash (kalabasa) and green papaya should be boiled first, while soft green vegetables like malunggay should be added last.

    -

    bulanglang na upo recipe chicken





    -

    If you are just starting to learn how to cook, I suggest that you read this article completely and review the recipe below. This basic recipe that I am sharing might be a good exercise for you to develop your cooking skills. You can also watch the video below that I recorded so that you can be guided on the step-by-step procedure that I did in order to accomplish this dish. Please do not hesitate to ask questions because that is the reason why I am here. Send me a question by commenting below.

    -


    Sarap!!! Salamat sa inyong website, mas ginanahang akong magluto mula ng mapanuod ang inyong website. Na try na din namin ang kansi recipe, pinoy carbonara, tinola and menudo. Lahat panalo!???????????????????????? Looking forward to try all your recipes. ????????????

    -

    A reader actually commented on my post about the market and asked suggested trying a recipe with kamungay, but when I asked for some help finding it, the guy at the grocery store told me that kamungay is the same as malunggay. Is that right? Anyways, thank you Melissa for inspiring me to go find something new even if I ended up with something that starts with a different letter.

    -

    Speaking of curry like I do allthetime, this recipe reminds me so much of those green curry lentils I made a while back with the spinach, coconut milk, and bulgur. Except instead of bulgur, I used the last of my brown rice blend for a little bed to serve it on. We just stirred it all up and kept the leftovers that way, too.

    -

    -

    This is one of my favorite Filipino dishes! I like the twists you added, and luckily my mom grows malunggay in her FL backyard! I just recently found your blog, bought your e-book, and am so inspired to get my own food blog going. I love how you incorporate these Filipino dishes and make them look so easy to make! Will pin and try this recipe out for sure!

    -

    This looks really delicious. I recently made a mung bean soup with celery and apples and it was super tasty. I agree that those malunggay leaves are super cute! I remember them from a previous lentil/curry recipe I think?

    -

    Bulanglang is an authentic vegetable recipe in the Philippines. Bulanglang dishes are sometimes mistaken for Pinakbet, but Bulanglang is much soupier and includes dark leafy vegetables, making it a healthy and diet-friendly recipe. Also, bulanglang is a very flexible dish, meaning you can add or vary the included vegetables in the recipe. Fish is sometimes included in the dish.

    -
    -
    \ No newline at end of file diff --git a/spaces/innat/Global.Wheat.Detection.MaskRCNN/config.py b/spaces/innat/Global.Wheat.Detection.MaskRCNN/config.py deleted file mode 100644 index 99e59220828f6e93d19b76d33c26a6f4e41c2ce6..0000000000000000000000000000000000000000 --- a/spaces/innat/Global.Wheat.Detection.MaskRCNN/config.py +++ /dev/null @@ -1,34 +0,0 @@ -from mrcnn.config import Config - - -class WheatDetectorConfig(Config): - # Give the configuration a recognizable name - NAME = "wheat" - GPU_COUNT = 1 - IMAGES_PER_GPU = 2 - BACKBONE = "resnet101" - NUM_CLASSES = 2 - IMAGE_RESIZE_MODE = "square" - IMAGE_MIN_DIM = 1024 - IMAGE_MAX_DIM = 1024 - STEPS_PER_EPOCH = 120 - BACKBONE_STRIDES = [4, 8, 16, 32, 64] - RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256) - LEARNING_RATE = 0.005 - WEIGHT_DECAY = 0.0005 - TRAIN_ROIS_PER_IMAGE = 350 - DETECTION_MIN_CONFIDENCE = 0.60 - VALIDATION_STEPS = 60 - MAX_GT_INSTANCES = 500 - LOSS_WEIGHTS = { - "rpn_class_loss": 1.0, - "rpn_bbox_loss": 1.0, - "mrcnn_class_loss": 1.0, - "mrcnn_bbox_loss": 1.0, - "mrcnn_mask_loss": 1.0, - } - - -class WheatInferenceConfig(WheatDetectorConfig): - GPU_COUNT = 1 - IMAGES_PER_GPU = 1 diff --git a/spaces/innat/Global.Wheat.Detection.MaskRCNN/mrcnn/model.py b/spaces/innat/Global.Wheat.Detection.MaskRCNN/mrcnn/model.py deleted file mode 100644 index 17c7f8b5acc8345fc27bf7f29d25d44533b02a43..0000000000000000000000000000000000000000 --- a/spaces/innat/Global.Wheat.Detection.MaskRCNN/mrcnn/model.py +++ /dev/null @@ -1,3242 +0,0 @@ -""" -Mask R-CNN -The main Mask R-CNN model implementation. - -Copyright (c) 2017 Matterport, Inc. -Licensed under the MIT License (see LICENSE for details) -Written by Waleed Abdulla -""" - -import datetime -import logging -import math -import multiprocessing -import os -import random -import re -from collections import OrderedDict - -# Requires TensorFlow 1.3+ and Keras 2.0.8+. -from distutils.version import LooseVersion - -import keras -import keras.backend as K -import keras.engine as KE -import keras.layers as KL -import keras.models as KM -import numpy as np -import tensorflow as tf - -from mrcnn import utils - -assert LooseVersion(tf.__version__) >= LooseVersion("1.3") -assert LooseVersion(keras.__version__) >= LooseVersion("2.0.8") - - -############################################################ -# Utility Functions -############################################################ - - -def log(text, array=None): - """Prints a text message. And, optionally, if a Numpy array is provided it - prints it's shape, min, and max values. - """ - if array is not None: - text = text.ljust(25) - text += "shape: {:20} ".format(str(array.shape)) - if array.size: - text += "min: {:10.5f} max: {:10.5f}".format(array.min(), array.max()) - else: - text += "min: {:10} max: {:10}".format("", "") - text += " {}".format(array.dtype) - print(text) - - -class BatchNorm(KL.BatchNormalization): - """Extends the Keras BatchNormalization class to allow a central place - to make changes if needed. - - Batch normalization has a negative effect on training if batches are small - so this layer is often frozen (via setting in Config class) and functions - as linear layer. - """ - - def call(self, inputs, training=None): - """ - Note about training values: - None: Train BN layers. This is the normal mode - False: Freeze BN layers. Good when batch size is small - True: (don't use). 
Set layer in training mode even when making inferences - """ - return super(self.__class__, self).call(inputs, training=training) - - -def compute_backbone_shapes(config, image_shape): - """Computes the width and height of each stage of the backbone network. - - Returns: - [N, (height, width)]. Where N is the number of stages - """ - if callable(config.BACKBONE): - return config.COMPUTE_BACKBONE_SHAPE(image_shape) - - # Currently supports ResNet only - assert config.BACKBONE in ["resnet50", "resnet101"] - return np.array( - [ - [ - int(math.ceil(image_shape[0] / stride)), - int(math.ceil(image_shape[1] / stride)), - ] - for stride in config.BACKBONE_STRIDES - ] - ) - - -############################################################ -# Resnet Graph -############################################################ - -# Code adopted from: -# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py - - -def identity_block( - input_tensor, kernel_size, filters, stage, block, use_bias=True, train_bn=True -): - """The identity_block is the block that has no conv layer at shortcut - # Arguments - input_tensor: input tensor - kernel_size: default 3, the kernel size of middle conv layer at main path - filters: list of integers, the nb_filters of 3 conv layer at main path - stage: integer, current stage label, used for generating layer names - block: 'a','b'..., current block label, used for generating layer names - use_bias: Boolean. To use or not use a bias in conv layers. - train_bn: Boolean. Train or freeze Batch Norm layers - """ - nb_filter1, nb_filter2, nb_filter3 = filters - conv_name_base = "res" + str(stage) + block + "_branch" - bn_name_base = "bn" + str(stage) + block + "_branch" - - x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + "2a", use_bias=use_bias)( - input_tensor - ) - x = BatchNorm(name=bn_name_base + "2a")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.Conv2D( - nb_filter2, - (kernel_size, kernel_size), - padding="same", - name=conv_name_base + "2b", - use_bias=use_bias, - )(x) - x = BatchNorm(name=bn_name_base + "2b")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + "2c", use_bias=use_bias)(x) - x = BatchNorm(name=bn_name_base + "2c")(x, training=train_bn) - - x = KL.Add()([x, input_tensor]) - x = KL.Activation("relu", name="res" + str(stage) + block + "_out")(x) - return x - - -def conv_block( - input_tensor, - kernel_size, - filters, - stage, - block, - strides=(2, 2), - use_bias=True, - train_bn=True, -): - """conv_block is the block that has a conv layer at shortcut - # Arguments - input_tensor: input tensor - kernel_size: default 3, the kernel size of middle conv layer at main path - filters: list of integers, the nb_filters of 3 conv layer at main path - stage: integer, current stage label, used for generating layer names - block: 'a','b'..., current block label, used for generating layer names - use_bias: Boolean. To use or not use a bias in conv layers. - train_bn: Boolean. 
Train or freeze Batch Norm layers - Note that from stage 3, the first conv layer at main path is with subsample=(2,2) - And the shortcut should have subsample=(2,2) as well - """ - nb_filter1, nb_filter2, nb_filter3 = filters - conv_name_base = "res" + str(stage) + block + "_branch" - bn_name_base = "bn" + str(stage) + block + "_branch" - - x = KL.Conv2D( - nb_filter1, - (1, 1), - strides=strides, - name=conv_name_base + "2a", - use_bias=use_bias, - )(input_tensor) - x = BatchNorm(name=bn_name_base + "2a")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.Conv2D( - nb_filter2, - (kernel_size, kernel_size), - padding="same", - name=conv_name_base + "2b", - use_bias=use_bias, - )(x) - x = BatchNorm(name=bn_name_base + "2b")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + "2c", use_bias=use_bias)(x) - x = BatchNorm(name=bn_name_base + "2c")(x, training=train_bn) - - shortcut = KL.Conv2D( - nb_filter3, - (1, 1), - strides=strides, - name=conv_name_base + "1", - use_bias=use_bias, - )(input_tensor) - shortcut = BatchNorm(name=bn_name_base + "1")(shortcut, training=train_bn) - - x = KL.Add()([x, shortcut]) - x = KL.Activation("relu", name="res" + str(stage) + block + "_out")(x) - return x - - -def resnet_graph(input_image, architecture, stage5=False, train_bn=True): - """Build a ResNet graph. - architecture: Can be resnet50 or resnet101 - stage5: Boolean. If False, stage5 of the network is not created - train_bn: Boolean. Train or freeze Batch Norm layers - """ - assert architecture in ["resnet50", "resnet101"] - # Stage 1 - x = KL.ZeroPadding2D((3, 3))(input_image) - x = KL.Conv2D(64, (7, 7), strides=(2, 2), name="conv1", use_bias=True)(x) - x = BatchNorm(name="bn_conv1")(x, training=train_bn) - x = KL.Activation("relu")(x) - C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x) - # Stage 2 - x = conv_block( - x, 3, [64, 64, 256], stage=2, block="a", strides=(1, 1), train_bn=train_bn - ) - x = identity_block(x, 3, [64, 64, 256], stage=2, block="b", train_bn=train_bn) - C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block="c", train_bn=train_bn) - # Stage 3 - x = conv_block(x, 3, [128, 128, 512], stage=3, block="a", train_bn=train_bn) - x = identity_block(x, 3, [128, 128, 512], stage=3, block="b", train_bn=train_bn) - x = identity_block(x, 3, [128, 128, 512], stage=3, block="c", train_bn=train_bn) - C3 = x = identity_block( - x, 3, [128, 128, 512], stage=3, block="d", train_bn=train_bn - ) - # Stage 4 - x = conv_block(x, 3, [256, 256, 1024], stage=4, block="a", train_bn=train_bn) - block_count = {"resnet50": 5, "resnet101": 22}[architecture] - for i in range(block_count): - x = identity_block( - x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn - ) - C4 = x - # Stage 5 - if stage5: - x = conv_block(x, 3, [512, 512, 2048], stage=5, block="a", train_bn=train_bn) - x = identity_block( - x, 3, [512, 512, 2048], stage=5, block="b", train_bn=train_bn - ) - C5 = x = identity_block( - x, 3, [512, 512, 2048], stage=5, block="c", train_bn=train_bn - ) - else: - C5 = None - return [C1, C2, C3, C4, C5] - - -############################################################ -# Proposal Layer -############################################################ - - -def apply_box_deltas_graph(boxes, deltas): - """Applies the given deltas to the given boxes. 
- boxes: [N, (y1, x1, y2, x2)] boxes to update - deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply - """ - # Convert to y, x, h, w - height = boxes[:, 2] - boxes[:, 0] - width = boxes[:, 3] - boxes[:, 1] - center_y = boxes[:, 0] + 0.5 * height - center_x = boxes[:, 1] + 0.5 * width - # Apply deltas - center_y += deltas[:, 0] * height - center_x += deltas[:, 1] * width - height *= tf.exp(deltas[:, 2]) - width *= tf.exp(deltas[:, 3]) - # Convert back to y1, x1, y2, x2 - y1 = center_y - 0.5 * height - x1 = center_x - 0.5 * width - y2 = y1 + height - x2 = x1 + width - result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out") - return result - - -def clip_boxes_graph(boxes, window): - """ - boxes: [N, (y1, x1, y2, x2)] - window: [4] in the form y1, x1, y2, x2 - """ - # Split - wy1, wx1, wy2, wx2 = tf.split(window, 4) - y1, x1, y2, x2 = tf.split(boxes, 4, axis=1) - # Clip - y1 = tf.maximum(tf.minimum(y1, wy2), wy1) - x1 = tf.maximum(tf.minimum(x1, wx2), wx1) - y2 = tf.maximum(tf.minimum(y2, wy2), wy1) - x2 = tf.maximum(tf.minimum(x2, wx2), wx1) - clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes") - clipped.set_shape((clipped.shape[0], 4)) - return clipped - - -class ProposalLayer(KE.Layer): - """Receives anchor scores and selects a subset to pass as proposals - to the second stage. Filtering is done based on anchor scores and - non-max suppression to remove overlaps. It also applies bounding - box refinement deltas to anchors. - - Inputs: - rpn_probs: [batch, num_anchors, (bg prob, fg prob)] - rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))] - anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates - - Returns: - Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)] - """ - - def __init__(self, proposal_count, nms_threshold, config=None, **kwargs): - super(ProposalLayer, self).__init__(**kwargs) - self.config = config - self.proposal_count = proposal_count - self.nms_threshold = nms_threshold - - def call(self, inputs): - # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1] - scores = inputs[0][:, :, 1] - # Box deltas [batch, num_rois, 4] - deltas = inputs[1] - deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4]) - # Anchors - anchors = inputs[2] - - # Improve performance by trimming to top anchors by score - # and doing the rest on the smaller subset. - pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1]) - ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True, name="top_anchors").indices - scores = utils.batch_slice( - [scores, ix], lambda x, y: tf.gather(x, y), self.config.IMAGES_PER_GPU - ) - deltas = utils.batch_slice( - [deltas, ix], lambda x, y: tf.gather(x, y), self.config.IMAGES_PER_GPU - ) - pre_nms_anchors = utils.batch_slice( - [anchors, ix], - lambda a, x: tf.gather(a, x), - self.config.IMAGES_PER_GPU, - names=["pre_nms_anchors"], - ) - - # Apply deltas to anchors to get refined anchors. - # [batch, N, (y1, x1, y2, x2)] - boxes = utils.batch_slice( - [pre_nms_anchors, deltas], - lambda x, y: apply_box_deltas_graph(x, y), - self.config.IMAGES_PER_GPU, - names=["refined_anchors"], - ) - - # Clip to image boundaries. Since we're in normalized coordinates, - # clip to 0..1 range. 
[batch, N, (y1, x1, y2, x2)] - window = np.array([0, 0, 1, 1], dtype=np.float32) - boxes = utils.batch_slice( - boxes, - lambda x: clip_boxes_graph(x, window), - self.config.IMAGES_PER_GPU, - names=["refined_anchors_clipped"], - ) - - # Filter out small boxes - # According to Xinlei Chen's paper, this reduces detection accuracy - # for small objects, so we're skipping it. - - # Non-max suppression - def nms(boxes, scores): - indices = tf.image.non_max_suppression( - boxes, - scores, - self.proposal_count, - self.nms_threshold, - name="rpn_non_max_suppression", - ) - proposals = tf.gather(boxes, indices) - # Pad if needed - padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0) - proposals = tf.pad(proposals, [(0, padding), (0, 0)]) - return proposals - - proposals = utils.batch_slice([boxes, scores], nms, self.config.IMAGES_PER_GPU) - return proposals - - def compute_output_shape(self, input_shape): - return (None, self.proposal_count, 4) - - -############################################################ -# ROIAlign Layer -############################################################ - - -def log2_graph(x): - """Implementation of Log2. TF doesn't have a native implementation.""" - return tf.log(x) / tf.log(2.0) - - -class PyramidROIAlign(KE.Layer): - """Implements ROI Pooling on multiple levels of the feature pyramid. - - Params: - - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7] - - Inputs: - - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized - coordinates. Possibly padded with zeros if not enough - boxes to fill the array. - - image_meta: [batch, (meta data)] Image details. See compose_image_meta() - - feature_maps: List of feature maps from different levels of the pyramid. - Each is [batch, height, width, channels] - - Output: - Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels]. - The width and height are those specific in the pool_shape in the layer - constructor. - """ - - def __init__(self, pool_shape, **kwargs): - super(PyramidROIAlign, self).__init__(**kwargs) - self.pool_shape = tuple(pool_shape) - - def call(self, inputs): - # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords - boxes = inputs[0] - - # Image meta - # Holds details about the image. See compose_image_meta() - image_meta = inputs[1] - - # Feature Maps. List of feature maps from different level of the - # feature pyramid. Each is [batch, height, width, channels] - feature_maps = inputs[2:] - - # Assign each ROI to a level in the pyramid based on the ROI area. - y1, x1, y2, x2 = tf.split(boxes, 4, axis=2) - h = y2 - y1 - w = x2 - x1 - # Use shape of first image. Images in a batch must have the same size. - image_shape = parse_image_meta_graph(image_meta)["image_shape"][0] - # Equation 1 in the Feature Pyramid Networks paper. Account for - # the fact that our coordinates are normalized here. - # e.g. a 224x224 ROI (in pixels) maps to P4 - image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32) - roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area))) - roi_level = tf.minimum( - 5, tf.maximum(2, 4 + tf.cast(tf.round(roi_level), tf.int32)) - ) - roi_level = tf.squeeze(roi_level, 2) - - # Loop through levels and apply ROI pooling to each. P2 to P5. - pooled = [] - box_to_level = [] - for i, level in enumerate(range(2, 6)): - ix = tf.where(tf.equal(roi_level, level)) - level_boxes = tf.gather_nd(boxes, ix) - - # Box indices for crop_and_resize. 
- box_indices = tf.cast(ix[:, 0], tf.int32) - - # Keep track of which box is mapped to which level - box_to_level.append(ix) - - # Stop gradient propogation to ROI proposals - level_boxes = tf.stop_gradient(level_boxes) - box_indices = tf.stop_gradient(box_indices) - - # Crop and Resize - # From Mask R-CNN paper: "We sample four regular locations, so - # that we can evaluate either max or average pooling. In fact, - # interpolating only a single value at each bin center (without - # pooling) is nearly as effective." - # - # Here we use the simplified approach of a single value per bin, - # which is how it's done in tf.crop_and_resize() - # Result: [batch * num_boxes, pool_height, pool_width, channels] - pooled.append( - tf.image.crop_and_resize( - feature_maps[i], - level_boxes, - box_indices, - self.pool_shape, - method="bilinear", - ) - ) - - # Pack pooled features into one tensor - pooled = tf.concat(pooled, axis=0) - - # Pack box_to_level mapping into one array and add another - # column representing the order of pooled boxes - box_to_level = tf.concat(box_to_level, axis=0) - box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1) - box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range], axis=1) - - # Rearrange pooled features to match the order of the original boxes - # Sort box_to_level by batch then box index - # TF doesn't have a way to sort by two columns, so merge them and sort. - sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1] - ix = tf.nn.top_k(sorting_tensor, k=tf.shape(box_to_level)[0]).indices[::-1] - ix = tf.gather(box_to_level[:, 2], ix) - pooled = tf.gather(pooled, ix) - - # Re-add the batch dimension - shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0) - pooled = tf.reshape(pooled, shape) - return pooled - - def compute_output_shape(self, input_shape): - return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1],) - - -############################################################ -# Detection Target Layer -############################################################ - - -def overlaps_graph(boxes1, boxes2): - """Computes IoU overlaps between two sets of boxes. - boxes1, boxes2: [N, (y1, x1, y2, x2)]. - """ - # 1. Tile boxes2 and repeat boxes1. This allows us to compare - # every boxes1 against every boxes2 without loops. - # TF doesn't have an equivalent to np.repeat() so simulate it - # using tf.tile() and tf.reshape. - b1 = tf.reshape( - tf.tile(tf.expand_dims(boxes1, 1), [1, 1, tf.shape(boxes2)[0]]), [-1, 4] - ) - b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1]) - # 2. Compute intersections - b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1) - b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1) - y1 = tf.maximum(b1_y1, b2_y1) - x1 = tf.maximum(b1_x1, b2_x1) - y2 = tf.minimum(b1_y2, b2_y2) - x2 = tf.minimum(b1_x2, b2_x2) - intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0) - # 3. Compute unions - b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) - b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) - union = b1_area + b2_area - intersection - # 4. Compute IoU and reshape to [boxes1, boxes2] - iou = intersection / union - overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]]) - return overlaps - - -def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config): - """Generates detection targets for one image. Subsamples proposals and - generates target class IDs, bounding box deltas, and masks for each. 
- - Inputs: - proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might - be zero padded if there are not enough proposals. - gt_class_ids: [MAX_GT_INSTANCES] int class IDs - gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates. - gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type. - - Returns: Target ROIs and corresponding class IDs, bounding box shifts, - and masks. - rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates - class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded. - deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))] - masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox - boundaries and resized to neural network output size. - - Note: Returned arrays might be zero padded if not enough target ROIs. - """ - # Assertions - asserts = [ - tf.Assert( - tf.greater(tf.shape(proposals)[0], 0), [proposals], name="roi_assertion" - ), - ] - with tf.control_dependencies(asserts): - proposals = tf.identity(proposals) - - # Remove zero padding - proposals, _ = trim_zeros_graph(proposals, name="trim_proposals") - gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes") - gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros, name="trim_gt_class_ids") - gt_masks = tf.gather( - gt_masks, tf.where(non_zeros)[:, 0], axis=2, name="trim_gt_masks" - ) - - # Handle COCO crowds - # A crowd box in COCO is a bounding box around several instances. Exclude - # them from training. A crowd box is given a negative class ID. - crowd_ix = tf.where(gt_class_ids < 0)[:, 0] - non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0] - crowd_boxes = tf.gather(gt_boxes, crowd_ix) - gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix) - gt_boxes = tf.gather(gt_boxes, non_crowd_ix) - gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2) - - # Compute overlaps matrix [proposals, gt_boxes] - overlaps = overlaps_graph(proposals, gt_boxes) - - # Compute overlaps with crowd boxes [proposals, crowd_boxes] - crowd_overlaps = overlaps_graph(proposals, crowd_boxes) - crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1) - no_crowd_bool = crowd_iou_max < 0.001 - - # Determine positive and negative ROIs - roi_iou_max = tf.reduce_max(overlaps, axis=1) - # 1. Positive ROIs are those with >= 0.5 IoU with a GT box - positive_roi_bool = roi_iou_max >= 0.5 - positive_indices = tf.where(positive_roi_bool)[:, 0] - # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds. - negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0] - - # Subsample ROIs. Aim for 33% positive - # Positive ROIs - positive_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO) - positive_indices = tf.random_shuffle(positive_indices)[:positive_count] - positive_count = tf.shape(positive_indices)[0] - # Negative ROIs. Add enough to maintain positive:negative ratio. - r = 1.0 / config.ROI_POSITIVE_RATIO - negative_count = ( - tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count - ) - negative_indices = tf.random_shuffle(negative_indices)[:negative_count] - # Gather selected ROIs - positive_rois = tf.gather(proposals, positive_indices) - negative_rois = tf.gather(proposals, negative_indices) - - # Assign positive ROIs to GT boxes. 
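- # Each sampled positive ROI is matched to the ground-truth box it overlaps
- # most (row-wise argmax over the IoU matrix); that GT box then supplies the
- # ROI's class ID, bbox refinement target, and mask target below. For example,
- # with TRAIN_ROIS_PER_IMAGE=200 and ROI_POSITIVE_RATIO=0.33, the sampling
- # above aims for roughly 66 positive and up to 134 negative ROIs per image.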
- positive_overlaps = tf.gather(overlaps, positive_indices) - roi_gt_box_assignment = tf.cond( - tf.greater(tf.shape(positive_overlaps)[1], 0), - true_fn=lambda: tf.argmax(positive_overlaps, axis=1), - false_fn=lambda: tf.cast(tf.constant([]), tf.int64), - ) - roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment) - roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment) - - # Compute bbox refinement for positive ROIs - deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes) - deltas /= config.BBOX_STD_DEV - - # Assign positive ROIs to GT masks - # Permute masks to [N, height, width, 1] - transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1) - # Pick the right mask for each ROI - roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment) - - # Compute mask targets - boxes = positive_rois - if config.USE_MINI_MASK: - # Transform ROI coordinates from normalized image space - # to normalized mini-mask space. - y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1) - gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1) - gt_h = gt_y2 - gt_y1 - gt_w = gt_x2 - gt_x1 - y1 = (y1 - gt_y1) / gt_h - x1 = (x1 - gt_x1) / gt_w - y2 = (y2 - gt_y1) / gt_h - x2 = (x2 - gt_x1) / gt_w - boxes = tf.concat([y1, x1, y2, x2], 1) - box_ids = tf.range(0, tf.shape(roi_masks)[0]) - masks = tf.image.crop_and_resize( - tf.cast(roi_masks, tf.float32), boxes, box_ids, config.MASK_SHAPE - ) - # Remove the extra dimension from masks. - masks = tf.squeeze(masks, axis=3) - - # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with - # binary cross entropy loss. - masks = tf.round(masks) - - # Append negative ROIs and pad bbox deltas and masks that - # are not used for negative ROIs with zeros. - rois = tf.concat([positive_rois, negative_rois], axis=0) - N = tf.shape(negative_rois)[0] - P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0) - rois = tf.pad(rois, [(0, P), (0, 0)]) - roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)]) - roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)]) - deltas = tf.pad(deltas, [(0, N + P), (0, 0)]) - masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)]) - - return rois, roi_gt_class_ids, deltas, masks - - -class DetectionTargetLayer(KE.Layer): - """Subsamples proposals and generates target box refinement, class_ids, - and masks for each. - - Inputs: - proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might - be zero padded if there are not enough proposals. - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs. - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized - coordinates. - gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type - - Returns: Target ROIs and corresponding class IDs, bounding box shifts, - and masks. - rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized - coordinates - target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs. - target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)] - target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width] - Masks cropped to bbox boundaries and resized to neural - network output size. - - Note: Returned arrays might be zero padded if not enough target ROIs. 
- """ - - def __init__(self, config, **kwargs): - super(DetectionTargetLayer, self).__init__(**kwargs) - self.config = config - - def call(self, inputs): - proposals = inputs[0] - gt_class_ids = inputs[1] - gt_boxes = inputs[2] - gt_masks = inputs[3] - - # Slice the batch and run a graph for each slice - # TODO: Rename target_bbox to target_deltas for clarity - names = ["rois", "target_class_ids", "target_bbox", "target_mask"] - outputs = utils.batch_slice( - [proposals, gt_class_ids, gt_boxes, gt_masks], - lambda w, x, y, z: detection_targets_graph(w, x, y, z, self.config), - self.config.IMAGES_PER_GPU, - names=names, - ) - return outputs - - def compute_output_shape(self, input_shape): - return [ - (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois - (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids - (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas - ( - None, - self.config.TRAIN_ROIS_PER_IMAGE, - self.config.MASK_SHAPE[0], - self.config.MASK_SHAPE[1], - ), # masks - ] - - def compute_mask(self, inputs, mask=None): - return [None, None, None, None] - - -############################################################ -# Detection Layer -############################################################ - - -def refine_detections_graph(rois, probs, deltas, window, config): - """Refine classified proposals and filter overlaps and return final - detections. - - Inputs: - rois: [N, (y1, x1, y2, x2)] in normalized coordinates - probs: [N, num_classes]. Class probabilities. - deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific - bounding box deltas. - window: (y1, x1, y2, x2) in normalized coordinates. The part of the image - that contains the image excluding the padding. - - Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where - coordinates are normalized. - """ - # Class IDs per ROI - class_ids = tf.argmax(probs, axis=1, output_type=tf.int32) - # Class probability of the top class of each ROI - indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1) - class_scores = tf.gather_nd(probs, indices) - # Class-specific bounding box deltas - deltas_specific = tf.gather_nd(deltas, indices) - # Apply bounding box deltas - # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates - refined_rois = apply_box_deltas_graph(rois, deltas_specific * config.BBOX_STD_DEV) - # Clip boxes to image window - refined_rois = clip_boxes_graph(refined_rois, window) - - # TODO: Filter out boxes with zero area - - # Filter out background boxes - keep = tf.where(class_ids > 0)[:, 0] - # Filter out low confidence boxes - if config.DETECTION_MIN_CONFIDENCE: - conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0] - keep = tf.sets.set_intersection( - tf.expand_dims(keep, 0), tf.expand_dims(conf_keep, 0) - ) - keep = tf.sparse_tensor_to_dense(keep)[0] - - # Apply per-class NMS - # 1. 
Prepare variables - pre_nms_class_ids = tf.gather(class_ids, keep) - pre_nms_scores = tf.gather(class_scores, keep) - pre_nms_rois = tf.gather(refined_rois, keep) - unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0] - - def nms_keep_map(class_id): - """Apply Non-Maximum Suppression on ROIs of the given class.""" - # Indices of ROIs of the given class - ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0] - # Apply NMS - class_keep = tf.image.non_max_suppression( - tf.gather(pre_nms_rois, ixs), - tf.gather(pre_nms_scores, ixs), - max_output_size=config.DETECTION_MAX_INSTANCES, - iou_threshold=config.DETECTION_NMS_THRESHOLD, - ) - # Map indices - class_keep = tf.gather(keep, tf.gather(ixs, class_keep)) - # Pad with -1 so returned tensors have the same shape - gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0] - class_keep = tf.pad(class_keep, [(0, gap)], mode="CONSTANT", constant_values=-1) - # Set shape so map_fn() can infer result shape - class_keep.set_shape([config.DETECTION_MAX_INSTANCES]) - return class_keep - - # 2. Map over class IDs - nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids, dtype=tf.int64) - # 3. Merge results into one list, and remove -1 padding - nms_keep = tf.reshape(nms_keep, [-1]) - nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0]) - # 4. Compute intersection between keep and nms_keep - keep = tf.sets.set_intersection( - tf.expand_dims(keep, 0), tf.expand_dims(nms_keep, 0) - ) - keep = tf.sparse_tensor_to_dense(keep)[0] - # Keep top detections - roi_count = config.DETECTION_MAX_INSTANCES - class_scores_keep = tf.gather(class_scores, keep) - num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count) - top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1] - keep = tf.gather(keep, top_ids) - - # Arrange output as [N, (y1, x1, y2, x2, class_id, score)] - # Coordinates are normalized. - detections = tf.concat( - [ - tf.gather(refined_rois, keep), - tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis], - tf.gather(class_scores, keep)[..., tf.newaxis], - ], - axis=1, - ) - - # Pad with zeros if detections < DETECTION_MAX_INSTANCES - gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0] - detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT") - return detections - - -class DetectionLayer(KE.Layer): - """Takes classified proposal boxes and their bounding box deltas and - returns the final detection boxes. - - Returns: - [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where - coordinates are normalized. - """ - - def __init__(self, config=None, **kwargs): - super(DetectionLayer, self).__init__(**kwargs) - self.config = config - - def call(self, inputs): - rois = inputs[0] - mrcnn_class = inputs[1] - mrcnn_bbox = inputs[2] - image_meta = inputs[3] - - # Get windows of images in normalized coordinates. Windows are the area - # in the image that excludes the padding. - # Use the shape of the first image in the batch to normalize the window - # because we know that all images get resized to the same size. 
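- # For instance, if an 800x600 image was resized and padded onto a 1024x1024
- # canvas, its content occupies the pixel window (0, 128, 1024, 896), which
- # norm_boxes_graph() turns into roughly (0.0, 0.125, 1.0, 0.875).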
- m = parse_image_meta_graph(image_meta) - image_shape = m["image_shape"][0] - window = norm_boxes_graph(m["window"], image_shape[:2]) - - # Run detection refinement graph on each item in the batch - detections_batch = utils.batch_slice( - [rois, mrcnn_class, mrcnn_bbox, window], - lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config), - self.config.IMAGES_PER_GPU, - ) - - # Reshape output - # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in - # normalized coordinates - return tf.reshape( - detections_batch, - [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6], - ) - - def compute_output_shape(self, input_shape): - return (None, self.config.DETECTION_MAX_INSTANCES, 6) - - -############################################################ -# Region Proposal Network (RPN) -############################################################ - - -def rpn_graph(feature_map, anchors_per_location, anchor_stride): - """Builds the computation graph of Region Proposal Network. - - feature_map: backbone features [batch, height, width, depth] - anchors_per_location: number of anchors per pixel in the feature map - anchor_stride: Controls the density of anchors. Typically 1 (anchors for - every pixel in the feature map), or 2 (every other pixel). - - Returns: - rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax) - rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities. - rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be - applied to anchors. - """ - # TODO: check if stride of 2 causes alignment issues if the feature map - # is not even. - # Shared convolutional base of the RPN - shared = KL.Conv2D( - 512, - (3, 3), - padding="same", - activation="relu", - strides=anchor_stride, - name="rpn_conv_shared", - )(feature_map) - - # Anchor Score. [batch, height, width, anchors per location * 2]. - x = KL.Conv2D( - 2 * anchors_per_location, - (1, 1), - padding="valid", - activation="linear", - name="rpn_class_raw", - )(shared) - - # Reshape to [batch, anchors, 2] - rpn_class_logits = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x) - - # Softmax on last dimension of BG/FG. - rpn_probs = KL.Activation("softmax", name="rpn_class_xxx")(rpn_class_logits) - - # Bounding box refinement. [batch, H, W, anchors per location * depth] - # where depth is [x, y, log(w), log(h)] - x = KL.Conv2D( - anchors_per_location * 4, - (1, 1), - padding="valid", - activation="linear", - name="rpn_bbox_pred", - )(shared) - - # Reshape to [batch, anchors, 4] - rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x) - - return [rpn_class_logits, rpn_probs, rpn_bbox] - - -def build_rpn_model(anchor_stride, anchors_per_location, depth): - """Builds a Keras model of the Region Proposal Network. - It wraps the RPN graph so it can be used multiple times with shared - weights. - - anchors_per_location: number of anchors per pixel in the feature map - anchor_stride: Controls the density of anchors. Typically 1 (anchors for - every pixel in the feature map), or 2 (every other pixel). - depth: Depth of the backbone feature map. - - Returns a Keras Model object. The model outputs, when called, are: - rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax) - rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities. 
- rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be - applied to anchors. - """ - input_feature_map = KL.Input( - shape=[None, None, depth], name="input_rpn_feature_map" - ) - outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride) - return KM.Model([input_feature_map], outputs, name="rpn_model") - - -############################################################ -# Feature Pyramid Network Heads -############################################################ - - -def fpn_classifier_graph( - rois, - feature_maps, - image_meta, - pool_size, - num_classes, - train_bn=True, - fc_layers_size=1024, -): - """Builds the computation graph of the feature pyramid network classifier - and regressor heads. - - rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized - coordinates. - feature_maps: List of feature maps from different layers of the pyramid, - [P2, P3, P4, P5]. Each has a different resolution. - image_meta: [batch, (meta data)] Image details. See compose_image_meta() - pool_size: The width of the square feature map generated from ROI Pooling. - num_classes: number of classes, which determines the depth of the results - train_bn: Boolean. Train or freeze Batch Norm layers - fc_layers_size: Size of the 2 FC layers - - Returns: - logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax) - probs: [batch, num_rois, NUM_CLASSES] classifier probabilities - bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to - proposal boxes - """ - # ROI Pooling - # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels] - x = PyramidROIAlign([pool_size, pool_size], name="roi_align_classifier")( - [rois, image_meta] + feature_maps - ) - # Two 1024 FC layers (implemented with Conv2D for consistency) - x = KL.TimeDistributed( - KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"), - name="mrcnn_class_conv1", - )(x) - x = KL.TimeDistributed(BatchNorm(), name="mrcnn_class_bn1")(x, training=train_bn) - x = KL.Activation("relu")(x) - x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)), name="mrcnn_class_conv2")( - x - ) - x = KL.TimeDistributed(BatchNorm(), name="mrcnn_class_bn2")(x, training=train_bn) - x = KL.Activation("relu")(x) - - shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2), name="pool_squeeze")(x) - - # Classifier head - mrcnn_class_logits = KL.TimeDistributed( - KL.Dense(num_classes), name="mrcnn_class_logits" - )(shared) - mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"), name="mrcnn_class")( - mrcnn_class_logits - ) - - # BBox head - # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))] - x = KL.TimeDistributed( - KL.Dense(num_classes * 4, activation="linear"), name="mrcnn_bbox_fc" - )(shared) - # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] - s = K.int_shape(x) - mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x) - - return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox - - -def build_fpn_mask_graph( - rois, feature_maps, image_meta, pool_size, num_classes, train_bn=True -): - """Builds the computation graph of the mask head of Feature Pyramid Network. - - rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized - coordinates. - feature_maps: List of feature maps from different layers of the pyramid, - [P2, P3, P4, P5]. Each has a different resolution. - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta() - pool_size: The width of the square feature map generated from ROI Pooling. - num_classes: number of classes, which determines the depth of the results - train_bn: Boolean. Train or freeze Batch Norm layers - - Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES] - """ - # ROI Pooling - # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels] - x = PyramidROIAlign([pool_size, pool_size], name="roi_align_mask")( - [rois, image_meta] + feature_maps - ) - - # Conv layers - x = KL.TimeDistributed( - KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv1" - )(x) - x = KL.TimeDistributed(BatchNorm(), name="mrcnn_mask_bn1")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.TimeDistributed( - KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv2" - )(x) - x = KL.TimeDistributed(BatchNorm(), name="mrcnn_mask_bn2")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.TimeDistributed( - KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv3" - )(x) - x = KL.TimeDistributed(BatchNorm(), name="mrcnn_mask_bn3")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.TimeDistributed( - KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv4" - )(x) - x = KL.TimeDistributed(BatchNorm(), name="mrcnn_mask_bn4")(x, training=train_bn) - x = KL.Activation("relu")(x) - - x = KL.TimeDistributed( - KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"), - name="mrcnn_mask_deconv", - )(x) - x = KL.TimeDistributed( - KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"), - name="mrcnn_mask", - )(x) - return x - - -############################################################ -# Loss Functions -############################################################ - - -def smooth_l1_loss(y_true, y_pred): - """Implements Smooth-L1 loss. - y_true and y_pred are typically: [N, 4], but could be any shape. - """ - diff = K.abs(y_true - y_pred) - less_than_one = K.cast(K.less(diff, 1.0), "float32") - loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5) - return loss - - -def rpn_class_loss_graph(rpn_match, rpn_class_logits): - """RPN anchor classifier loss. - - rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive, - -1=negative, 0=neutral anchor. - rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG. - """ - # Squeeze last dim to simplify - rpn_match = tf.squeeze(rpn_match, -1) - # Get anchor classes. Convert the -1/+1 match to 0/1 values. - anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32) - # Positive and Negative anchors contribute to the loss, - # but neutral anchors (match value = 0) don't. - indices = tf.where(K.not_equal(rpn_match, 0)) - # Pick rows that contribute to the loss and filter out the rest. - rpn_class_logits = tf.gather_nd(rpn_class_logits, indices) - anchor_class = tf.gather_nd(anchor_class, indices) - # Cross entropy loss - loss = K.sparse_categorical_crossentropy( - target=anchor_class, output=rpn_class_logits, from_logits=True - ) - loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0)) - return loss - - -def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox): - """Return the RPN bounding box loss graph. - - config: the model config object. - target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))]. - Uses 0 padding to fill in unsed bbox deltas. - rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive, - -1=negative, 0=neutral anchor. 
- rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] - """ - # Positive anchors contribute to the loss, but negative and - # neutral anchors (match value of 0 or -1) don't. - rpn_match = K.squeeze(rpn_match, -1) - indices = tf.where(K.equal(rpn_match, 1)) - - # Pick bbox deltas that contribute to the loss - rpn_bbox = tf.gather_nd(rpn_bbox, indices) - - # Trim target bounding box deltas to the same length as rpn_bbox. - batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1) - target_bbox = batch_pack_graph(target_bbox, batch_counts, config.IMAGES_PER_GPU) - - loss = smooth_l1_loss(target_bbox, rpn_bbox) - - loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0)) - return loss - - -def mrcnn_class_loss_graph(target_class_ids, pred_class_logits, active_class_ids): - """Loss for the classifier head of Mask RCNN. - - target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero - padding to fill in the array. - pred_class_logits: [batch, num_rois, num_classes] - active_class_ids: [batch, num_classes]. Has a value of 1 for - classes that are in the dataset of the image, and 0 - for classes that are not in the dataset. - """ - # During model building, Keras calls this function with - # target_class_ids of type float32. Unclear why. Cast it - # to int to get around it. - target_class_ids = tf.cast(target_class_ids, "int64") - - # Find predictions of classes that are not in the dataset. - pred_class_ids = tf.argmax(pred_class_logits, axis=2) - # TODO: Update this line to work with batch > 1. Right now it assumes all - # images in a batch have the same active_class_ids - pred_active = tf.gather(active_class_ids[0], pred_class_ids) - - # Loss - loss = tf.nn.sparse_softmax_cross_entropy_with_logits( - labels=target_class_ids, logits=pred_class_logits - ) - - # Erase losses of predictions of classes that are not in the active - # classes of the image. - loss = loss * pred_active - - # Computer loss mean. Use only predictions that contribute - # to the loss to get a correct mean. - loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active) - return loss - - -def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox): - """Loss for Mask R-CNN bounding box refinement. - - target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))] - target_class_ids: [batch, num_rois]. Integer class IDs. - pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))] - """ - # Reshape to merge batch and roi dimensions for simplicity. - target_class_ids = K.reshape(target_class_ids, (-1,)) - target_bbox = K.reshape(target_bbox, (-1, 4)) - pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4)) - - # Only positive ROIs contribute to the loss. And only - # the right class_id of each ROI. Get their indices. - positive_roi_ix = tf.where(target_class_ids > 0)[:, 0] - positive_roi_class_ids = tf.cast( - tf.gather(target_class_ids, positive_roi_ix), tf.int64 - ) - indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1) - - # Gather the deltas (predicted and true) that contribute to loss - target_bbox = tf.gather(target_bbox, positive_roi_ix) - pred_bbox = tf.gather_nd(pred_bbox, indices) - - # Smooth-L1 Loss - loss = K.switch( - tf.size(target_bbox) > 0, - smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox), - tf.constant(0.0), - ) - loss = K.mean(loss) - return loss - - -def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks): - """Mask binary cross-entropy loss for the masks head. - - target_masks: [batch, num_rois, height, width]. 
- A float32 tensor of values 0 or 1. Uses zero padding to fill array. - target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded. - pred_masks: [batch, proposals, height, width, num_classes] float32 tensor - with values from 0 to 1. - """ - # Reshape for simplicity. Merge first two dimensions into one. - target_class_ids = K.reshape(target_class_ids, (-1,)) - mask_shape = tf.shape(target_masks) - target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3])) - pred_shape = tf.shape(pred_masks) - pred_masks = K.reshape( - pred_masks, (-1, pred_shape[2], pred_shape[3], pred_shape[4]) - ) - # Permute predicted masks to [N, num_classes, height, width] - pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2]) - - # Only positive ROIs contribute to the loss. And only - # the class specific mask of each ROI. - positive_ix = tf.where(target_class_ids > 0)[:, 0] - positive_class_ids = tf.cast(tf.gather(target_class_ids, positive_ix), tf.int64) - indices = tf.stack([positive_ix, positive_class_ids], axis=1) - - # Gather the masks (predicted and true) that contribute to loss - y_true = tf.gather(target_masks, positive_ix) - y_pred = tf.gather_nd(pred_masks, indices) - - # Compute binary cross entropy. If no positive ROIs, then return 0. - # shape: [batch, roi, num_classes] - loss = K.switch( - tf.size(y_true) > 0, - K.binary_crossentropy(target=y_true, output=y_pred), - tf.constant(0.0), - ) - loss = K.mean(loss) - return loss - - -############################################################ -# Data Generator -############################################################ - - -def load_image_gt( - dataset, config, image_id, augment=False, augmentation=None, use_mini_mask=False -): - """Load and return ground truth data for an image (image, mask, bounding boxes). - - augment: (deprecated. Use augmentation instead). If true, apply random - image augmentation. Currently, only horizontal flipping is offered. - augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation. - For example, passing imgaug.augmenters.Fliplr(0.5) flips images - right/left 50% of the time. - use_mini_mask: If False, returns full-size masks that are the same height - and width as the original image. These can be big, for example - 1024x1024x100 (for 100 instances). Mini masks are smaller, typically, - 224x224 and are generated by extracting the bounding box of the - object and resizing it to MINI_MASK_SHAPE. - - Returns: - image: [height, width, 3] - shape: the original shape of the image before resizing and cropping. - class_ids: [instance_count] Integer class IDs - bbox: [instance_count, (y1, x1, y2, x2)] - mask: [height, width, instance_count]. The height and width are those - of the image unless use_mini_mask is True, in which case they are - defined in MINI_MASK_SHAPE. - """ - # Load image and mask - image = dataset.load_image(image_id) - mask, class_ids = dataset.load_mask(image_id) - original_shape = image.shape - image, window, scale, padding, crop = utils.resize_image( - image, - min_dim=config.IMAGE_MIN_DIM, - min_scale=config.IMAGE_MIN_SCALE, - max_dim=config.IMAGE_MAX_DIM, - mode=config.IMAGE_RESIZE_MODE, - ) - mask = utils.resize_mask(mask, scale, padding, crop) - - # Random horizontal flips. - # TODO: will be removed in a future update in favor of augmentation - if augment: - logging.warning("'augment' is deprecated. 
Use 'augmentation' instead.") - if random.randint(0, 1): - image = np.fliplr(image) - mask = np.fliplr(mask) - - # Augmentation - # This requires the imgaug lib (https://github.com/aleju/imgaug) - if augmentation: - import imgaug - - # Augmenters that are safe to apply to masks - # Some, such as Affine, have settings that make them unsafe, so always - # test your augmentation on masks - MASK_AUGMENTERS = [ - "Sequential", - "SomeOf", - "OneOf", - "Sometimes", - "Fliplr", - "Flipud", - "CropAndPad", - "Affine", - "PiecewiseAffine", - ] - - def hook(images, augmenter, parents, default): - """Determines which augmenters to apply to masks.""" - return augmenter.__class__.__name__ in MASK_AUGMENTERS - - # Store shapes before augmentation to compare - image_shape = image.shape - mask_shape = mask.shape - # Make augmenters deterministic to apply similarly to images and masks - det = augmentation.to_deterministic() - image = det.augment_image(image) - # Change mask to np.uint8 because imgaug doesn't support np.bool - mask = det.augment_image( - mask.astype(np.uint8), hooks=imgaug.HooksImages(activator=hook) - ) - # Verify that shapes didn't change - assert image.shape == image_shape, "Augmentation shouldn't change image size" - assert mask.shape == mask_shape, "Augmentation shouldn't change mask size" - # Change mask back to bool - mask = mask.astype(np.bool) - - # Note that some boxes might be all zeros if the corresponding mask got cropped out. - # and here is to filter them out - _idx = np.sum(mask, axis=(0, 1)) > 0 - mask = mask[:, :, _idx] - class_ids = class_ids[_idx] - # Bounding boxes. Note that some boxes might be all zeros - # if the corresponding mask got cropped out. - # bbox: [num_instances, (y1, x1, y2, x2)] - bbox = utils.extract_bboxes(mask) - - # Active classes - # Different datasets have different classes, so track the - # classes supported in the dataset of this image. - active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32) - source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]] - active_class_ids[source_class_ids] = 1 - - # Resize masks to smaller size to reduce memory usage - if use_mini_mask: - mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE) - - # Image meta data - image_meta = compose_image_meta( - image_id, original_shape, image.shape, window, scale, active_class_ids - ) - - return image, image_meta, class_ids, bbox, mask - - -def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config): - """Generate targets for training Stage 2 classifier and mask heads. - This is not used in normal training. It's useful for debugging or to train - the Mask RCNN heads without using the RPN head. - - Inputs: - rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes. - gt_class_ids: [instance count] Integer class IDs - gt_boxes: [instance count, (y1, x1, y2, x2)] - gt_masks: [height, width, instance count] Ground truth masks. Can be full - size or mini-masks. - - Returns: - rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] - class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. - bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific - bbox refinements. - masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped - to bbox boundaries and resized to neural network output size. 
- """ - assert rpn_rois.shape[0] > 0 - assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format( - gt_class_ids.dtype - ) - assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(gt_boxes.dtype) - assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(gt_masks.dtype) - - # It's common to add GT Boxes to ROIs but we don't do that here because - # according to XinLei Chen's paper, it doesn't help. - - # Trim empty padding in gt_boxes and gt_masks parts - instance_ids = np.where(gt_class_ids > 0)[0] - assert instance_ids.shape[0] > 0, "Image must contain instances." - gt_class_ids = gt_class_ids[instance_ids] - gt_boxes = gt_boxes[instance_ids] - gt_masks = gt_masks[:, :, instance_ids] - - # Compute areas of ROIs and ground truth boxes. - rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * (rpn_rois[:, 3] - rpn_rois[:, 1]) - gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1]) - - # Compute overlaps [rpn_rois, gt_boxes] - overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0])) - for i in range(overlaps.shape[1]): - gt = gt_boxes[i] - overlaps[:, i] = utils.compute_iou(gt, rpn_rois, gt_box_area[i], rpn_roi_area) - - # Assign ROIs to GT boxes - rpn_roi_iou_argmax = np.argmax(overlaps, axis=1) - rpn_roi_iou_max = overlaps[np.arange(overlaps.shape[0]), rpn_roi_iou_argmax] - # GT box assigned to each ROI - rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax] - rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax] - - # Positive ROIs are those with >= 0.5 IoU with a GT box. - fg_ids = np.where(rpn_roi_iou_max > 0.5)[0] - - # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining) - # TODO: To hard example mine or not to hard example mine, that's the question - # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0] - bg_ids = np.where(rpn_roi_iou_max < 0.5)[0] - - # Subsample ROIs. Aim for 33% foreground. - # FG - fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO) - if fg_ids.shape[0] > fg_roi_count: - keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False) - else: - keep_fg_ids = fg_ids - # BG - remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0] - if bg_ids.shape[0] > remaining: - keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False) - else: - keep_bg_ids = bg_ids - # Combine indices of ROIs to keep - keep = np.concatenate([keep_fg_ids, keep_bg_ids]) - # Need more? - remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0] - if remaining > 0: - # Looks like we don't have enough samples to maintain the desired - # balance. Reduce requirements and fill in the rest. This is - # likely different from the Mask RCNN paper. - - # There is a small chance we have neither fg nor bg samples. - if keep.shape[0] == 0: - # Pick bg regions with easier IoU threshold - bg_ids = np.where(rpn_roi_iou_max < 0.5)[0] - assert bg_ids.shape[0] >= remaining - keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False) - assert keep_bg_ids.shape[0] == remaining - keep = np.concatenate([keep, keep_bg_ids]) - else: - # Fill the rest with repeated bg rois. - keep_extra_ids = np.random.choice(keep_bg_ids, remaining, replace=True) - keep = np.concatenate([keep, keep_extra_ids]) - assert ( - keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE - ), "keep doesn't match ROI batch size {}, {}".format( - keep.shape[0], config.TRAIN_ROIS_PER_IMAGE - ) - - # Reset the gt boxes assigned to BG ROIs. 
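- # Background ROIs get class ID 0 and an all-zero GT box, so the class-specific
- # bbox and mask targets built below remain zero for them; only the kept
- # foreground ROIs receive refinement and mask labels.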
- rpn_roi_gt_boxes[keep_bg_ids, :] = 0 - rpn_roi_gt_class_ids[keep_bg_ids] = 0 - - # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement. - rois = rpn_rois[keep] - roi_gt_boxes = rpn_roi_gt_boxes[keep] - roi_gt_class_ids = rpn_roi_gt_class_ids[keep] - roi_gt_assignment = rpn_roi_iou_argmax[keep] - - # Class-aware bbox deltas. [y, x, log(h), log(w)] - bboxes = np.zeros( - (config.TRAIN_ROIS_PER_IMAGE, config.NUM_CLASSES, 4), dtype=np.float32 - ) - pos_ids = np.where(roi_gt_class_ids > 0)[0] - bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement( - rois[pos_ids], roi_gt_boxes[pos_ids, :4] - ) - # Normalize bbox refinements - bboxes /= config.BBOX_STD_DEV - - # Generate class-specific target masks - masks = np.zeros( - ( - config.TRAIN_ROIS_PER_IMAGE, - config.MASK_SHAPE[0], - config.MASK_SHAPE[1], - config.NUM_CLASSES, - ), - dtype=np.float32, - ) - for i in pos_ids: - class_id = roi_gt_class_ids[i] - assert class_id > 0, "class id must be greater than 0" - gt_id = roi_gt_assignment[i] - class_mask = gt_masks[:, :, gt_id] - - if config.USE_MINI_MASK: - # Create a mask placeholder, the size of the image - placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool) - # GT box - gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id] - gt_w = gt_x2 - gt_x1 - gt_h = gt_y2 - gt_y1 - # Resize mini mask to size of GT box - placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = np.round( - utils.resize(class_mask, (gt_h, gt_w)) - ).astype(bool) - # Place the mini batch in the placeholder - class_mask = placeholder - - # Pick part of the mask and resize it - y1, x1, y2, x2 = rois[i].astype(np.int32) - m = class_mask[y1:y2, x1:x2] - mask = utils.resize(m, config.MASK_SHAPE) - masks[i, :, :, class_id] = mask - - return rois, roi_gt_class_ids, bboxes, masks - - -def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config): - """Given the anchors and GT boxes, compute overlaps and identify positive - anchors and deltas to refine them to match their corresponding GT boxes. - - anchors: [num_anchors, (y1, x1, y2, x2)] - gt_class_ids: [num_gt_boxes] Integer class IDs. - gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)] - - Returns: - rpn_match: [N] (int32) matches between anchors and GT boxes. - 1 = positive anchor, -1 = negative anchor, 0 = neutral - rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas. - """ - # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral - rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32) - # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))] - rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4)) - - # Handle COCO crowds - # A crowd box in COCO is a bounding box around several instances. Exclude - # them from training. A crowd box is given a negative class ID. 
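- # Anchors that touch a crowd region are left neutral (rpn_match stays 0)
- # instead of being marked negative, so the RPN is not penalized for firing
- # on unlabeled instances inside a crowd.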
- crowd_ix = np.where(gt_class_ids < 0)[0] - if crowd_ix.shape[0] > 0: - # Filter out crowds from ground truth class IDs and boxes - non_crowd_ix = np.where(gt_class_ids > 0)[0] - crowd_boxes = gt_boxes[crowd_ix] - gt_class_ids = gt_class_ids[non_crowd_ix] - gt_boxes = gt_boxes[non_crowd_ix] - # Compute overlaps with crowd boxes [anchors, crowds] - crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes) - crowd_iou_max = np.amax(crowd_overlaps, axis=1) - no_crowd_bool = crowd_iou_max < 0.001 - else: - # All anchors don't intersect a crowd - no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool) - - # Compute overlaps [num_anchors, num_gt_boxes] - overlaps = utils.compute_overlaps(anchors, gt_boxes) - - # Match anchors to GT Boxes - # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive. - # If an anchor overlaps a GT box with IoU < 0.3 then it's negative. - # Neutral anchors are those that don't match the conditions above, - # and they don't influence the loss function. - # However, don't keep any GT box unmatched (rare, but happens). Instead, - # match it to the closest anchor (even if its max IoU is < 0.3). - # - # 1. Set negative anchors first. They get overwritten below if a GT box is - # matched to them. Skip boxes in crowd areas. - anchor_iou_argmax = np.argmax(overlaps, axis=1) - anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax] - rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1 - # 2. Set an anchor for each GT box (regardless of IoU value). - # If multiple anchors have the same IoU match all of them - gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0] - rpn_match[gt_iou_argmax] = 1 - # 3. Set anchors with high overlap as positive. - rpn_match[anchor_iou_max >= 0.7] = 1 - - # Subsample to balance positive and negative anchors - # Don't let positives be more than half the anchors - ids = np.where(rpn_match == 1)[0] - extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2) - if extra > 0: - # Reset the extra ones to neutral - ids = np.random.choice(ids, extra, replace=False) - rpn_match[ids] = 0 - # Same for negative proposals - ids = np.where(rpn_match == -1)[0] - extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE - np.sum(rpn_match == 1)) - if extra > 0: - # Rest the extra ones to neutral - ids = np.random.choice(ids, extra, replace=False) - rpn_match[ids] = 0 - - # For positive anchors, compute shift and scale needed to transform them - # to match the corresponding GT boxes. - ids = np.where(rpn_match == 1)[0] - ix = 0 # index into rpn_bbox - # TODO: use box_refinement() rather than duplicating the code here - for i, a in zip(ids, anchors[ids]): - # Closest gt box (it might have IoU < 0.7) - gt = gt_boxes[anchor_iou_argmax[i]] - - # Convert coordinates to center plus width/height. - # GT Box - gt_h = gt[2] - gt[0] - gt_w = gt[3] - gt[1] - gt_center_y = gt[0] + 0.5 * gt_h - gt_center_x = gt[1] + 0.5 * gt_w - # Anchor - a_h = a[2] - a[0] - a_w = a[3] - a[1] - a_center_y = a[0] + 0.5 * a_h - a_center_x = a[1] + 0.5 * a_w - - # Compute the bbox refinement that the RPN should predict. - rpn_bbox[ix] = [ - (gt_center_y - a_center_y) / a_h, - (gt_center_x - a_center_x) / a_w, - np.log(gt_h / a_h), - np.log(gt_w / a_w), - ] - # Normalize - rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV - ix += 1 - - return rpn_match, rpn_bbox - - -def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes): - """Generates ROI proposals similar to what a region proposal network - would generate. 
- - image_shape: [Height, Width, Depth] - count: Number of ROIs to generate - gt_class_ids: [N] Integer ground truth class IDs - gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels. - - Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels. - """ - # placeholder - rois = np.zeros((count, 4), dtype=np.int32) - - # Generate random ROIs around GT boxes (90% of count) - rois_per_box = int(0.9 * count / gt_boxes.shape[0]) - for i in range(gt_boxes.shape[0]): - gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i] - h = gt_y2 - gt_y1 - w = gt_x2 - gt_x1 - # random boundaries - r_y1 = max(gt_y1 - h, 0) - r_y2 = min(gt_y2 + h, image_shape[0]) - r_x1 = max(gt_x1 - w, 0) - r_x2 = min(gt_x2 + w, image_shape[1]) - - # To avoid generating boxes with zero area, we generate double what - # we need and filter out the extra. If we get fewer valid boxes - # than we need, we loop and try again. - while True: - y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2)) - x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2)) - # Filter out zero area boxes - threshold = 1 - y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:rois_per_box] - x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:rois_per_box] - if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box: - break - - # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape - # into x1, y1, x2, y2 order - x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1) - y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1) - box_rois = np.hstack([y1, x1, y2, x2]) - rois[rois_per_box * i : rois_per_box * (i + 1)] = box_rois - - # Generate random ROIs anywhere in the image (10% of count) - remaining_count = count - (rois_per_box * gt_boxes.shape[0]) - # To avoid generating boxes with zero area, we generate double what - # we need and filter out the extra. If we get fewer valid boxes - # than we need, we loop and try again. - while True: - y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2)) - x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2)) - # Filter out zero area boxes - threshold = 1 - y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >= threshold][:remaining_count] - x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >= threshold][:remaining_count] - if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count: - break - - # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape - # into x1, y1, x2, y2 order - x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1) - y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1) - global_rois = np.hstack([y1, x1, y2, x2]) - rois[-remaining_count:] = global_rois - return rois - - -def data_generator( - dataset, - config, - shuffle=True, - augment=False, - augmentation=None, - random_rois=0, - batch_size=1, - detection_targets=False, - no_augmentation_sources=None, -): - """A generator that returns images and corresponding target class ids, - bounding box deltas, and masks. - - dataset: The Dataset object to pick data from - config: The model config object - shuffle: If True, shuffles the samples before every epoch - augment: (deprecated. Use augmentation instead). If true, apply random - image augmentation. Currently, only horizontal flipping is offered. - augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation. - For example, passing imgaug.augmenters.Fliplr(0.5) flips images - right/left 50% of the time. - random_rois: If > 0 then generate proposals to be used to train the - network classifier and mask heads. 
Useful if training - the Mask RCNN part without the RPN. - batch_size: How many images to return in each call - detection_targets: If True, generate detection targets (class IDs, bbox - deltas, and masks). Typically for debugging or visualizations because - in trainig detection targets are generated by DetectionTargetLayer. - no_augmentation_sources: Optional. List of sources to exclude for - augmentation. A source is string that identifies a dataset and is - defined in the Dataset class. - - Returns a Python generator. Upon calling next() on it, the - generator returns two lists, inputs and outputs. The contents - of the lists differs depending on the received arguments: - inputs list: - - images: [batch, H, W, C] - - image_meta: [batch, (meta data)] Image details. See compose_image_meta() - - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral) - - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas. - - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs - - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] - - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width - are those of the image unless use_mini_mask is True, in which - case they are defined in MINI_MASK_SHAPE. - - outputs list: Usually empty in regular training. But if detection_targets - is True then the outputs list contains target class_ids, bbox deltas, - and masks. - """ - b = 0 # batch item index - image_index = -1 - image_ids = np.copy(dataset.image_ids) - error_count = 0 - no_augmentation_sources = no_augmentation_sources or [] - - # Anchors - # [anchor_count, (y1, x1, y2, x2)] - backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE) - anchors = utils.generate_pyramid_anchors( - config.RPN_ANCHOR_SCALES, - config.RPN_ANCHOR_RATIOS, - backbone_shapes, - config.BACKBONE_STRIDES, - config.RPN_ANCHOR_STRIDE, - ) - - # Keras requires a generator to run indefinitely. - while True: - try: - # Increment index to pick next image. Shuffle if at the start of an epoch. - image_index = (image_index + 1) % len(image_ids) - if shuffle and image_index == 0: - np.random.shuffle(image_ids) - - # Get GT bounding boxes and masks for image. - image_id = image_ids[image_index] - - # If the image source is not to be augmented pass None as augmentation - if dataset.image_info[image_id]["source"] in no_augmentation_sources: - image, image_meta, gt_class_ids, gt_boxes, gt_masks = load_image_gt( - dataset, - config, - image_id, - augment=augment, - augmentation=None, - use_mini_mask=config.USE_MINI_MASK, - ) - else: - image, image_meta, gt_class_ids, gt_boxes, gt_masks = load_image_gt( - dataset, - config, - image_id, - augment=augment, - augmentation=augmentation, - use_mini_mask=config.USE_MINI_MASK, - ) - - # Skip images that have no instances. This can happen in cases - # where we train on a subset of classes and the image doesn't - # have any of the classes we care about. 
- if not np.any(gt_class_ids > 0): - continue - - # RPN Targets - rpn_match, rpn_bbox = build_rpn_targets( - image.shape, anchors, gt_class_ids, gt_boxes, config - ) - - # Mask R-CNN Targets - if random_rois: - rpn_rois = generate_random_rois( - image.shape, random_rois, gt_class_ids, gt_boxes - ) - if detection_targets: - ( - rois, - mrcnn_class_ids, - mrcnn_bbox, - mrcnn_mask, - ) = build_detection_targets( - rpn_rois, gt_class_ids, gt_boxes, gt_masks, config - ) - - # Init batch arrays - if b == 0: - batch_image_meta = np.zeros( - (batch_size,) + image_meta.shape, dtype=image_meta.dtype - ) - batch_rpn_match = np.zeros( - [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype - ) - batch_rpn_bbox = np.zeros( - [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], - dtype=rpn_bbox.dtype, - ) - batch_images = np.zeros((batch_size,) + image.shape, dtype=np.float32) - batch_gt_class_ids = np.zeros( - (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32 - ) - batch_gt_boxes = np.zeros( - (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32 - ) - batch_gt_masks = np.zeros( - ( - batch_size, - gt_masks.shape[0], - gt_masks.shape[1], - config.MAX_GT_INSTANCES, - ), - dtype=gt_masks.dtype, - ) - if random_rois: - batch_rpn_rois = np.zeros( - (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype - ) - if detection_targets: - batch_rois = np.zeros( - (batch_size,) + rois.shape, dtype=rois.dtype - ) - batch_mrcnn_class_ids = np.zeros( - (batch_size,) + mrcnn_class_ids.shape, - dtype=mrcnn_class_ids.dtype, - ) - batch_mrcnn_bbox = np.zeros( - (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype - ) - batch_mrcnn_mask = np.zeros( - (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype - ) - - # If more instances than fits in the array, sub-sample from them. - if gt_boxes.shape[0] > config.MAX_GT_INSTANCES: - ids = np.random.choice( - np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False - ) - gt_class_ids = gt_class_ids[ids] - gt_boxes = gt_boxes[ids] - gt_masks = gt_masks[:, :, ids] - - # Add to batch - batch_image_meta[b] = image_meta - batch_rpn_match[b] = rpn_match[:, np.newaxis] - batch_rpn_bbox[b] = rpn_bbox - batch_images[b] = mold_image(image.astype(np.float32), config) - batch_gt_class_ids[b, : gt_class_ids.shape[0]] = gt_class_ids - batch_gt_boxes[b, : gt_boxes.shape[0]] = gt_boxes - batch_gt_masks[b, :, :, : gt_masks.shape[-1]] = gt_masks - if random_rois: - batch_rpn_rois[b] = rpn_rois - if detection_targets: - batch_rois[b] = rois - batch_mrcnn_class_ids[b] = mrcnn_class_ids - batch_mrcnn_bbox[b] = mrcnn_bbox - batch_mrcnn_mask[b] = mrcnn_mask - b += 1 - - # Batch full? 
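- # Once batch_size items have been accumulated, yield them in the order the
- # model's input layers expect (images, image meta, RPN targets, then GT
- # class IDs, boxes and masks), reset the batch index, and keep looping.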
- if b >= batch_size: - inputs = [ - batch_images, - batch_image_meta, - batch_rpn_match, - batch_rpn_bbox, - batch_gt_class_ids, - batch_gt_boxes, - batch_gt_masks, - ] - outputs = [] - - if random_rois: - inputs.extend([batch_rpn_rois]) - if detection_targets: - inputs.extend([batch_rois]) - # Keras requires that output and targets have the same number of dimensions - batch_mrcnn_class_ids = np.expand_dims( - batch_mrcnn_class_ids, -1 - ) - outputs.extend( - [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask] - ) - - yield inputs, outputs - - # start a new batch - b = 0 - except (GeneratorExit, KeyboardInterrupt): - raise - except: - # Log it and skip the image - logging.exception( - "Error processing image {}".format(dataset.image_info[image_id]) - ) - error_count += 1 - if error_count > 5: - raise - - -############################################################ -# MaskRCNN Class -############################################################ - - -class MaskRCNN: - """Encapsulates the Mask RCNN model functionality. - - The actual Keras model is in the keras_model property. - """ - - def __init__(self, mode, config, model_dir): - """ - mode: Either "training" or "inference" - config: A Sub-class of the Config class - model_dir: Directory to save training logs and trained weights - """ - assert mode in ["training", "inference"] - self.mode = mode - self.config = config - self.model_dir = model_dir - self.set_log_dir() - self.keras_model = self.build(mode=mode, config=config) - - def build(self, mode, config): - """Build Mask R-CNN architecture. - input_shape: The shape of the input image. - mode: Either "training" or "inference". The inputs and - outputs of the model differ accordingly. - """ - assert mode in ["training", "inference"] - - # Image size must be dividable by 2 multiple times - h, w = config.IMAGE_SHAPE[:2] - if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6): - raise Exception( - "Image size must be dividable by 2 at least 6 times " - "to avoid fractions when downscaling and upscaling." - "For example, use 256, 320, 384, 448, 512, ... etc. " - ) - - # Inputs - input_image = KL.Input( - shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image" - ) - input_image_meta = KL.Input( - shape=[config.IMAGE_META_SIZE], name="input_image_meta" - ) - if mode == "training": - # RPN GT - input_rpn_match = KL.Input( - shape=[None, 1], name="input_rpn_match", dtype=tf.int32 - ) - input_rpn_bbox = KL.Input( - shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32 - ) - - # Detection GT (class IDs, bounding boxes, and masks) - # 1. GT Class IDs (zero padded) - input_gt_class_ids = KL.Input( - shape=[None], name="input_gt_class_ids", dtype=tf.int32 - ) - # 2. GT Boxes in pixels (zero padded) - # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates - input_gt_boxes = KL.Input( - shape=[None, 4], name="input_gt_boxes", dtype=tf.float32 - ) - # Normalize coordinates - gt_boxes = KL.Lambda( - lambda x: norm_boxes_graph(x, K.shape(input_image)[1:3]) - )(input_gt_boxes) - # 3. 
GT Masks (zero padded) - # [batch, height, width, MAX_GT_INSTANCES] - if config.USE_MINI_MASK: - input_gt_masks = KL.Input( - shape=[config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1], None], - name="input_gt_masks", - dtype=bool, - ) - else: - input_gt_masks = KL.Input( - shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None], - name="input_gt_masks", - dtype=bool, - ) - elif mode == "inference": - # Anchors in normalized coordinates - input_anchors = KL.Input(shape=[None, 4], name="input_anchors") - - # Build the shared convolutional layers. - # Bottom-up Layers - # Returns a list of the last layers of each stage, 5 in total. - # Don't create the thead (stage 5), so we pick the 4th item in the list. - if callable(config.BACKBONE): - _, C2, C3, C4, C5 = config.BACKBONE( - input_image, stage5=True, train_bn=config.TRAIN_BN - ) - else: - _, C2, C3, C4, C5 = resnet_graph( - input_image, config.BACKBONE, stage5=True, train_bn=config.TRAIN_BN - ) - # Top-down Layers - # TODO: add assert to varify feature map sizes match what's in config - P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name="fpn_c5p5")(C5) - P4 = KL.Add(name="fpn_p4add")( - [ - KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5), - KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name="fpn_c4p4")(C4), - ] - ) - P3 = KL.Add(name="fpn_p3add")( - [ - KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4), - KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name="fpn_c3p3")(C3), - ] - ) - P2 = KL.Add(name="fpn_p2add")( - [ - KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3), - KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name="fpn_c2p2")(C2), - ] - ) - # Attach 3x3 conv to all P layers to get the final feature maps. - P2 = KL.Conv2D( - config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2" - )(P2) - P3 = KL.Conv2D( - config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3" - )(P3) - P4 = KL.Conv2D( - config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4" - )(P4) - P5 = KL.Conv2D( - config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5" - )(P5) - # P6 is used for the 5th anchor scale in RPN. Generated by - # subsampling from P5 with stride of 2. - P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5) - - # Note that P6 is used in RPN, but not in the classifier heads. - rpn_feature_maps = [P2, P3, P4, P5, P6] - mrcnn_feature_maps = [P2, P3, P4, P5] - - # Anchors - if mode == "training": - anchors = self.get_anchors(config.IMAGE_SHAPE) - # Duplicate across the batch dimension because Keras requires it - # TODO: can this be optimized to avoid duplicating the anchors? - anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape) - # A hack to get around Keras's bad support for constants - anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")( - input_image - ) - else: - anchors = input_anchors - - # RPN Model - rpn = build_rpn_model( - config.RPN_ANCHOR_STRIDE, - len(config.RPN_ANCHOR_RATIOS), - config.TOP_DOWN_PYRAMID_SIZE, - ) - # Loop through pyramid layers - layer_outputs = [] # list of lists - for p in rpn_feature_maps: - layer_outputs.append(rpn([p])) - # Concatenate layer outputs - # Convert from list of lists of level outputs to list of lists - # of outputs across levels. - # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]] - output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"] - outputs = list(zip(*layer_outputs)) - outputs = [ - KL.Concatenate(axis=1, name=n)(list(o)) - for o, n in zip(outputs, output_names) - ] - - rpn_class_logits, rpn_class, rpn_bbox = outputs - - # Generate proposals - # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates - # and zero padded. - proposal_count = ( - config.POST_NMS_ROIS_TRAINING - if mode == "training" - else config.POST_NMS_ROIS_INFERENCE - ) - rpn_rois = ProposalLayer( - proposal_count=proposal_count, - nms_threshold=config.RPN_NMS_THRESHOLD, - name="ROI", - config=config, - )([rpn_class, rpn_bbox, anchors]) - - if mode == "training": - # Class ID mask to mark class IDs supported by the dataset the image - # came from. - active_class_ids = KL.Lambda( - lambda x: parse_image_meta_graph(x)["active_class_ids"] - )(input_image_meta) - - if not config.USE_RPN_ROIS: - # Ignore predicted ROIs and use ROIs provided as an input. - input_rois = KL.Input( - shape=[config.POST_NMS_ROIS_TRAINING, 4], - name="input_roi", - dtype=np.int32, - ) - # Normalize coordinates - target_rois = KL.Lambda( - lambda x: norm_boxes_graph(x, K.shape(input_image)[1:3]) - )(input_rois) - else: - target_rois = rpn_rois - - # Generate detection targets - # Subsamples proposals and generates target outputs for training - # Note that proposal class IDs, gt_boxes, and gt_masks are zero - # padded. Equally, returned rois and targets are zero padded. - rois, target_class_ids, target_bbox, target_mask = DetectionTargetLayer( - config, name="proposal_targets" - )([target_rois, input_gt_class_ids, gt_boxes, input_gt_masks]) - - # Network Heads - # TODO: verify that this handles zero padded ROIs - mrcnn_class_logits, mrcnn_class, mrcnn_bbox = fpn_classifier_graph( - rois, - mrcnn_feature_maps, - input_image_meta, - config.POOL_SIZE, - config.NUM_CLASSES, - train_bn=config.TRAIN_BN, - fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE, - ) - - mrcnn_mask = build_fpn_mask_graph( - rois, - mrcnn_feature_maps, - input_image_meta, - config.MASK_POOL_SIZE, - config.NUM_CLASSES, - train_bn=config.TRAIN_BN, - ) - - # TODO: clean up (use tf.identify if necessary) - output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois) - - # Losses - rpn_class_loss = KL.Lambda( - lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss" - )([input_rpn_match, rpn_class_logits]) - rpn_bbox_loss = KL.Lambda( - lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss" - )([input_rpn_bbox, input_rpn_match, rpn_bbox]) - class_loss = KL.Lambda( - lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss" - )([target_class_ids, mrcnn_class_logits, active_class_ids]) - bbox_loss = KL.Lambda( - lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss" - )([target_bbox, target_class_ids, mrcnn_bbox]) - mask_loss = KL.Lambda( - lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss" - )([target_mask, target_class_ids, mrcnn_mask]) - - # Model - inputs = [ - input_image, - input_image_meta, - input_rpn_match, - input_rpn_bbox, - input_gt_class_ids, - input_gt_boxes, - input_gt_masks, - ] - if not config.USE_RPN_ROIS: - inputs.append(input_rois) - outputs = [ - rpn_class_logits, - rpn_class, - rpn_bbox, - mrcnn_class_logits, - mrcnn_class, - mrcnn_bbox, - mrcnn_mask, - rpn_rois, - output_rois, - rpn_class_loss, - rpn_bbox_loss, - class_loss, - bbox_loss, - mask_loss, - ] - model = KM.Model(inputs, outputs, name="mask_rcnn") - else: - # 
Network Heads - # Proposal classifier and BBox regressor heads - mrcnn_class_logits, mrcnn_class, mrcnn_bbox = fpn_classifier_graph( - rpn_rois, - mrcnn_feature_maps, - input_image_meta, - config.POOL_SIZE, - config.NUM_CLASSES, - train_bn=config.TRAIN_BN, - fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE, - ) - - # Detections - # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in - # normalized coordinates - detections = DetectionLayer(config, name="mrcnn_detection")( - [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta] - ) - - # Create masks for detections - detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections) - mrcnn_mask = build_fpn_mask_graph( - detection_boxes, - mrcnn_feature_maps, - input_image_meta, - config.MASK_POOL_SIZE, - config.NUM_CLASSES, - train_bn=config.TRAIN_BN, - ) - - model = KM.Model( - [input_image, input_image_meta, input_anchors], - [ - detections, - mrcnn_class, - mrcnn_bbox, - mrcnn_mask, - rpn_rois, - rpn_class, - rpn_bbox, - ], - name="mask_rcnn", - ) - - # Add multi-GPU support. - if config.GPU_COUNT > 1: - from mrcnn.parallel_model import ParallelModel - - model = ParallelModel(model, config.GPU_COUNT) - - return model - - def find_last(self): - """Finds the last checkpoint file of the last trained model in the - model directory. - Returns: - The path of the last checkpoint file - """ - # Get directory names. Each directory corresponds to a model - dir_names = next(os.walk(self.model_dir))[1] - key = self.config.NAME.lower() - dir_names = filter(lambda f: f.startswith(key), dir_names) - dir_names = sorted(dir_names) - if not dir_names: - import errno - - raise FileNotFoundError( - errno.ENOENT, - "Could not find model directory under {}".format(self.model_dir), - ) - # Pick last directory - dir_name = os.path.join(self.model_dir, dir_names[-1]) - # Find the last checkpoint - checkpoints = next(os.walk(dir_name))[2] - checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints) - checkpoints = sorted(checkpoints) - if not checkpoints: - import errno - - raise FileNotFoundError( - errno.ENOENT, "Could not find weight files in {}".format(dir_name) - ) - checkpoint = os.path.join(dir_name, checkpoints[-1]) - return checkpoint - - def load_weights(self, filepath, by_name=False, exclude=None): - """Modified version of the corresponding Keras function with - the addition of multi-GPU support and the ability to exclude - some layers from loading. - exclude: list of layer names to exclude - """ - import h5py - - # Conditional import to support versions of Keras before 2.2 - # TODO: remove in about 6 months (end of 2018) - try: - from keras.engine import saving - except ImportError: - # Keras before 2.2 used the 'topology' namespace. - from keras.engine import topology as saving - - if exclude: - by_name = True - - if h5py is None: - raise ImportError("`load_weights` requires h5py.") - f = h5py.File(filepath, mode="r") - if "layer_names" not in f.attrs and "model_weights" in f: - f = f["model_weights"] - - # In multi-GPU training, we wrap the model. Get layers - # of the inner model because they have the weights. 
- keras_model = self.keras_model - layers = ( - keras_model.inner_model.layers - if hasattr(keras_model, "inner_model") - else keras_model.layers - ) - - # Exclude some layers - if exclude: - layers = filter(lambda l: l.name not in exclude, layers) - - if by_name: - saving.load_weights_from_hdf5_group_by_name(f, layers) - else: - saving.load_weights_from_hdf5_group(f, layers) - if hasattr(f, "close"): - f.close() - - # Update the log directory - self.set_log_dir(filepath) - - def get_imagenet_weights(self): - """Downloads ImageNet trained weights from Keras. - Returns path to weights file. - """ - from keras.utils.data_utils import get_file - - TF_WEIGHTS_PATH_NO_TOP = ( - "https://github.com/fchollet/deep-learning-models/" - "releases/download/v0.2/" - "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5" - ) - weights_path = get_file( - "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5", - TF_WEIGHTS_PATH_NO_TOP, - cache_subdir="models", - md5_hash="a268eb855778b3df3c7506639542a6af", - ) - return weights_path - - def compile(self, learning_rate, momentum): - """Gets the model ready for training. Adds losses, regularization, and - metrics. Then calls the Keras compile() function. - """ - # Optimizer object - optimizer = keras.optimizers.SGD( - lr=learning_rate, momentum=momentum, clipnorm=self.config.GRADIENT_CLIP_NORM - ) - # Add Losses - # First, clear previously set losses to avoid duplication - self.keras_model._losses = [] - self.keras_model._per_input_losses = {} - loss_names = [ - "rpn_class_loss", - "rpn_bbox_loss", - "mrcnn_class_loss", - "mrcnn_bbox_loss", - "mrcnn_mask_loss", - ] - for name in loss_names: - layer = self.keras_model.get_layer(name) - if layer.output in self.keras_model.losses: - continue - loss = tf.reduce_mean( - layer.output, keepdims=True - ) * self.config.LOSS_WEIGHTS.get(name, 1.0) - self.keras_model.add_loss(loss) - - # Add L2 Regularization - # Skip gamma and beta weights of batch normalization layers. - reg_losses = [ - keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) - / tf.cast(tf.size(w), tf.float32) - for w in self.keras_model.trainable_weights - if "gamma" not in w.name and "beta" not in w.name - ] - self.keras_model.add_loss(tf.add_n(reg_losses)) - - # Compile - self.keras_model.compile( - optimizer=optimizer, loss=[None] * len(self.keras_model.outputs) - ) - - # Add metrics for losses - for name in loss_names: - if name in self.keras_model.metrics_names: - continue - layer = self.keras_model.get_layer(name) - self.keras_model.metrics_names.append(name) - loss = tf.reduce_mean( - layer.output, keepdims=True - ) * self.config.LOSS_WEIGHTS.get(name, 1.0) - self.keras_model.metrics_tensors.append(loss) - - def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1): - """Sets model layers as trainable if their names match - the given regular expression. - """ - # Print message on the first call (but not on recursive calls) - if verbose > 0 and keras_model is None: - log("Selecting layers to train") - - keras_model = keras_model or self.keras_model - - # In multi-GPU training, we wrap the model. Get layers - # of the inner model because they have the weights. - layers = ( - keras_model.inner_model.layers - if hasattr(keras_model, "inner_model") - else keras_model.layers - ) - - for layer in layers: - # Is the layer a model? 
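# --- Illustrative sketch (not part of the original deleted file) ---
# compile() above multiplies each named loss by config.LOSS_WEIGHTS.get(name, 1.0),
# so a Config subclass can rebalance the five losses with a dict keyed by the same
# names used in loss_names. The values below are arbitrary examples, not tuned
# recommendations.
LOSS_WEIGHTS = {
    "rpn_class_loss": 1.0,
    "rpn_bbox_loss": 1.0,
    "mrcnn_class_loss": 1.0,
    "mrcnn_bbox_loss": 1.0,
    "mrcnn_mask_loss": 2.0,   # e.g. emphasize mask quality over the other terms
}
# --- end of sketch ---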
- if layer.__class__.__name__ == "Model": - print("In model: ", layer.name) - self.set_trainable(layer_regex, keras_model=layer, indent=indent + 4) - continue - - if not layer.weights: - continue - # Is it trainable? - trainable = bool(re.fullmatch(layer_regex, layer.name)) - # Update layer. If layer is a container, update inner layer. - if layer.__class__.__name__ == "TimeDistributed": - layer.layer.trainable = trainable - else: - layer.trainable = trainable - # Print trainable layer names - if trainable and verbose > 0: - log( - "{}{:20} ({})".format( - " " * indent, layer.name, layer.__class__.__name__ - ) - ) - - def set_log_dir(self, model_path=None): - """Sets the model log directory and epoch counter. - - model_path: If None, or a format different from what this code uses - then set a new log directory and start epochs from 0. Otherwise, - extract the log directory and the epoch counter from the file - name. - """ - # Set date and epoch counter as if starting a new model - self.epoch = 0 - now = datetime.datetime.now() - - # If we have a model path with date and epochs use them - if model_path: - # Continue from we left of. Get epoch and date from the file name - # A sample model path might look like: - # \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows) - # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux) - regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5" - m = re.match(regex, model_path) - if m: - now = datetime.datetime( - int(m.group(1)), - int(m.group(2)), - int(m.group(3)), - int(m.group(4)), - int(m.group(5)), - ) - # Epoch number in file is 1-based, and in Keras code it's 0-based. - # So, adjust for that then increment by one to start from the next epoch - self.epoch = int(m.group(6)) - 1 + 1 - print("Re-starting from epoch %d" % self.epoch) - - # Directory for training logs - self.log_dir = os.path.join( - self.model_dir, "{}{:%Y%m%dT%H%M}".format(self.config.NAME.lower(), now) - ) - - # Path to save after each epoch. Include placeholders that get filled by Keras. - self.checkpoint_path = os.path.join( - self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(self.config.NAME.lower()) - ) - self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}") - - def train( - self, - train_dataset, - val_dataset, - learning_rate, - epochs, - layers, - augmentation=None, - custom_callbacks=None, - no_augmentation_sources=None, - ): - """Train the model. - train_dataset, val_dataset: Training and validation Dataset objects. - learning_rate: The learning rate to train with - epochs: Number of training epochs. Note that previous training epochs - are considered to be done alreay, so this actually determines - the epochs to train in total rather than in this particaular - call. - layers: Allows selecting wich layers to train. It can be: - - A regular expression to match layer names to train - - One of these predefined values: - heads: The RPN, classifier and mask heads of the network - all: All the layers - 3+: Train Resnet stage 3 and up - 4+: Train Resnet stage 4 and up - 5+: Train Resnet stage 5 and up - augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) - augmentation. For example, passing imgaug.augmenters.Fliplr(0.5) - flips images right/left 50% of the time. You can pass complex - augmentations as well. This augmentation applies 50% of the - time, and when it does it flips images right/left half the time - and adds a Gaussian blur with a random sigma in range 0 to 5. 
- - augmentation = imgaug.augmenters.Sometimes(0.5, [ - imgaug.augmenters.Fliplr(0.5), - imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0)) - ]) - custom_callbacks: Optional. Add custom callbacks to be called - with the keras fit_generator method. Must be list of type keras.callbacks. - no_augmentation_sources: Optional. List of sources to exclude for - augmentation. A source is string that identifies a dataset and is - defined in the Dataset class. - """ - assert self.mode == "training", "Create model in training mode." - - # Pre-defined layer regular expressions - layer_regex = { - # all layers but the backbone - "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", - # From a specific Resnet stage and up - "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", - "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", - "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)", - # All layers - "all": ".*", - } - if layers in layer_regex.keys(): - layers = layer_regex[layers] - - # Data generators - train_generator = data_generator( - train_dataset, - self.config, - shuffle=True, - augmentation=augmentation, - batch_size=self.config.BATCH_SIZE, - no_augmentation_sources=no_augmentation_sources, - ) - val_generator = data_generator( - val_dataset, self.config, shuffle=True, batch_size=self.config.BATCH_SIZE - ) - - # Create log_dir if it does not exist - if not os.path.exists(self.log_dir): - os.makedirs(self.log_dir) - - # Callbacks - callbacks = [ - keras.callbacks.TensorBoard( - log_dir=self.log_dir, - histogram_freq=0, - write_graph=True, - write_images=False, - ), - keras.callbacks.ModelCheckpoint( - self.checkpoint_path, verbose=0, save_weights_only=True - ), - ] - - # Add custom callbacks to the list - if custom_callbacks: - callbacks += custom_callbacks - - # Train - log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate)) - log("Checkpoint Path: {}".format(self.checkpoint_path)) - self.set_trainable(layers) - self.compile(learning_rate, self.config.LEARNING_MOMENTUM) - - # Work-around for Windows: Keras fails on Windows when using - # multiprocessing workers. See discussion here: - # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009 - if os.name is "nt": - workers = 0 - else: - workers = multiprocessing.cpu_count() - - self.keras_model.fit_generator( - train_generator, - initial_epoch=self.epoch, - epochs=epochs, - steps_per_epoch=self.config.STEPS_PER_EPOCH, - callbacks=callbacks, - validation_data=val_generator, - validation_steps=self.config.VALIDATION_STEPS, - max_queue_size=100, - workers=workers, - use_multiprocessing=True, - ) - self.epoch = max(self.epoch, epochs) - - def mold_inputs(self, images): - """Takes a list of images and modifies them to the format expected - as an input to the neural network. - images: List of image matrices [height,width,depth]. Images can have - different sizes. - - Returns 3 Numpy matrices: - molded_images: [N, h, w, 3]. Images resized and normalized. - image_metas: [N, length of meta data]. Details about each image. - windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the - original image (padding excluded). 
- """ - molded_images = [] - image_metas = [] - windows = [] - for image in images: - # Resize image - # TODO: move resizing to mold_image() - molded_image, window, scale, padding, crop = utils.resize_image( - image, - min_dim=self.config.IMAGE_MIN_DIM, - min_scale=self.config.IMAGE_MIN_SCALE, - max_dim=self.config.IMAGE_MAX_DIM, - mode=self.config.IMAGE_RESIZE_MODE, - ) - molded_image = mold_image(molded_image, self.config) - # Build image_meta - image_meta = compose_image_meta( - 0, - image.shape, - molded_image.shape, - window, - scale, - np.zeros([self.config.NUM_CLASSES], dtype=np.int32), - ) - # Append - molded_images.append(molded_image) - windows.append(window) - image_metas.append(image_meta) - # Pack into arrays - molded_images = np.stack(molded_images) - image_metas = np.stack(image_metas) - windows = np.stack(windows) - return molded_images, image_metas, windows - - def unmold_detections( - self, detections, mrcnn_mask, original_image_shape, image_shape, window - ): - """Reformats the detections of one image from the format of the neural - network output to a format suitable for use in the rest of the - application. - - detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates - mrcnn_mask: [N, height, width, num_classes] - original_image_shape: [H, W, C] Original image shape before resizing - image_shape: [H, W, C] Shape of the image after resizing and padding - window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real - image is excluding the padding. - - Returns: - boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels - class_ids: [N] Integer class IDs for each bounding box - scores: [N] Float probability scores of the class_id - masks: [height, width, num_instances] Instance masks - """ - # How many detections do we have? - # Detections array is padded with zeros. Find the first class_id == 0. - zero_ix = np.where(detections[:, 4] == 0)[0] - N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0] - - # Extract boxes, class_ids, scores, and class-specific masks - boxes = detections[:N, :4] - class_ids = detections[:N, 4].astype(np.int32) - scores = detections[:N, 5] - masks = mrcnn_mask[np.arange(N), :, :, class_ids] - - # Translate normalized coordinates in the resized image to pixel - # coordinates in the original image before resizing - window = utils.norm_boxes(window, image_shape[:2]) - wy1, wx1, wy2, wx2 = window - shift = np.array([wy1, wx1, wy1, wx1]) - wh = wy2 - wy1 # window height - ww = wx2 - wx1 # window width - scale = np.array([wh, ww, wh, ww]) - # Convert boxes to normalized coordinates on the window - boxes = np.divide(boxes - shift, scale) - # Convert boxes to pixel coordinates on the original image - boxes = utils.denorm_boxes(boxes, original_image_shape[:2]) - - # Filter out detections with zero area. Happens in early training when - # network weights are still random - exclude_ix = np.where( - (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0 - )[0] - if exclude_ix.shape[0] > 0: - boxes = np.delete(boxes, exclude_ix, axis=0) - class_ids = np.delete(class_ids, exclude_ix, axis=0) - scores = np.delete(scores, exclude_ix, axis=0) - masks = np.delete(masks, exclude_ix, axis=0) - N = class_ids.shape[0] - - # Resize masks to original image size and set boundary threshold. 
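# --- Illustrative sketch (not part of the original deleted file) ---
# unmold_detections() above maps boxes from normalized coordinates of the padded,
# resized ("molded") image back to pixel coordinates of the original image via the
# window. A standalone NumPy version of that arithmetic, using the same
# (h - 1, w - 1) scale and (0, 0, 1, 1) shift as norm_boxes_graph(); the image
# sizes, window and box below are made-up values.
import numpy as np

def norm_boxes(boxes, shape):
    h, w = shape
    scale = np.array([h - 1, w - 1, h - 1, w - 1])
    shift = np.array([0, 0, 1, 1])
    return np.divide(boxes - shift, scale).astype(np.float32)

def denorm_boxes(boxes, shape):
    h, w = shape
    scale = np.array([h - 1, w - 1, h - 1, w - 1])
    shift = np.array([0, 0, 1, 1])
    return np.around(boxes * scale + shift).astype(np.int32)

molded_shape = (1024, 1024)              # padded, resized input fed to the network
original_shape = (768, 1024)             # original image size before molding
window = np.array([128, 0, 896, 1024])   # part of the molded image holding the real picture

boxes = np.array([[0.30, 0.25, 0.60, 0.55]])          # one detection, normalized to the molded image
wy1, wx1, wy2, wx2 = norm_boxes(window, molded_shape)
shift = np.array([wy1, wx1, wy1, wx1])
scale = np.array([wy2 - wy1, wx2 - wx1, wy2 - wy1, wx2 - wx1])
boxes_in_window = (boxes - shift) / scale             # re-normalized w.r.t. the window
pixel_boxes = denorm_boxes(boxes_in_window, original_shape)   # pixel box in the original image
# --- end of sketch ---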
- full_masks = [] - for i in range(N): - # Convert neural network mask to full size mask - full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape) - full_masks.append(full_mask) - full_masks = ( - np.stack(full_masks, axis=-1) - if full_masks - else np.empty(original_image_shape[:2] + (0,)) - ) - - return boxes, class_ids, scores, full_masks - - def detect(self, images, verbose=0): - """Runs the detection pipeline. - - images: List of images, potentially of different sizes. - - Returns a list of dicts, one dict per image. The dict contains: - rois: [N, (y1, x1, y2, x2)] detection bounding boxes - class_ids: [N] int class IDs - scores: [N] float probability scores for the class IDs - masks: [H, W, N] instance binary masks - """ - assert self.mode == "inference", "Create model in inference mode." - assert ( - len(images) == self.config.BATCH_SIZE - ), "len(images) must be equal to BATCH_SIZE" - - if verbose: - log("Processing {} images".format(len(images))) - for image in images: - log("image", image) - - # Mold inputs to format expected by the neural network - molded_images, image_metas, windows = self.mold_inputs(images) - - # Validate image sizes - # All images in a batch MUST be of the same size - image_shape = molded_images[0].shape - for g in molded_images[1:]: - assert ( - g.shape == image_shape - ), "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes." - - # Anchors - anchors = self.get_anchors(image_shape) - # Duplicate across the batch dimension because Keras requires it - # TODO: can this be optimized to avoid duplicating the anchors? - anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) - - if verbose: - log("molded_images", molded_images) - log("image_metas", image_metas) - log("anchors", anchors) - # Run object detection - detections, _, _, mrcnn_mask, _, _, _ = self.keras_model.predict( - [molded_images, image_metas, anchors], verbose=0 - ) - # Process detections - results = [] - for i, image in enumerate(images): - ( - final_rois, - final_class_ids, - final_scores, - final_masks, - ) = self.unmold_detections( - detections[i], - mrcnn_mask[i], - image.shape, - molded_images[i].shape, - windows[i], - ) - results.append( - { - "rois": final_rois, - "class_ids": final_class_ids, - "scores": final_scores, - "masks": final_masks, - } - ) - return results - - def detect_molded(self, molded_images, image_metas, verbose=0): - """Runs the detection pipeline, but expect inputs that are - molded already. Used mostly for debugging and inspecting - the model. - - molded_images: List of images loaded using load_image_gt() - image_metas: image meta data, also returned by load_image_gt() - - Returns a list of dicts, one dict per image. The dict contains: - rois: [N, (y1, x1, y2, x2)] detection bounding boxes - class_ids: [N] int class IDs - scores: [N] float probability scores for the class IDs - masks: [H, W, N] instance binary masks - """ - assert self.mode == "inference", "Create model in inference mode." 
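# --- Illustrative usage sketch (not part of the original deleted file) ---
# detect() above expects exactly config.BATCH_SIZE images and returns one result
# dict per image with "rois", "class_ids", "scores" and "masks". A minimal call,
# assuming a `model` already built in inference mode with BATCH_SIZE == 1 and
# weights loaded; the variable `model` and the image path are placeholders.
import skimage.io

image = skimage.io.imread("example.jpg")         # RGB image, any size
results = model.detect([image], verbose=1)       # list of length BATCH_SIZE
r = results[0]
print(r["rois"].shape)        # [N, (y1, x1, y2, x2)] in pixel coordinates of `image`
print(r["class_ids"].shape)   # [N] integer class IDs
print(r["scores"].shape)      # [N] confidence scores
print(r["masks"].shape)       # [H, W, N] binary instance masks
# --- end of sketch ---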
- assert ( - len(molded_images) == self.config.BATCH_SIZE - ), "Number of images must be equal to BATCH_SIZE" - - if verbose: - log("Processing {} images".format(len(molded_images))) - for image in molded_images: - log("image", image) - - # Validate image sizes - # All images in a batch MUST be of the same size - image_shape = molded_images[0].shape - for g in molded_images[1:]: - assert g.shape == image_shape, "Images must have the same size" - - # Anchors - anchors = self.get_anchors(image_shape) - # Duplicate across the batch dimension because Keras requires it - # TODO: can this be optimized to avoid duplicating the anchors? - anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) - - if verbose: - log("molded_images", molded_images) - log("image_metas", image_metas) - log("anchors", anchors) - # Run object detection - detections, _, _, mrcnn_mask, _, _, _ = self.keras_model.predict( - [molded_images, image_metas, anchors], verbose=0 - ) - # Process detections - results = [] - for i, image in enumerate(molded_images): - window = [0, 0, image.shape[0], image.shape[1]] - ( - final_rois, - final_class_ids, - final_scores, - final_masks, - ) = self.unmold_detections( - detections[i], - mrcnn_mask[i], - image.shape, - molded_images[i].shape, - window, - ) - results.append( - { - "rois": final_rois, - "class_ids": final_class_ids, - "scores": final_scores, - "masks": final_masks, - } - ) - return results - - def get_anchors(self, image_shape): - """Returns anchor pyramid for the given image size.""" - backbone_shapes = compute_backbone_shapes(self.config, image_shape) - # Cache anchors and reuse if image shape is the same - if not hasattr(self, "_anchor_cache"): - self._anchor_cache = {} - if not tuple(image_shape) in self._anchor_cache: - # Generate Anchors - a = utils.generate_pyramid_anchors( - self.config.RPN_ANCHOR_SCALES, - self.config.RPN_ANCHOR_RATIOS, - backbone_shapes, - self.config.BACKBONE_STRIDES, - self.config.RPN_ANCHOR_STRIDE, - ) - # Keep a copy of the latest anchors in pixel coordinates because - # it's used in inspect_model notebooks. - # TODO: Remove this after the notebook are refactored to not use it - self.anchors = a - # Normalize coordinates - self._anchor_cache[tuple(image_shape)] = utils.norm_boxes( - a, image_shape[:2] - ) - return self._anchor_cache[tuple(image_shape)] - - def ancestor(self, tensor, name, checked=None): - """Finds the ancestor of a TF tensor in the computation graph. - tensor: TensorFlow symbolic tensor. - name: Name of ancestor tensor to find - checked: For internal use. A list of tensors that were already - searched to avoid loops in traversing the graph. - """ - checked = checked if checked is not None else [] - # Put a limit on how deep we go to avoid very long loops - if len(checked) > 500: - return None - # Convert name to a regex and allow matching a number prefix - # because Keras adds them automatically - if isinstance(name, str): - name = re.compile(name.replace("/", r"(\_\d+)*/")) - - parents = tensor.op.inputs - for p in parents: - if p in checked: - continue - if bool(re.fullmatch(name, p.name)): - return p - checked.append(p) - a = self.ancestor(p, name, checked) - if a is not None: - return a - return None - - def find_trainable_layer(self, layer): - """If a layer is encapsulated by another layer, this function - digs through the encapsulation and returns the layer that holds - the weights. 
- """ - if layer.__class__.__name__ == "TimeDistributed": - return self.find_trainable_layer(layer.layer) - return layer - - def get_trainable_layers(self): - """Returns a list of layers that have weights.""" - layers = [] - # Loop through all layers - for l in self.keras_model.layers: - # If layer is a wrapper, find inner trainable layer - l = self.find_trainable_layer(l) - # Include layer if it has weights - if l.get_weights(): - layers.append(l) - return layers - - def run_graph(self, images, outputs, image_metas=None): - """Runs a sub-set of the computation graph that computes the given - outputs. - - image_metas: If provided, the images are assumed to be already - molded (i.e. resized, padded, and normalized) - - outputs: List of tuples (name, tensor) to compute. The tensors are - symbolic TensorFlow tensors and the names are for easy tracking. - - Returns an ordered dict of results. Keys are the names received in the - input and values are Numpy arrays. - """ - model = self.keras_model - - # Organize desired outputs into an ordered dict - outputs = OrderedDict(outputs) - for o in outputs.values(): - assert o is not None - - # Build a Keras function to run parts of the computation graph - inputs = model.inputs - if model.uses_learning_phase and not isinstance(K.learning_phase(), int): - inputs += [K.learning_phase()] - kf = K.function(model.inputs, list(outputs.values())) - - # Prepare inputs - if image_metas is None: - molded_images, image_metas, _ = self.mold_inputs(images) - else: - molded_images = images - image_shape = molded_images[0].shape - # Anchors - anchors = self.get_anchors(image_shape) - # Duplicate across the batch dimension because Keras requires it - # TODO: can this be optimized to avoid duplicating the anchors? - anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape) - model_in = [molded_images, image_metas, anchors] - - # Run inference - if model.uses_learning_phase and not isinstance(K.learning_phase(), int): - model_in.append(0.0) - outputs_np = kf(model_in) - - # Pack the generated Numpy arrays into a a dict and log the results. - outputs_np = OrderedDict([(k, v) for k, v in zip(outputs.keys(), outputs_np)]) - for k, v in outputs_np.items(): - log(k, v) - return outputs_np - - -############################################################ -# Data Formatting -############################################################ - - -def compose_image_meta( - image_id, original_image_shape, image_shape, window, scale, active_class_ids -): - """Takes attributes of an image and puts them in one 1D array. - - image_id: An int ID of the image. Useful for debugging. - original_image_shape: [H, W, C] before resizing or padding. - image_shape: [H, W, C] after resizing and padding - window: (y1, x1, y2, x2) in pixels. The area of the image where the real - image is (excluding the padding) - scale: The scaling factor applied to the original image (float32) - active_class_ids: List of class_ids available in the dataset from which - the image came. Useful if training on images from multiple datasets - where not all classes are present in all datasets. - """ - meta = np.array( - [image_id] - + list(original_image_shape) # size=1 - + list(image_shape) # size=3 - + list(window) # size=3 - + [scale] # size=4 (y1, x1, y2, x2) in image cooredinates - + list(active_class_ids) # size=1 # size=num_classes - ) - return meta - - -def parse_image_meta(meta): - """Parses an array that contains image attributes to its components. - See compose_image_meta() for more details. 
- - meta: [batch, meta length] where meta length depends on NUM_CLASSES - - Returns a dict of the parsed values. - """ - image_id = meta[:, 0] - original_image_shape = meta[:, 1:4] - image_shape = meta[:, 4:7] - window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels - scale = meta[:, 11] - active_class_ids = meta[:, 12:] - return { - "image_id": image_id.astype(np.int32), - "original_image_shape": original_image_shape.astype(np.int32), - "image_shape": image_shape.astype(np.int32), - "window": window.astype(np.int32), - "scale": scale.astype(np.float32), - "active_class_ids": active_class_ids.astype(np.int32), - } - - -def parse_image_meta_graph(meta): - """Parses a tensor that contains image attributes to its components. - See compose_image_meta() for more details. - - meta: [batch, meta length] where meta length depends on NUM_CLASSES - - Returns a dict of the parsed tensors. - """ - image_id = meta[:, 0] - original_image_shape = meta[:, 1:4] - image_shape = meta[:, 4:7] - window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels - scale = meta[:, 11] - active_class_ids = meta[:, 12:] - return { - "image_id": image_id, - "original_image_shape": original_image_shape, - "image_shape": image_shape, - "window": window, - "scale": scale, - "active_class_ids": active_class_ids, - } - - -def mold_image(images, config): - """Expects an RGB image (or array of images) and subtracts - the mean pixel and converts it to float. Expects image - colors in RGB order. - """ - return images.astype(np.float32) - config.MEAN_PIXEL - - -def unmold_image(normalized_images, config): - """Takes a image normalized with mold() and returns the original.""" - return (normalized_images + config.MEAN_PIXEL).astype(np.uint8) - - -############################################################ -# Miscellenous Graph Functions -############################################################ - - -def trim_zeros_graph(boxes, name="trim_zeros"): - """Often boxes are represented with matrices of shape [N, 4] and - are padded with zeros. This removes zero boxes. - - boxes: [N, 4] matrix of boxes. - non_zeros: [N] a 1D boolean mask identifying the rows to keep - """ - non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool) - boxes = tf.boolean_mask(boxes, non_zeros, name=name) - return boxes, non_zeros - - -def batch_pack_graph(x, counts, num_rows): - """Picks different number of values from each row - in x depending on the values in counts. - """ - outputs = [] - for i in range(num_rows): - outputs.append(x[i, : counts[i]]) - return tf.concat(outputs, axis=0) - - -def norm_boxes_graph(boxes, shape): - """Converts boxes from pixel coordinates to normalized coordinates. - boxes: [..., (y1, x1, y2, x2)] in pixel coordinates - shape: [..., (height, width)] in pixels - - Note: In pixel coordinates (y2, x2) is outside the box. But in normalized - coordinates it's inside the box. - - Returns: - [..., (y1, x1, y2, x2)] in normalized coordinates - """ - h, w = tf.split(tf.cast(shape, tf.float32), 2) - scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0) - shift = tf.constant([0.0, 0.0, 1.0, 1.0]) - return tf.divide(boxes - shift, scale) - - -def denorm_boxes_graph(boxes, shape): - """Converts boxes from normalized coordinates to pixel coordinates. - boxes: [..., (y1, x1, y2, x2)] in normalized coordinates - shape: [..., (height, width)] in pixels - - Note: In pixel coordinates (y2, x2) is outside the box. But in normalized - coordinates it's inside the box. 
- - Returns: - [..., (y1, x1, y2, x2)] in pixel coordinates - """ - h, w = tf.split(tf.cast(shape, tf.float32), 2) - scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0) - shift = tf.constant([0.0, 0.0, 1.0, 1.0]) - return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32) diff --git a/spaces/innnky/soft-vits-vc/transforms.py b/spaces/innnky/soft-vits-vc/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/innnky/soft-vits-vc/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - 
min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * 
torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Atdevenglishtobengalidictionarypdffreedownload TOP.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Atdevenglishtobengalidictionarypdffreedownload TOP.md deleted file mode 100644 index f7aaa6aa4b2f73691130a72a8ef47be664db14d4..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Atdevenglishtobengalidictionarypdffreedownload TOP.md +++ /dev/null @@ -1,6 +0,0 @@ -
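The `transforms.py` file removed just above implements the piecewise rational-quadratic spline used by flow-based models. Below is a hedged, standalone sketch of driving it with `tails="linear"` (the usual flow-model setting); it assumes the module is importable as `transforms`, and all tensor sizes are made up for illustration. The unnormalized derivatives carry `num_bins - 1` values because `unconstrained_rational_quadratic_spline` pads them internally before building the knots.

```python
import torch
from transforms import piecewise_rational_quadratic_transform

num_bins = 10
x = torch.rand(2, 192, 50) * 2 - 1            # values inside [-tail_bound, tail_bound]
w = torch.randn(2, 192, 50, num_bins)         # unnormalized bin widths
h = torch.randn(2, 192, 50, num_bins)         # unnormalized bin heights
d = torch.randn(2, 192, 50, num_bins - 1)     # unnormalized knot derivatives (padded inside)

y, logabsdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails="linear", tail_bound=1.0
)
x_rec, inv_logabsdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails="linear", tail_bound=1.0
)
# The spline is a bijection, so x_rec ≈ x and logabsdet + inv_logabsdet ≈ 0.
```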

    atdevenglishtobengalidictionarypdffreedownload


    Download ———>>> https://urlin.us/2uEygh



    -
    -Selected works of britney mccants follow atdevenglishtobengalidictionarypdffreedownload. Buddha dhammapada. Pdf tartuce direito das coisas minhateca ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocad 2011 X64 Portable 19.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Autocad 2011 X64 Portable 19.md deleted file mode 100644 index 65d978a5d9bf297e37a79346e377bc1a956a4bb2..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Autocad 2011 X64 Portable 19.md +++ /dev/null @@ -1,16 +0,0 @@ -

    autocad 2011 x64 portable 19


    DOWNLOAD ✺✺✺ https://urlin.us/2uExkq



    - -This article provides a basic introduction of the Autodesk Inventor project of Altair. It provides the essentials of the 3D modeling of the Inventor project, its advantages and disadvantages. In addition, it provides a comprehensive introduction of the 3D models of the Inventor project and their respective editing and plotting functions. - -The Autodesk Lab Manage – Building environment provides the building user with the ability to plan, build, and manage dynamic building environments. Part of a suite of Autodesk building design and analysis software, Lab Manage – Building Environment allows you to work with Building Designer, Building Constructor, Building Manager, and Building Link to manage building-related activities, including CAD, for use with Autodesk Building Design Suite. Autodesk Lab Manage – Building Environment allows for a single installation of multiple applications, and offers a fast, convenient, and productive method for the building user to complete a variety of tasks. - -For professionals, the Autodesk Design Review Utility helps you manage your business workflow with a single user interface. Review workspaces that include your business plan, marketing strategy, and corporate identity. You can quickly and easily review, comment on, and save different versions of your projects. Start and manage your workflows with dynamic project overview, work item tracking, file type-aware filtering and automatic comment tracking. Commenting on your work is easy, with comment tags that are unique to the project. - -The Autodesk University Training Blog is a free blog by our team of Autodesk Certified Instructors that will give you insights to our classes and tips and tricks on using Autodesk. We encourage everyone to check back regularly as the blog will keep expanding to provide more tips and tricks for you. - -Autodesk provides training materials and courses in 3D design, modeling and animation for beginners to professionals. You can find a broad selection of courses on the Autodesk University website and at the store. The course catalog is constantly growing and is accompanied by news, teaching tips, reviews and recommendations for Autodesk products.CT angiography of abdominal and pelvic arterial vascular pathology. - -Computed tomographic (CT) 4fefd39f24
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Jilebi Malayalam Movie Download Hd.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Jilebi Malayalam Movie Download Hd.md deleted file mode 100644 index 15536e56e1196dd1ce0ea8fbe0e2390b3754e246..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Jilebi Malayalam Movie Download Hd.md +++ /dev/null @@ -1,46 +0,0 @@ -
    -

    Jilebi Malayalam Movie Download Hd: A Comedy of Contrasts

    - -

    Jilebi is a 2015 Malayalam comedy film directed by Arun Shekhar and starring Jayasurya, Remya Nambeesan, Vijayaraghavan, and K.P.A.C. Lalitha. The film is a remake of the 2013 Kannada film Jilebi, which itself was inspired by the 2009 Malayalam short film Jilebi.

    - -

    The film tells the story of Sreekuttan (Jayasurya), a simple village guy who lives happily with his farm and his crops. His life takes a turn when his sister and brother-in-law leave their two children with him for a few days. The children, Pooja (Gauri) and Rahul (Sidharth), are spoiled city brats who have no respect for Sreekuttan or his lifestyle.

    -

    Jilebi Malayalam Movie Download Hd


    Download Filehttps://urlin.us/2uEx9f



    - -

    Sreekuttan tries to bond with them and teach them some values, but they only make fun of him and create trouble for him. Meanwhile, Sreekuttan also falls in love with Sridevi (Remya Nambeesan), a school teacher who comes to his village. However, he faces competition from Prakashan (Dharmajan Bolgatty), a local goon who also likes Sridevi.

    - -

    How will Sreekuttan deal with the children and win Sridevi's heart? Will the children learn to appreciate Sreekuttan and his village? Will Sreekuttan find happiness in his life? These are some of the questions that Jilebi Malayalam Movie Download Hd answers in a humorous and heartwarming way.

    - -

    Why you should watch Jilebi Malayalam Movie Download Hd?

    - -

    Jilebi Malayalam Movie Download Hd is a fun-filled comedy that will make you laugh and smile. The film has a simple but engaging plot that explores the contrast between the urban and rural lifestyles and cultures. The film also has a message about family values, love, and respect.

    - -

    The film is well-acted by the cast, especially Jayasurya who delivers a convincing performance as the innocent and naive Sreekuttan. Remya Nambeesan is charming as the sweet and sensible Sridevi. The child actors Gauri and Sidharth are also impressive as the naughty and mischievous Pooja and Rahul. The supporting actors Vijayaraghavan, K.P.A.C. Lalitha, Dharmajan Bolgatty, Nandu, Sunil Sukhada, etc. add to the comedy quotient of the film.

    - -

    The film is well-directed by Arun Shekhar who maintains a good pace and balance between comedy and emotion. The film is well-written by Arun Shekhar and Raveendran who infuse humor and wit in the dialogues and situations. The film is well-shot by Alby Antony who captures the beauty of the village scenery. The film is well-edited by Lijo Paul who keeps the film crisp and tight. The film is well-scored by Bijibal who composes some catchy and melodious songs for the film.

    - -

    How to download Jilebi Malayalam Movie Download Hd?

    - -

    If you want to download Jilebi Malayalam Movie Download Hd, you have several options:

    -

    - -
      -
    1. You can watch it online on Sun NXT, a streaming platform that offers a wide range of Malayalam movies and shows. You can find Jilebi malayalam on Sun NXT here: https://sunnxt.com/malayalam-movie-jilebi-2015/detail/59432.
    2. -
    3. You can download it from Tormalayalam New, a website that offers Malayalam movies in HD quality. You can find Jilebi (2015) Malayalam Movies HD on Tormalayalam New here: https://tormalayalam.org/malayalam/jilebi-2015/.
    4. -
    5. You can download it from SoundCloud, a platform that allows you to play audiobooks and excerpts on desktop and mobile devices. You can find Jilebi Malayalam Movie Download Hd on SoundCloud here: https://soundcloud.com/shari-nicholson/jilebi-malayalam-movie-download-hd.
    6. -
    - -

    However, before you download Jilebi Malayalam Movie Download Hd, you should be aware of the legal and ethical issues involved in downloading movies from unauthorized sources. You should respect the rights of the filmmakers and producers who have invested their time, money, and effort in making this movie. You should also avoid downloading movies from websites that may contain viruses or malware that can harm your device or data.

    - -

    Conclusion

    - -

    Jilebi Malayalam Movie Download Hd is a comedy film that will entertain you with its humor, emotion, and message. The film has a talented cast, a good director, a good writer, a good cinematographer, a good editor, and a good composer who have made this film a delightful watch.

    - -

    If you want to watch Jilebi Malayalam Movie Download Hd

    -

    , you can choose from the options mentioned above. However, you should also respect the rights of the filmmakers and producers and avoid downloading movies from unauthorized sources.

    - -

    Jilebi Malayalam Movie Download Hd is a movie that will make you laugh and smile with its comedy of contrasts. It is a movie that you can enjoy with your family and friends.

    - -

    So, what are you waiting for? Download Jilebi Malayalam Movie Download Hd today and have a jolly good time!

    -

    This is the end of the article. Thank you for reading.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/colors.py b/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/colors.py deleted file mode 100644 index 6ec81e197ef2b918a352d04f57337b956137b0e6..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/colors.py +++ /dev/null @@ -1,16 +0,0 @@ -from skimage.exposure import match_histograms -import cv2 - -def maintain_colors(prev_img, color_match_sample, mode): - if mode == 'Match Frame 0 RGB': - return match_histograms(prev_img, color_match_sample, multichannel=True) - elif mode == 'Match Frame 0 HSV': - prev_img_hsv = cv2.cvtColor(prev_img, cv2.COLOR_RGB2HSV) - color_match_hsv = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2HSV) - matched_hsv = match_histograms(prev_img_hsv, color_match_hsv, multichannel=True) - return cv2.cvtColor(matched_hsv, cv2.COLOR_HSV2RGB) - else: # Match Frame 0 LAB - prev_img_lab = cv2.cvtColor(prev_img, cv2.COLOR_RGB2LAB) - color_match_lab = cv2.cvtColor(color_match_sample, cv2.COLOR_RGB2LAB) - matched_lab = match_histograms(prev_img_lab, color_match_lab, multichannel=True) - return cv2.cvtColor(matched_lab, cv2.COLOR_LAB2RGB) \ No newline at end of file diff --git a/spaces/jackli888/stable-diffusion-webui/modules/textual_inversion/logging.py b/spaces/jackli888/stable-diffusion-webui/modules/textual_inversion/logging.py deleted file mode 100644 index b2c01f0a4ef6666c0c2e1147dbee9d6850d277c0..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/textual_inversion/logging.py +++ /dev/null @@ -1,24 +0,0 @@ -import datetime -import json -import os - -saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "gradient_step", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"} -saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"} -saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"} -saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet -saved_params_previews = {"preview_prompt", "preview_negative_prompt", "preview_steps", "preview_sampler_index", "preview_cfg_scale", "preview_seed", "preview_width", "preview_height"} - - -def save_settings_to_file(log_directory, all_params): - now = datetime.datetime.now() - params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")} - - keys = saved_params_all - if all_params.get('preview_from_txt2img'): - keys = keys | saved_params_previews - - params.update({k: v for k, v in all_params.items() if k in keys}) - - filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json' - with open(os.path.join(log_directory, filename), "w") as file: - json.dump(params, file, indent=4) diff --git a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.h b/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.h deleted file mode 100644 index 2c403e3f275f472315662321cad54dd0dbc56d00..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/stylegan3/torch_utils/ops/filtered_lrelu.h +++ /dev/null @@ -1,90 
+0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include - -//------------------------------------------------------------------------ -// CUDA kernel parameters. - -struct filtered_lrelu_kernel_params -{ - // These parameters decide which kernel to use. - int up; // upsampling ratio (1, 2, 4) - int down; // downsampling ratio (1, 2, 4) - int2 fuShape; // [size, 1] | [size, size] - int2 fdShape; // [size, 1] | [size, size] - - int _dummy; // Alignment. - - // Rest of the parameters. - const void* x; // Input tensor. - void* y; // Output tensor. - const void* b; // Bias tensor. - unsigned char* s; // Sign tensor in/out. NULL if unused. - const float* fu; // Upsampling filter. - const float* fd; // Downsampling filter. - - int2 pad0; // Left/top padding. - float gain; // Additional gain factor. - float slope; // Leaky ReLU slope on negative side. - float clamp; // Clamp after nonlinearity. - int flip; // Filter kernel flip for gradient computation. - - int tilesXdim; // Original number of horizontal output tiles. - int tilesXrep; // Number of horizontal tiles per CTA. - int blockZofs; // Block z offset to support large minibatch, channel dimensions. - - int4 xShape; // [width, height, channel, batch] - int4 yShape; // [width, height, channel, batch] - int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused. - int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. - int swLimit; // Active width of sign tensor in bytes. - - longlong4 xStride; // Strides of all tensors except signs, same component order as shapes. - longlong4 yStride; // - int64_t bStride; // - longlong3 fuStride; // - longlong3 fdStride; // -}; - -struct filtered_lrelu_act_kernel_params -{ - void* x; // Input/output, modified in-place. - unsigned char* s; // Sign tensor in/out. NULL if unused. - - float gain; // Additional gain factor. - float slope; // Leaky ReLU slope on negative side. - float clamp; // Clamp after nonlinearity. - - int4 xShape; // [width, height, channel, batch] - longlong4 xStride; // Input/output tensor strides, same order as in shape. - int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused. - int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. -}; - -//------------------------------------------------------------------------ -// CUDA kernel specialization. - -struct filtered_lrelu_kernel_spec -{ - void* setup; // Function for filter kernel setup. - void* exec; // Function for main operation. - int2 tileOut; // Width/height of launch tile. - int numWarps; // Number of warps per thread block, determines launch block size. - int xrep; // For processing multiple horizontal tiles per thread block. - int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants. -}; - -//------------------------------------------------------------------------ -// CUDA kernel selection. 
- -template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); -template void* choose_filtered_lrelu_act_kernel(void); -template cudaError_t copy_filters(cudaStream_t stream); - -//------------------------------------------------------------------------ diff --git a/spaces/jarvis1997/fr_demo1/README.md b/spaces/jarvis1997/fr_demo1/README.md deleted file mode 100644 index 9af54dca9f1956d33877bf7df09b34c2d6ddeeaf..0000000000000000000000000000000000000000 --- a/spaces/jarvis1997/fr_demo1/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Animation Using Thin Plate Spline Motion Model -emoji: 👁 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false -duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jbilcke-hf/LifeSim/src/lib/utils.ts b/spaces/jbilcke-hf/LifeSim/src/lib/utils.ts deleted file mode 100644 index ec79801fe9cdd7711f6dbef26678a134c634a8be..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/LifeSim/src/lib/utils.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { type ClassValue, clsx } from "clsx" -import { twMerge } from "tailwind-merge" - -export function cn(...inputs: ClassValue[]) { - return twMerge(clsx(inputs)) -} diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/app/server/actions/censorship.ts b/spaces/jbilcke-hf/ai-clip-factory/src/app/server/actions/censorship.ts deleted file mode 100644 index 09a40ca21bb8a25b705d0d86f1df7b073f4c1c97..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/app/server/actions/censorship.ts +++ /dev/null @@ -1,224 +0,0 @@ - -// I don't want to be banned by Replicate because bad actors are asking -// for some naked anime stuff or whatever -// I also want to avoid a PR scandal due to some bad user generated content - -import { computeSecretFingerprint } from "@/lib/computeSecretFingerprint" - -// those keywords have been generated by looking at the logs of the panorama and the AI Comic Factory -// those are real requests some users tried to attempt.. 
:| - -const chickens = [ - "fcb4dacbd99b21368c50f29c1d47071c87cf2225ab9192282c785460391cd365", - "68840b60ac27eacaa7afe17e898d3c4a2dc71acff8c74d6782c1bcaafd14963d", - "67f745224fd6e1a7a3a244514d5807fcc994cbb62ca4ec8fa44cd14244a515ae", - "681fea565117808c6dbe002520d2cfeeb3e5c67e68630afb4a453449a9da587b", - "2f3d913b3db9e15a930aac43eb2d6fe8817db8e4bcf37794bf0227b06b718d1b", - "922a700b807e4994df82eba2b48a6ac131fe8d8d1035d06b3592d622fb232161", - "cb69ee6774eafcc720adb1f689d28acbb9f47998cbea0299ec66a58dedf91c37", - "bc1d4ab3fd611d6931e7fbaef942555a3a5e836dc7bacbf1939183a96e41a03b", - "cf79a5ba29b984a1d35c4cafe917328838af7d9b10e00f4ce91298317212b2b9" -] - -const ducks = [ - "275123e48637924d00186a87c516116aa576dce3049460bceca691899f3cac90", - "9b7cec7230a2fcdeb8c97597865ca61f5e66ec55e37aaea85cbe934f593856c0", - "2812acc35c3aa389afb3054d36c69f5f9b6cb4ef76d45cf26d9318feee8489db", - "4a6d9b2bcf6e40c84e243861aa7df26773f442f75402e33094be15132eae87cf", - "1c52cb20c0cbc76349fa63232b982bd394cf0850ebc17240dcf33c19fb15a26d", - "e1d4de9b8d464d7da07c276b63a42c1c9922224f0a6cab6b0826427ce4a7461a", - "0be3174bfb1a48a65875c2f035b1ae14fbc8f232f55785018de0cfe2132fa952", - "0f174769641b2e5d2c79b5a83e8ef91e004f6f3e62531cd70cfdff02159268cb", - "e9fb8ae8ff720acd91025229478a21e43e8e976e30119a76c293201adf572736", - "f65a0dc0e07b5d084ff24c69dcdb953f7b57101d2ebb716d4dfb5963076ef807", - "2bf38af1646489c2c086f811d082054cd29e23fa7bb5c525396bec01b3ab688e" -] - -const cats = [ - "fcffc3e997d952007d1b902a9cf40b750ba4a410ac65bfd95475996bf51359e4", - "3172a5fa159754d703489dfba5af520b8ace107cdf170f4c4cb38a6797aa163f", - "500012dbff4498a9c4513369d6b9b373fab9330ffd2cb1e622294043cc21b610", - "84e3a8d34ee7d0c8e7a2926dd1acad46a0b66b9d27725b3a7e5053550f490301" -] - -const roasted = [ - "a2bfbce0046c9a52a0eabf98f73e0f8e09959970431fc892ebdb4e1c97031b50", - "6eca1adf06851f99e9cdfbb496c27d46ff81106903d11f3346a146e96082b016", - "49a124c9ed6fbbad4105b3657dc25de369bcafb9d6787f610c08f584cd607d0f", - "c3afb59420c812cbc7c8f57ad3e8d79407f10106a99f829aa65316c99d0b29c4", - "2b808858836a5c205080f5b93201ef92e098cff931d8de6d9f20dc722997d077", - "07bef89d1a7d63c9c5ed64ba0f73d6cff689811847c2e20c8b3fbfb060e1d64e", - "baeb994922d5473f534aa54322d83effe74c6c4dac807e6b523a677d7acdc17b", - "ea4735a879edd5cc94ca7db26edd5a970df69a41f0009d3444486647e44175af", - "f2412249030454cd13ac6f7965871d924c16daacda0123de81892adb19ce49ac", - "9958c56e12bab8549cf752bcd8bec4ac36cf79c404b1faf5611f057bb71bc0e1", - "76cdade0b3d4caf0888f60318a5cbca00f830a3b0bf37735fc64fdaeb67c34d3", - "1bf53c97869e1ea89bda19da64a9173d48fe4ec823e949e2c898f8abb3fbf457", - "1bf53c97869e1ea89bda19da64a9173d48fe4ec823e949e2c898f8abb3fbf457", - "3d7f973fab8f4a19c0a3e59efe970ed7bd55a1cb795752d9cbe3c19e8a7d81ec", - "62d6c28013b3852fd0b554f1f0d95dfcf6bfc9ce68e04b772aa748dddbe8afa0", - "37694dbebbb59f4289834a582b11db005c402e59ee9f8634772f3fc312ec8ea3", - "a024cb788656f9bda41d572c55e1a5e912a6067d99f2c8630eb0d44537692ec7", - "8175b29dc4d1c6d57b9d986f2f9ebbddf1d4ba597a35962ea246e4a5d4d0b850", - "148767417d2efc77acb2da36f93bc5015a6977ab449ac4fcf07855c3819bbc50", - "598e316d62873f658a35be0b2c00d30426033fa7110874488a0a3b6fb28fe055", - "2f285382bc482be84067745f99d1c0d50781f451823b62706c5b5676dbacbf89", - "8fdd4e08891194ff67e489c042ea08be24e436d97022e5c8241e8f90512bdde2", - "6a73e93289e6b05f767d1a980592588d4409c1c692365b06ad0b1981d183123f", - "42d5a173c8fb1926ea10410f4e4f8bc84dca971305ae7ccfe1c8a600343af89e", - "27f291178bfcdfdc90aaf220dcc4c4b976915fcd2700af6d203dd7405f81c695", - 
"f0e023194463f09a7ae09c476a5f9d50f62cc18381fcb5bde2185d93a24fe68d", - "6d273ae558f6be0fa67bbefe281b25d3312ad94e59972782557459e3dfd4e458", - "620e96eae4f3e88cbe0770292b33724c5df3866d83f39df6380441f7271c80e2" -] - -const banned = [ - "3ec2ad1877284220d0515bdb8716c9984143f72f9dcadb5396f0ad8f0ba934fb", - "e7db7d13264f1fc484d07e5ae960a702d50ab004055210c9085b6a77a77e182e", - "03398ec4bdce4de69c76caf151cf3e1db5454208e88da147d607327a56d9ec27", - "f27229416e1e11b9e7fe28f81fedff6cba2f6cd21eb8ac0d78e74908fbef1bd5", - "8175b29dc4d1c6d57b9d986f2f9ebbddf1d4ba597a35962ea246e4a5d4d0b850", - "e960effc886938d2668eb534853812e225b85155f7710eae26588de5b53ac021", - "d4610b2275d30ba419844fc84a6f2b5c0eafdd708395076f510e5e4503eec112", - "8b6cc149eb1ab3d92c43ba3bfa828e0b1ef943280b4eab4c5b9253766459a79e", - "cb345cd0e5c7126dfeca689e40cf4793af1d5af2a1aabd32013d30332b128fae", - "d13d07be3208feb83fb20c1d0a89c47d580fac3e1909a696bc07ecd419b22455", - "3c5c25b5b049f5d1bb1fcb32ad09f43c90d696f57bec456e53cde5a83e56bc06", - "68f4a683152587cd54990465d83b5701e57c63359c0f347cb1a81d60be3e3cd9", - "efd9c0a391ee93251046a58326d1b21b33fe21d71a3fb1855b9048ade53df77c", - "c82e151dd07b5ab5f3790c7e18f22d4fc81c339facd116fef4d21360fcfd21c5", - "f59a1abfa25e4cf546c31c81e94f9bae377c79b4e4b199179fa97a328b1a4847", - "1ae56aafb3a9c846294f842ca5a06c9e4fcb18f550878fbed1e7e10ae1af635e", - "8f160c6fd8ccc3fb2a371a4b52748f0bd030766627c4322e2911fe82f6b10497", - "8a05d4869d9d6ce388c6cd2db13ca12b88097b90f9be027d5ffaaa467c7a6e5e", - "0c475212a608138244c5fc150b1563e5ef79c516234fd78dcd5993f726c359a0", - "df17388805f99f2ff3e5ae97a0f55e5c927eb47f17ca65822bf8c88f02bac3dd", - "86c3355d1bd581cdf7306729d8dd0ee9b7a317b9cfd6d7a6f5fad9c0dafe2167", - "23a2484cd420c9ffbfcc2c0075a9b330664450ced1fc64ab6a65e278086b8c6e", - "fb4cabe709b62eea1b4cc0030c76f5e4a43ee677ce19124e8e7bafa86c78ab66", - "d99c26daee85f7dc81c46c061a5874cff7179ed72d884d2316d664d36ffe7ab5", - "b93c38af5aa221d76c60ee3eb762efee0cdb0daf29ceb235b7dda6d46c06490d", - "8cf6c8765dc757319461dd9a785e77c201b8e5a604d36b817cd987c6a5e62500", - "f4a1cb290745717f86c3cee30fc324c0d80a9945fcbc7bbeb010579f58792f1e", - "7c87c47c42fc983119551342be9ddd5b32e530c0504ccdbbaa1e12b1d9f1bbcb", - "d04fad4f21d030da7a1301afbf480ef6246eb7bbf0f26e31865b2e015a25f747", - "d685ff22fb9da01ee949db212770729603989850864ef7a7085e1f086cfa7deb", - "533b90588d9ccf7967da54691f575e9fd4926c6e0b5fd94a47b932bcea270bee", - "9c2d61f28f5bb7f3f1dc9122be64cda8a428b46ce68b70120da4c41dba96ba4c", - "5d4b1a3eebe64dfa631d0e3b084bd96ee9364c3669269f838ca17a4900276264", - "d56f56413b9679fc0820a2c0237224ded8554c61fab8959c174123c8b68ba029", - "323a9ab60739726070d615ff3a05d7ff6bb6e3c4dd9ff16ce24f253ecd7b8851", - "975c6739de7d4999db15972f707f5f4e95649275f1c0c48e895b8c537e8638ec", - "67ee26eb9e1c1c7124797321b02bca90a19c18171782917cd4a487b722484dce", - "6df5aa7b72a4e6e3fb726489ff1437daa5752047507f4da912680b1d6647c7d6", - "b0864805364359e8c5810c233b1bf2c74dedce9055ae5f7680ba05b4e39db8e2", - "a8f841472ecffdd6266151148320c8e36847a24ead9d3338e0313b075c16649d", - "f9b127cd90e85b0ff68dd220361671663f0154b2b827f1f7ea797b020ca0018c", - "d5c20e9a1ecf01c82da24c514d867498b3e5f522adc1523ce29404a6563641d5", - "241022b49d7c0aba24a61eea1137a804f36e4bcb47af42950275baac9b4e7aac", - "fc99a70e17b6c86ef1b537654b0f50353567a7b59912c3ba955f3fca4d1ea696", - "255306e968009003d295cb2a7256f27bfcdb5d1743bf4d9f2aa4b8adf1a7734d", - "048c7b709763dd9c43794d241c369f0abcb079d546ddcbbba9968a1ed1da7ed7", - "520cbfeef3e4c405d79478eedccb97a4d476be585626dd2b1c53292797491bc7", - 
"f9f28a7ae7e8b1719b350a04dc087a4b8e33478d109ceeef6ba892b32d1105c9", - "d177f1bfe603647ef4c1c0e6f1a7172081fb9bbc2ea859705949f2c5aa5d4f22", - "302feef2c09247fbd23789581f7f5e2219f88ae0a937880954938573c2a52a84", - "99edd6f57b864873835f16f19c805dd94bed9da8967b84e3a62782f106d9ebcc", - "e75e5f01dcd8351c9553e89558085bd68e6feb295dee5d8da0c9b43ee303ce36", - "135e52a026aea9d2e12de358a85e05cf21121a18269269b7c62678c3bc846f5b", - "28e5b2d3eb5f1ef4cc7b570878b03acf303a6ca4ca95893591e0fb943b0beab0", - "a26b26340f8d0363633490556d20bcc250726d10e1431eb8c22d6b1ff3f2b14a", - "27e4ddde96ec6a1dbe1cf12d79448b3e72f144944c15b299629542d1b65fbabf", - "efd9c0a391ee93251046a58326d1b21b33fe21d71a3fb1855b9048ade53df77c", - "6d505fcce416c26a606878aab4d249a034ba2a9846cb1f883e0f9e3fb76ba6da", - "3a37b8a1b72f9bca51233536d50f9c8d33a787434684787871e0049c82347cda", - "16f9b451184a7c3148344c7d0315f5312ca20553d2271912ecaad91810d977e6", - "7406537eb74d1885bd05e191228de313b13702a64d90ae1736c6377b25ab579a", - "7e4d1395ae18980015cab16c85ffa20b4cb90a2db594126e893d0f7ac6eecaa8", - "ba813ee6c25698f0f68a07121d38bb47c9aa404c1ab0a6e767595cb75e1747b8", - "6586c93f3ece83e01ecc1eb84a7711e7975826a388d478a009468ea0ed9dc03e", - "8960174c74d86e03ae88fb6774580170e49952f2286d960be08c556bbd0dda95", - "4d611454369aa1a4e2b7eed1734fac5d480f08fb86b87a162967e416370f2a8e", - "59d48440f85eabf565fe8d3bc6b973ba64c70df3b36b0511e0e67ceca91762b3", - "cd926926e2af74e43d1a6a420a7e1933b78662320477a3c018b2711d8765e339", - "80e90057df6a59823f51aafac36ed5bc4e5ac26d675d9c1467501590c82f12d4", - "a9cf28b869b70e258adde5639a048f866ec86f8f3f3d53bfc960b86aa6da9239", - "cc2adbf8ac0cddeefa304d7b20f14a7e047a4b2299cc5e8f898f5c59660bd964", - "92a150a46146e9d3f84899cf15e12514af684e7ee18d7add782ddd4f4a15ef18", - "d9b2e84ef6dc0ce449357d52c9095f69b173a1b848ea2921199d33b0ec10024a", - "a9329a7e4d367a0135c1ca86c6ce5ecabcc26529235229d71b6bf991f7689e21", - "8f160c6fd8ccc3fb2a371a4b52748f0bd030766627c4322e2911fe82f6b10497", - "620e96eae4f3e88cbe0770292b33724c5df3866d83f39df6380441f7271c80e2", - "cafa3481fa3c45ed1e55cd0129c12b477eeab5aa3d6da20cae6d6292f19b0e6d", - "be07994e9a83aa3689e79b6e96123676ccc4fa29f523c28c750c6d60505531ee", - "f6498069768cd3aa79b2b0c91879694f05a259c8ee4a6bb343f0435f74eb1b53", - "c9b6b26cb3a694eb78fcac0a14ad18d46d50907186a9add41022d31d191b2b65" -] - -const young = [ - "ffdf66787b4a33b78b18c18822e334cfe2c8406caf442851deef451bd43140a1", - "858f22219afc4b32a7ba9a27a213d7f495e77c3cceed8147eae5282bf3e23d39", - "8c3c46df84ace3d58d4ce0fbc513017986b33c6002ae369d9f7dd1f892a898cb", - "66caa22b9483fdf026ce67de61067d81535a7c9b3169cbc5c2a455ac8dcc7bec", - "76893047b1eff9fadc7be07b13adb5aaed9c73bcdeea46ee07098605e2c7ff76", - "526cb848754e2baaa17376a5693d90ba3f69f71fd2a866f22876ac8a075849a7", - "f59c38e31d0f64dc1bfcdf34451723bc1a65570e209e5496c8d1d7f6d3d649db", - "e013a67e275c62c1402ccbbb11ad14afb8b8a82318a44c07d67599ed5ac874de", - "3bef34219fb07f867ecbff4d6748f598d6cc0761e17dd0d431ee1f4ec3281374", - "8211bf5f613fac06cd5d074d34c16dfacc9367c8afaa6ad3aff99d145e5221be" -] - -const getFingerprint = (word: string) => { - return computeSecretFingerprint( - word.toLocaleLowerCase().replaceAll(/[^a-zA-Z0-9]/gi, "") - ) -} - -const encode = (list: string[]) => { - console.log(JSON.stringify( - list.sort((a, b) => (b.length - a.length)) - .map(item => getFingerprint(item)), null, 2)) -} - -/* -encode([ -]) -*/ - -export const filterOutBadWords = (sentence: string) => { - if (process.env.ENABLE_CENSORSHIP !== "true") { return sentence } - - let requireCensorship = false - - const words = 
sentence.replaceAll(/[^a-zA-Z0-9]/gi, " ").replaceAll(/\s+/gi, " ").trim().split(" ") - - const sanitized = words.map(word => { - const fingerprint = getFingerprint(word) - - let result: string = word - // some users want to play it smart and bypass our system so let's play too - if (chickens.includes(fingerprint)) { - result = "chicken" - } else if (ducks.includes(fingerprint)) { - result = "duck" - } else if (cats.includes(fingerprint)) { - result = "cat" - } else if (roasted.includes(fingerprint)) { - result = "penguin" - } else if (young.includes(fingerprint)) { - result = "woman" - } else if (banned.includes(fingerprint)) { - result = "_BANNED_" - } - - if (result !== word) { - requireCensorship = true - } - return result - }).filter(item => item !== "_BANNED_").join(" ") - - // if the user didn't try to use a bad word, we leave it untouched - // he words array has been degraded by the replace operation, but it removes commas etc which isn't great - // so if the request was genuine and SFW, it's best to return the original prompt - return requireCensorship ? sanitized : sentence -} \ No newline at end of file diff --git a/spaces/jcenaa/Segment-Any-RGBD/CODE_OF_CONDUCT.md b/spaces/jcenaa/Segment-Any-RGBD/CODE_OF_CONDUCT.md deleted file mode 100644 index 83f431e8feeb7e80d571f39c9f6c1b96857b5f85..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,80 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic -address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a -professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. 
-Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -This Code of Conduct also applies outside the project spaces when there is a -reasonable belief that an individual's behavior may have a negative impact on -the project or its community. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/datasets/register_voc_seg.py b/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/datasets/register_voc_seg.py deleted file mode 100644 index b8c2be16f4bb5348de8f1051f3579e02e362488f..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/datasets/register_voc_seg.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets import load_sem_seg - -PASCALVOC20_NAMES = ( - "aeroplane", - "bicycle", - "bird", - "boat", - "bottle", - "bus", - "car", - "cat", - "chair", - "cow", - "diningtable", - "dog", - "horse", - "motorbike", - "person", - "pottedplant", - "sheep", - "sofa", - "train", - "tvmonitor", -) - -def _get_voc_meta(cat_list): - ret = { - "stuff_classes": cat_list, - } - return ret - - -def register_pascalvoc(root): - root = os.path.join(root, "VOCdevkit/VOC2012") - meta = _get_voc_meta(PASCALVOC20_NAMES) - - for name, image_dirname, sem_seg_dirname in [ - ("val", "JPEGImages", "annotations_detectron2/val"), - ]: - image_dir = os.path.join(root, image_dirname) - gt_dir = os.path.join(root, sem_seg_dirname) - all_name = f"pascalvoc20_sem_seg_{name}" - DatasetCatalog.register( - all_name, - lambda x=image_dir, y=gt_dir: load_sem_seg( - y, x, gt_ext="png", image_ext="jpg" - ), - ) - MetadataCatalog.get(all_name).set( - image_root=image_dir, - sem_seg_root=gt_dir, - evaluator_type="sem_seg", - ignore_label=255, - **meta, - ) - -_root = os.getenv("DETECTRON2_DATASETS", "datasets") -register_pascalvoc(_root) diff --git a/spaces/jerryyan21/wav2lip_demo_test/README.md b/spaces/jerryyan21/wav2lip_demo_test/README.md deleted file mode 100644 index 2433fbad773370bf11871d2e40e67e678686ac20..0000000000000000000000000000000000000000 --- a/spaces/jerryyan21/wav2lip_demo_test/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Wav2lip_demo_test -emoji: 👀 -colorFrom: gray -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
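# A minimal usage sketch, assuming the PASCAL VOC registration from the
# register_voc_seg.py diff above has been imported and executed; the split name
# "pascalvoc20_sem_seg_val" is the one that file registers, and the metadata
# fields queried here (stuff_classes, ignore_label) are the ones it sets.
from detectron2.data import DatasetCatalog, MetadataCatalog

records = DatasetCatalog.get("pascalvoc20_sem_seg_val")    # image/mask path records built by load_sem_seg
metadata = MetadataCatalog.get("pascalvoc20_sem_seg_val")  # carries stuff_classes, ignore_label, evaluator_type
print(len(records), metadata.stuff_classes[:5], metadata.ignore_label)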
diff --git a/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/mandarin.py b/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/mandarin.py deleted file mode 100644 index 093d8826809aa2681f6088174427337a59e0c882..0000000000000000000000000000000000000000 --- a/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/mandarin.py +++ /dev/null @@ -1,329 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import cn2an -import logging - -logging.getLogger('jieba').setLevel(logging.WARNING) -jieba.initialize() - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (bopomofo, ipa2) pairs: -_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'pwo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - 
('ㄌ', 'l'), - ('ㄍ', 'k'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'tɕ'), - ('ㄑ', 'tɕʰ'), - ('ㄒ', 'ɕ'), - ('ㄓ', 'tʂ'), - ('ㄔ', 'tʂʰ'), - ('ㄕ', 'ʂ'), - ('ㄖ', 'ɻ'), - ('ㄗ', 'ts'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ɤ'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'yæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'yn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'ɤŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'y'), - ('ˉ', '˥'), - ('ˊ', '˧˥'), - ('ˇ', '˨˩˦'), - ('ˋ', '˥˩'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa2(text): - for regex, replacement in _bopomofo_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i([aoe])', r'y\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i([aoe])', r'j\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_ipa2(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa2(text) - text = re.sub(r'i([aoe])', r'j\1', text) - text = re.sub(r'u([aoəe])', r'w\1', text) - text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) - text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) - return text \ No newline at end of file diff --git a/spaces/jkang/demo-painttransformer/gradio_painttransformer.py b/spaces/jkang/demo-painttransformer/gradio_painttransformer.py deleted file mode 100644 index 23568a56de0e960757e0967fc8d6f3f5df820149..0000000000000000000000000000000000000000 --- 
a/spaces/jkang/demo-painttransformer/gradio_painttransformer.py +++ /dev/null @@ -1,90 +0,0 @@ -'''PaintTransformer Demo - -- 2021-12-21 first created - - See: https://github.com/wzmsltw/PaintTransformer - -''' - -import os -# import cv2 # <== error -import imageio -import network -from time import time -from glob import glob -from loguru import logger -import numpy as np -import gradio as gr - -import paddle -import render_utils -import render_serial - -# ---------- Settings ---------- -GPU_ID = '-1' -os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID -DEVICE = 'cpu' if GPU_ID == '-1' else f'cuda:{GPU_ID}' - -examples = sorted(glob(os.path.join('input', '*.jpg'))) -WIDTH = 512 -HEIGHT = 512 -STROKE_NUM = 8 -FPS = 10 - -# ---------- Logger ---------- -logger.add('app.log', mode='a') -logger.info('===== APP RESTARTED =====') - -# ---------- Model ---------- -MODEL_FILE = 'paint_best.pdparams' -if not os.path.exists(MODEL_FILE): - os.system('gdown --id 1G0O81qSvGp0kFCgyaQHmPygbVHFi1--q') - logger.info('model downloaded') -else: - logger.info('model already exists') - -paddle.set_device(DEVICE) -net_g = network.Painter(5, STROKE_NUM, 256, 8, 3, 3) -net_g.set_state_dict(paddle.load(MODEL_FILE)) -net_g.eval() -for param in net_g.parameters(): - param.stop_gradient = True - -brush_large_vertical = render_utils.read_img('brush/brush_large_vertical.png', 'L') -brush_large_horizontal = render_utils.read_img('brush/brush_large_horizontal.png', 'L') -meta_brushes = paddle.concat([brush_large_vertical, brush_large_horizontal], axis=0) - -def predict(image_file): - original_img = render_utils.read_img(image_file, 'RGB', WIDTH, HEIGHT) - logger.info(f'--- image loaded & resized {WIDTH}x{HEIGHT}') - - logger.info('--- doing inference...') - t0 = time() - final_result_list = render_serial.render_serial(original_img, net_g, meta_brushes) - logger.info(f'--- inference took {time() - t0:.4f} sec') - - # out = cv2.VideoWriter('output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), FPS, (WIDTH, HEIGHT)) - frames = [] - for idx, frame in enumerate(final_result_list): - # out.write(frame) - frames.append(frame[:,:,::-1]) # BGR -> RGB - # out.release() - imageio.mimsave('output.gif', frames) - logger.info('--- animation generated') - return 'output.gif' - -iface = gr.Interface( - predict, - title='🎨 Paint Transformer', - description='This demo converts an image into a sequence of painted images (takes about 2 min ^^;)', - inputs=[ - gr.inputs.Image(label='Input image', type='filepath') - ], - outputs=[ - gr.outputs.Image(label='Output animation') - ], - examples=examples, - enable_queue=True, - article='

    Original work: PaintTransformer

    ' -) - -iface.launch(debug=True) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/implementations/git.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/implementations/git.py deleted file mode 100644 index 80c73e066d83211da6cfb2940edf97ab5cfe0789..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fsspec/implementations/git.py +++ /dev/null @@ -1,127 +0,0 @@ -import os - -import pygit2 - -from fsspec.spec import AbstractFileSystem - -from .memory import MemoryFile - - -class GitFileSystem(AbstractFileSystem): - """Browse the files of a local git repo at any hash/tag/branch - - (experimental backend) - """ - - root_marker = "" - cachable = True - - def __init__(self, path=None, fo=None, ref=None, **kwargs): - """ - - Parameters - ---------- - path: str (optional) - Local location of the repo (uses current directory if not given). - May be deprecated in favour of ``fo``. When used with a higher - level function such as fsspec.open(), may be of the form - "git://[path-to-repo[:]][ref@]path/to/file" (but the actual - file path should not contain "@" or ":"). - fo: str (optional) - Same as ``path``, but passed as part of a chained URL. This one - takes precedence if both are given. - ref: str (optional) - Reference to work with, could be a hash, tag or branch name. Defaults - to current working tree. Note that ``ls`` and ``open`` also take hash, - so this becomes the default for those operations - kwargs - """ - super().__init__(**kwargs) - self.repo = pygit2.Repository(fo or path or os.getcwd()) - self.ref = ref or "master" - - @classmethod - def _strip_protocol(cls, path): - path = super()._strip_protocol(path).lstrip("/") - if ":" in path: - path = path.split(":", 1)[1] - if "@" in path: - path = path.split("@", 1)[1] - return path.lstrip("/") - - def _path_to_object(self, path, ref): - comm, ref = self.repo.resolve_refish(ref or self.ref) - parts = path.split("/") - tree = comm.tree - for part in parts: - if part and isinstance(tree, pygit2.Tree): - tree = tree[part] - return tree - - @staticmethod - def _get_kwargs_from_urls(path): - if path.startswith("git://"): - path = path[6:] - out = {} - if ":" in path: - out["path"], path = path.split(":", 1) - if "@" in path: - out["ref"], path = path.split("@", 1) - return out - - def ls(self, path, detail=True, ref=None, **kwargs): - path = self._strip_protocol(path) - tree = self._path_to_object(path, ref) - if isinstance(tree, pygit2.Tree): - out = [] - for obj in tree: - if isinstance(obj, pygit2.Tree): - out.append( - { - "type": "directory", - "name": "/".join([path, obj.name]).lstrip("/"), - "hex": obj.hex, - "mode": "%o" % obj.filemode, - "size": 0, - } - ) - else: - out.append( - { - "type": "file", - "name": "/".join([path, obj.name]).lstrip("/"), - "hex": obj.hex, - "mode": "%o" % obj.filemode, - "size": obj.size, - } - ) - else: - obj = tree - out = [ - { - "type": "file", - "name": obj.name, - "hex": obj.hex, - "mode": "%o" % obj.filemode, - "size": obj.size, - } - ] - if detail: - return out - return [o["name"] for o in out] - - def ukey(self, path, ref=None): - return self.info(path, ref=ref)["hex"] - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - cache_options=None, - ref=None, - **kwargs, - ): - obj = self._path_to_object(path, ref or self.ref) - return MemoryFile(data=obj.data) diff --git a/spaces/jone/GFPGAN/experiments/pretrained_models/README.md 
b/spaces/jone/GFPGAN/experiments/pretrained_models/README.md deleted file mode 100644 index 3401a5ca9b393e0033f58c5af8905961565826d9..0000000000000000000000000000000000000000 --- a/spaces/jone/GFPGAN/experiments/pretrained_models/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Pre-trained Models and Other Data - -Download pre-trained models and other data. Put them in this folder. - -1. [Pretrained StyleGAN2 model: StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth) -1. [Component locations of FFHQ: FFHQ_eye_mouth_landmarks_512.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/FFHQ_eye_mouth_landmarks_512.pth) -1. [A simple ArcFace model: arcface_resnet18.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/arcface_resnet18.pth) diff --git a/spaces/jone/Music_Source_Separation/scripts/3_create_evaluation_audios/piano-symphony/create_evaluation_audios.sh b/spaces/jone/Music_Source_Separation/scripts/3_create_evaluation_audios/piano-symphony/create_evaluation_audios.sh deleted file mode 100644 index 517ea426871105ffaf6bd9d0b5de2db5cb869b00..0000000000000000000000000000000000000000 --- a/spaces/jone/Music_Source_Separation/scripts/3_create_evaluation_audios/piano-symphony/create_evaluation_audios.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -PIANO_DATASET_DIR=${1:-"./datasets/maestro"} -SYMPHONY_DATASET_DIR=${2:-"./datasets/instruments_solo/symphony_solo/v0.1"} -WORKSPACE=${3:-"./workspaces/bytesep"} - -SAMPLE_RATE=44100 -CHANNELS=2 -EVALUATION_SEGMENTS_NUM=100 - -EVLUATION_AUDIOS_DIR="${WORKSPACE}/evaluation_audios/piano-symphony" - -python3 bytesep/dataset_creation/create_evaluation_audios/piano-symphony.py \ - --piano_dataset_dir=$PIANO_DATASET_DIR \ - --symphony_dataset_dir=$SYMPHONY_DATASET_DIR \ - --evaluation_audios_dir=$EVLUATION_AUDIOS_DIR \ - --sample_rate=$SAMPLE_RATE \ - --channels=$CHANNELS \ - --evaluation_segments_num=$EVALUATION_SEGMENTS_NUM - \ No newline at end of file diff --git a/spaces/josStorer/ChatGLM-6B-Int4-API-OpenAI-Compatible/models/models--silver--chatglm-6b-int4-slim/snapshots/02e096b3805c579caf5741a6d8eddd5ba7a74e0d/modeling_chatglm.py b/spaces/josStorer/ChatGLM-6B-Int4-API-OpenAI-Compatible/models/models--silver--chatglm-6b-int4-slim/snapshots/02e096b3805c579caf5741a6d8eddd5ba7a74e0d/modeling_chatglm.py deleted file mode 100644 index 499d928f69ddf07a8cdd7958bf1b083057572f03..0000000000000000000000000000000000000000 --- a/spaces/josStorer/ChatGLM-6B-Int4-API-OpenAI-Compatible/models/models--silver--chatglm-6b-int4-slim/snapshots/02e096b3805c579caf5741a6d8eddd5ba7a74e0d/modeling_chatglm.py +++ /dev/null @@ -1,1302 +0,0 @@ -""" PyTorch ChatGLM model. 
""" - -import math -import copy -import os -import warnings - -import torch -import torch.utils.checkpoint -import torch.nn.functional as F -from torch import nn -from torch.nn import CrossEntropyLoss, LayerNorm -from torch.nn.utils import skip_init -from typing import Optional, Tuple, Union, List, Callable - -from transformers.utils import ( - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, -) -from transformers.modeling_outputs import ( - BaseModelOutputWithPast, - CausalLMOutputWithPast, - BaseModelOutputWithPastAndCrossAttentions, -) -from transformers.modeling_utils import PreTrainedModel -from transformers.utils import logging -from transformers.generation.logits_process import LogitsProcessor -from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig - -from .configuration_chatglm import ChatGLMConfig - - -# flags required to enable jit fusion kernels -torch._C._jit_set_profiling_mode(False) -torch._C._jit_set_profiling_executor(False) -torch._C._jit_override_can_fuse_on_cpu(True) -torch._C._jit_override_can_fuse_on_gpu(True) - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B" -_CONFIG_FOR_DOC = "ChatGLM6BConfig" - -CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "silver/chatglm-6b-int4-slim", - # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm -] - - -class InvalidScoreLogitsProcessor(LogitsProcessor): - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: - if torch.isnan(scores).any() or torch.isinf(scores).any(): - scores.zero_() - scores[..., 5] = 5e4 - return scores - - -def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path): - """Load tf checkpoints in a pytorch model.""" - try: - import re - - import numpy as np - import tensorflow as tf - except ImportError: - logger.error( - "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " - "https://www.tensorflow.org/install/ for installation instructions." 
- ) - raise - tf_path = os.path.abspath(tf_checkpoint_path) - logger.info(f"Converting TensorFlow checkpoint from {tf_path}") - # Load weights from TF model - init_vars = tf.train.list_variables(tf_path) - names = [] - arrays = [] - for name, shape in init_vars: - logger.info(f"Loading TF weight {name} with shape {shape}") - array = tf.train.load_variable(tf_path, name) - names.append(name) - arrays.append(array) - - for name, array in zip(names, arrays): - name = name.split("/") - # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v - # which are not required for using pretrained model - if any( - n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] - for n in name - ): - logger.info(f"Skipping {'/'.join(name)}") - continue - pointer = model - for m_name in name: - if re.fullmatch(r"[A-Za-z]+_\d+", m_name): - scope_names = re.split(r"_(\d+)", m_name) - else: - scope_names = [m_name] - if scope_names[0] == "kernel" or scope_names[0] == "gamma": - pointer = getattr(pointer, "weight") - elif scope_names[0] == "output_bias" or scope_names[0] == "beta": - pointer = getattr(pointer, "bias") - elif scope_names[0] == "output_weights": - pointer = getattr(pointer, "weight") - elif scope_names[0] == "squad": - pointer = getattr(pointer, "classifier") - else: - try: - pointer = getattr(pointer, scope_names[0]) - except AttributeError: - logger.info(f"Skipping {'/'.join(name)}") - continue - if len(scope_names) >= 2: - num = int(scope_names[1]) - pointer = pointer[num] - if m_name[-11:] == "_embeddings": - pointer = getattr(pointer, "weight") - elif m_name == "kernel": - array = np.transpose(array) - try: - assert ( - pointer.shape == array.shape - ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" - except AssertionError as e: - e.args += (pointer.shape, array.shape) - raise - logger.info(f"Initialize PyTorch weight {name}") - pointer.data = torch.from_numpy(array) - return model - - -@torch.jit.script -def gelu_impl(x): - """OpenAI's gelu implementation.""" - return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * - (1.0 + 0.044715 * x * x))) - - -def gelu(x): - return gelu_impl(x) - - -class RotaryEmbedding(torch.nn.Module): - def __init__(self, dim, base=10000, precision=torch.half, learnable=False): - super().__init__() - inv_freq = 1. 
/ (base ** (torch.arange(0, dim, 2).float() / dim)) - inv_freq = inv_freq.half() - self.learnable = learnable - if learnable: - self.inv_freq = torch.nn.Parameter(inv_freq) - self.max_seq_len_cached = None - else: - self.register_buffer('inv_freq', inv_freq) - self.max_seq_len_cached = None - self.cos_cached = None - self.sin_cached = None - self.precision = precision - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, - error_msgs): - pass - - def forward(self, x, seq_dim=1, seq_len=None): - if seq_len is None: - seq_len = x.shape[seq_dim] - if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached): - self.max_seq_len_cached = None if self.learnable else seq_len - t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype) - freqs = torch.einsum('i,j->ij', t, self.inv_freq) - # Different from paper, but it uses a different permutation in order to obtain the same calculation - emb = torch.cat((freqs, freqs), dim=-1).to(x.device) - if self.precision == torch.bfloat16: - emb = emb.float() - - # [sx, 1 (b * np), hn] - cos_cached = emb.cos()[:, None, :] - sin_cached = emb.sin()[:, None, :] - if self.precision == torch.bfloat16: - cos_cached = cos_cached.bfloat16() - sin_cached = sin_cached.bfloat16() - if self.learnable: - return cos_cached, sin_cached - self.cos_cached, self.sin_cached = cos_cached, sin_cached - return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...] - - def _apply(self, fn): - if self.cos_cached is not None: - self.cos_cached = fn(self.cos_cached) - if self.sin_cached is not None: - self.sin_cached = fn(self.sin_cached) - return super()._apply(fn) - -def rotate_half(x): - x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:] - return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions - - -@torch.jit.script -def apply_rotary_pos_emb_index(q, k, cos, sin, position_id): - # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn] - cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \ - F.embedding(position_id, sin.squeeze(1)).unsqueeze(2) - q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin) - return q, k - - -def attention_fn( - self, - query_layer, - key_layer, - value_layer, - attention_mask, - hidden_size_per_partition, - layer_id, - layer_past=None, - scaling_attention_score=True, - use_cache=False, -): - if layer_past is not None: - past_key, past_value = layer_past - key_layer = torch.cat((past_key, key_layer), dim=0) - value_layer = torch.cat((past_value, value_layer), dim=0) - - # seqlen, batch, num_attention_heads, hidden_size_per_attention_head - seq_len, b, nh, hidden_size = key_layer.shape - - if use_cache: - present = (key_layer, value_layer) - else: - present = None - - query_key_layer_scaling_coeff = float(layer_id + 1) - if scaling_attention_score: - query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff) - - # =================================== - # Raw attention scores. 
[b, np, s, s] - # =================================== - - # [b, np, sq, sk] - output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) - - # [sq, b, np, hn] -> [sq, b * np, hn] - query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) - # [sk, b, np, hn] -> [sk, b * np, hn] - key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) - - matmul_result = torch.empty( - output_size[0] * output_size[1], - output_size[2], - output_size[3], - dtype=query_layer.dtype, - device=query_layer.device, - ) - - matmul_result = torch.baddbmm( - matmul_result, - query_layer.transpose(0, 1), # [b * np, sq, hn] - key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] - beta=0.0, - alpha=1.0, - ) - - # change view to [b, np, sq, sk] - attention_scores = matmul_result.view(*output_size) - - if self.scale_mask_softmax: - self.scale_mask_softmax.scale = query_key_layer_scaling_coeff - attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous()) - else: - if not (attention_mask == 0).all(): - # if auto-regressive, skip - attention_scores.masked_fill_(attention_mask, -10000.0) - dtype = attention_scores.type() - attention_scores = attention_scores.float() - attention_scores = attention_scores * query_key_layer_scaling_coeff - - attention_probs = F.softmax(attention_scores, dim=-1) - - attention_probs = attention_probs.type(dtype) - - # ========================= - # Context layer. [sq, b, hp] - # ========================= - - # value_layer -> context layer. - # [sk, b, np, hn] --> [b, np, sq, hn] - - # context layer shape: [b, np, sq, hn] - output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) - - # change view [sk, b * np, hn] - value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) - - # change view [b * np, sq, sk] - attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) - - # matmul: [b * np, sq, hn] - context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) - - # change view [b, np, sq, hn] - context_layer = context_layer.view(*output_size) - - # [b, np, sq, hn] --> [sq, b, np, hn] - context_layer = context_layer.permute(2, 0, 1, 3).contiguous() - - # [sq, b, np, hn] --> [sq, b, hp] - new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,) - context_layer = context_layer.view(*new_context_layer_shape) - - outputs = (context_layer, present, attention_probs) - - return outputs - - -class SelfAttention(torch.nn.Module): - def __init__(self, hidden_size, num_attention_heads, - layer_id, hidden_size_per_attention_head=None, bias=True, - params_dtype=torch.float, position_encoding_2d=True): - super(SelfAttention, self).__init__() - - self.layer_id = layer_id - self.hidden_size = hidden_size - self.hidden_size_per_partition = hidden_size - self.num_attention_heads = num_attention_heads - self.num_attention_heads_per_partition = num_attention_heads - self.position_encoding_2d = position_encoding_2d - self.rotary_emb = RotaryEmbedding( - self.hidden_size // (self.num_attention_heads * 2) - if position_encoding_2d - else self.hidden_size // self.num_attention_heads, - base=10000, - precision=torch.half, - learnable=False, - ) - - self.scale_mask_softmax = None - - if hidden_size_per_attention_head is None: - self.hidden_size_per_attention_head = hidden_size // num_attention_heads - else: - self.hidden_size_per_attention_head = 
hidden_size_per_attention_head - - self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head - - # Strided linear layer. - self.query_key_value = skip_init( - torch.nn.Linear, - hidden_size, - 3 * self.inner_hidden_size, - bias=bias, - dtype=params_dtype, - ) - - self.dense = skip_init( - torch.nn.Linear, - self.inner_hidden_size, - hidden_size, - bias=bias, - dtype=params_dtype, - ) - - @staticmethod - def attention_mask_func(attention_scores, attention_mask): - attention_scores.masked_fill_(attention_mask, -10000.0) - return attention_scores - - def split_tensor_along_last_dim(self, tensor, num_partitions, - contiguous_split_chunks=False): - """Split a tensor along its last dimension. - Arguments: - tensor: input tensor. - num_partitions: number of partitions to split the tensor - contiguous_split_chunks: If True, make each chunk contiguous - in memory. - """ - # Get the size and dimension. - last_dim = tensor.dim() - 1 - last_dim_size = tensor.size()[last_dim] // num_partitions - # Split. - tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) - # Note: torch.split does not create contiguous tensors by default. - if contiguous_split_chunks: - return tuple(chunk.contiguous() for chunk in tensor_list) - - return tensor_list - - def forward( - self, - hidden_states: torch.Tensor, - position_ids, - attention_mask: torch.Tensor, - layer_id, - layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - use_cache: bool = False, - output_attentions: bool = False, - ): - """ - hidden_states: [seq_len, batch, hidden_size] - attention_mask: [(1, 1), seq_len, seq_len] - """ - - # [seq_len, batch, 3 * hidden_size] - mixed_raw_layer = self.query_key_value(hidden_states) - - # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head] - new_tensor_shape = mixed_raw_layer.size()[:-1] + ( - self.num_attention_heads_per_partition, - 3 * self.hidden_size_per_attention_head, - ) - mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape) - - # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head] - (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3) - - if self.position_encoding_2d: - q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1)) - k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1)) - cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1) - position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \ - position_ids[:, 1, :].transpose(0, 1).contiguous() - q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids) - q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids) - query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1)) - key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1)) - else: - position_ids = position_ids.transpose(0, 1) - cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1) - # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head] - query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids) - - # [seq_len, batch, hidden_size] - context_layer, present, attention_probs = attention_fn( - self=self, - query_layer=query_layer, - key_layer=key_layer, - value_layer=value_layer, - attention_mask=attention_mask, - hidden_size_per_partition=self.hidden_size_per_partition, - layer_id=layer_id, - layer_past=layer_past, - use_cache=use_cache - ) - - output = self.dense(context_layer) - - outputs = (output, 
present) - - if output_attentions: - outputs += (attention_probs,) - - return outputs # output, present, attention_probs - - -class GEGLU(torch.nn.Module): - def __init__(self): - super().__init__() - self.activation_fn = F.gelu - - def forward(self, x): - # dim=-1 breaks in jit for pt<1.10 - x1, x2 = x.chunk(2, dim=(x.ndim - 1)) - return x1 * self.activation_fn(x2) - - -class GLU(torch.nn.Module): - def __init__(self, hidden_size, inner_hidden_size=None, - layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float): - super(GLU, self).__init__() - self.layer_id = layer_id - self.activation_func = activation_func - - # Project to 4h. - self.hidden_size = hidden_size - if inner_hidden_size is None: - inner_hidden_size = 4 * hidden_size - self.inner_hidden_size = inner_hidden_size - self.dense_h_to_4h = skip_init( - torch.nn.Linear, - self.hidden_size, - self.inner_hidden_size, - bias=bias, - dtype=params_dtype, - ) - # Project back to h. - self.dense_4h_to_h = skip_init( - torch.nn.Linear, - self.inner_hidden_size, - self.hidden_size, - bias=bias, - dtype=params_dtype, - ) - - def forward(self, hidden_states): - """ - hidden_states: [seq_len, batch, hidden_size] - """ - - # [seq_len, batch, inner_hidden_size] - intermediate_parallel = self.dense_h_to_4h(hidden_states) - - intermediate_parallel = self.activation_func(intermediate_parallel) - - output = self.dense_4h_to_h(intermediate_parallel) - - return output - - -class GLMBlock(torch.nn.Module): - def __init__( - self, - hidden_size, - num_attention_heads, - layernorm_epsilon, - layer_id, - inner_hidden_size=None, - hidden_size_per_attention_head=None, - layernorm=LayerNorm, - use_bias=True, - params_dtype=torch.float, - num_layers=28, - position_encoding_2d=True - ): - super(GLMBlock, self).__init__() - # Set output layer initialization if not provided. - - self.layer_id = layer_id - - # Layernorm on the input data. - self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) - - self.position_encoding_2d = position_encoding_2d - - # Self attention. - self.attention = SelfAttention( - hidden_size, - num_attention_heads, - layer_id, - hidden_size_per_attention_head=hidden_size_per_attention_head, - bias=use_bias, - params_dtype=params_dtype, - position_encoding_2d=self.position_encoding_2d - ) - - # Layernorm on the input data. - self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) - - self.num_layers = num_layers - - # GLU - self.mlp = GLU( - hidden_size, - inner_hidden_size=inner_hidden_size, - bias=use_bias, - layer_id=layer_id, - params_dtype=params_dtype, - ) - - def forward( - self, - hidden_states: torch.Tensor, - position_ids, - attention_mask: torch.Tensor, - layer_id, - layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - use_cache: bool = False, - output_attentions: bool = False, - ): - """ - hidden_states: [seq_len, batch, hidden_size] - attention_mask: [(1, 1), seq_len, seq_len] - """ - - # Layer norm at the begining of the transformer layer. - # [seq_len, batch, hidden_size] - attention_input = self.input_layernorm(hidden_states) - - # Self attention. - attention_outputs = self.attention( - attention_input, - position_ids, - attention_mask=attention_mask, - layer_id=layer_id, - layer_past=layer_past, - use_cache=use_cache, - output_attentions=output_attentions - ) - - attention_output = attention_outputs[0] - - outputs = attention_outputs[1:] - - # Residual connection. 
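        # The skip path is scaled by alpha = sqrt(2 * num_layers) before the
        # sub-layer output is added back; the same scaling is applied to the
        # MLP residual a few lines below.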
- alpha = (2 * self.num_layers) ** 0.5 - hidden_states = attention_input * alpha + attention_output - - mlp_input = self.post_attention_layernorm(hidden_states) - - # MLP. - mlp_output = self.mlp(mlp_input) - - # Second residual connection. - output = mlp_input * alpha + mlp_output - - if use_cache: - outputs = (output,) + outputs - else: - outputs = (output,) + outputs[1:] - - return outputs # hidden_states, present, attentions - - -class ChatGLMPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and - a simple interface for downloading and loading pretrained models. - """ - - is_parallelizable = False - supports_gradient_checkpointing = False - config_class = ChatGLMConfig - base_model_prefix = "transformer" - _no_split_modules = ["GLM6BBlock"] - - def __init__(self, *inputs, **kwargs): - super().__init__(*inputs, **kwargs) - - def _init_weights(self, module: nn.Module): - """Initialize the weights.""" - return - - -CHATGLM_6B_START_DOCSTRING = r""" - This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general - usage and behavior. - - Parameters: - config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the configuration. - Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -CHATGLM_6B_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`ChatGLM6BTokenizer`]. - See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. - Selected in the range `[0, config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert *input_ids* indices into associated vectors - than the model's internal embedding lookup matrix. 
- output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -@add_start_docstrings( - "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.", - CHATGLM_6B_START_DOCSTRING, -) -class ChatGLMModel(ChatGLMPreTrainedModel): - """ - - The model can behave as an encoder (with only self-attention) as well - as a decoder, in which case a layer of cross-attention is added between - the self-attention layers, following the architecture described in [Attention is - all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, - Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - - To behave as an decoder the model needs to be initialized with the - `is_decoder` argument of the configuration set to `True`. - To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` - argument and `add_cross_attention` set to `True`; an - `encoder_hidden_states` is then expected as an input to the forward pass. - """ - - def __init__(self, config: ChatGLMConfig): - super().__init__(config) - - # recording parameters - self.max_sequence_length = config.max_sequence_length - self.hidden_size = config.hidden_size - self.params_dtype = torch.half - self.num_attention_heads = config.num_attention_heads - self.vocab_size = config.vocab_size - self.num_layers = config.num_layers - self.layernorm_epsilon = config.layernorm_epsilon - self.inner_hidden_size = config.inner_hidden_size - self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads - self.position_encoding_2d = config.position_encoding_2d - - self.word_embeddings = skip_init( - torch.nn.Embedding, - num_embeddings=self.vocab_size, embedding_dim=self.hidden_size, - dtype=self.params_dtype - ) - - def get_layer(layer_id): - return GLMBlock( - self.hidden_size, - self.num_attention_heads, - self.layernorm_epsilon, - layer_id, - inner_hidden_size=self.inner_hidden_size, - hidden_size_per_attention_head=self.hidden_size_per_attention_head, - layernorm=LayerNorm, - use_bias=True, - params_dtype=self.params_dtype, - position_encoding_2d=self.position_encoding_2d, - ) - - self.layers = torch.nn.ModuleList( - [get_layer(layer_id) for layer_id in range(self.num_layers)] - ) - - # Final layer norm before output. 
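-        # The final LayerNorm below is applied in forward() to the output of
-        # the last GLMBlock before the hidden states are returned (and before
-        # the LM head in ChatGLMForConditionalGeneration).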
- self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon) - - def get_input_embeddings(self): - return self.word_embeddings - - def set_input_embeddings(self, new_embeddings: torch.Tensor): - self.word_embeddings = new_embeddings - - def get_masks(self, seq, device): - context_length = seq.index(self.config.bos_token_id) + 1 - - attention_mask = torch.ones((1, len(seq), len(seq)), device=device) - attention_mask.tril_() - attention_mask[..., :context_length - 1] = 1 - attention_mask.unsqueeze_(1) - attention_mask = (attention_mask < 0.5).bool() - - return attention_mask - - def get_position_ids(self, seq, mask_position, device, gmask=False): - context_length = seq.index(self.config.bos_token_id) + 1 - if self.position_encoding_2d: - seq_length = seq.index(self.config.bos_token_id) - position_ids = torch.arange(context_length, dtype=torch.long, device=device) - if not gmask: - position_ids[seq_length:] = mask_position - block_position_ids = torch.cat(( - torch.zeros(seq_length, dtype=torch.long, device=device), - torch.arange(context_length - seq_length, dtype=torch.long, device=device) + 1 - )) - position_ids = torch.stack((position_ids, block_position_ids), dim=0) - else: - position_ids = torch.arange(context_length, dtype=torch.long, device=device) - if not gmask: - position_ids[context_length - 1:] = mask_position - - position_ids = position_ids.unsqueeze(0) - - return position_ids - - @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=BaseModelOutputWithPastAndCrossAttentions, - config_class=_CONFIG_FOR_DOC, - ) - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.Tensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, - inputs_embeds: Optional[torch.LongTensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]: - - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - batch_size, seq_length = input_ids.shape[:2] - elif inputs_embeds is not None: - batch_size, seq_length, _ = inputs_embeds.shape[:2] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if past_key_values is None: - past_key_values = tuple([None] * len(self.layers)) - seq = input_ids[0].tolist() - - if attention_mask is None: - attention_mask = self.get_masks( - seq=seq, - device=input_ids.device - ) - - if position_ids is None: - MASK, gMASK = 130000, 130001 - mask_token = MASK if MASK in input_ids else gMASK - use_gmask = False if MASK in input_ids else gMASK - - mask_position = seq.index(mask_token) - position_ids = self.get_position_ids( - seq=seq, - mask_position=mask_position, - 
device=input_ids.device, - gmask=use_gmask - ) - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - - # [seq_len, batch, hidden_size] - hidden_states = inputs_embeds.transpose(0, 1) - - presents = () if use_cache else None - all_self_attentions = () if output_attentions else None - all_hidden_states = () if output_hidden_states else None - - seq_length_with_past = seq_length - past_key_values_length = 0 - if past_key_values[0] is not None: - past_key_values_length = past_key_values[0][0].shape[0] - seq_length_with_past = seq_length_with_past + past_key_values_length - if attention_mask is None: - attention_mask = torch.zeros(1, 1, device=input_ids.device).bool() - - else: - attention_mask = attention_mask.to(input_ids.device) - - for i, layer in enumerate(self.layers): - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_ret = layer( - hidden_states, - position_ids=position_ids, - attention_mask=attention_mask, - layer_id=torch.tensor(i), - layer_past=past_key_values[i], - use_cache=use_cache, - output_attentions=output_attentions - ) - - hidden_states = layer_ret[0] - - if use_cache: - presents = presents + (layer_ret[1],) - - if output_attentions: - all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],) - - # Final layer norm. - hidden_states = self.final_layernorm(hidden_states) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) - - return BaseModelOutputWithPast( - last_hidden_state=hidden_states, - past_key_values=presents, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) - - -class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel): - def __init__(self, config: ChatGLMConfig): - super().__init__(config) - - # self.hidden_size = config.hidden_size - # self.params_dtype = torch.half - # self.vocab_size = config.vocab_size - self.max_sequence_length = config.max_sequence_length - - self.position_encoding_2d = config.position_encoding_2d - - self.transformer = ChatGLMModel(config) - - self.lm_head = skip_init( - nn.Linear, - config.hidden_size, - config.vocab_size, - bias=False, - dtype=torch.half - ) - - self.config = config - - self.quantized = False - - if self.config.quantization_bit: - self.quantize(self.config.quantization_bit, self.config.quantization_embeddings, use_quantization_cache=True, empty_init=True) - - def get_output_embeddings(self): - return self.lm_head - - def set_output_embeddings(self, new_embeddings): - self.lm_head = new_embeddings - - def get_masks_and_position_ids(self, seq, mask_position, context_length, device, gmask=False): - attention_mask = torch.ones((1, context_length, context_length), device=device) - attention_mask.tril_() - attention_mask[..., :context_length - 1] = 1 - attention_mask.unsqueeze_(1) - attention_mask = (attention_mask < 0.5).bool() - - if self.position_encoding_2d: - seq_length = seq.index(self.config.bos_token_id) - position_ids = torch.arange(context_length, dtype=torch.long, device=device) - if not gmask: - position_ids[seq_length:] = mask_position - block_position_ids = torch.cat(( - torch.zeros(seq_length, dtype=torch.long, device=device), - torch.arange(context_length - seq_length, dtype=torch.long, device=device) + 1 - )) - position_ids = torch.stack((position_ids, block_position_ids), dim=0) - else: - position_ids = 
torch.arange(context_length, dtype=torch.long, device=device) - if not gmask: - position_ids[context_length - 1:] = mask_position - - position_ids = position_ids.unsqueeze(0) - - return attention_mask, position_ids - - def prepare_inputs_for_generation( - self, - input_ids: torch.LongTensor, - past: Optional[torch.Tensor] = None, - past_key_values: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - **kwargs - ) -> dict: - - MASK, gMASK = 130000, 130001 - mask_token = MASK if MASK in input_ids else gMASK - use_gmask = False if MASK in input_ids else gMASK - seq = input_ids[0].tolist() - mask_position = seq.index(mask_token) - - if mask_token not in seq: - raise ValueError("You have to add either [MASK] or [gMASK] in your input") - - # only last token for input_ids if past is not None - if past is not None or past_key_values is not None: - context_length = seq.index(self.config.bos_token_id) - last_token = input_ids[:, -1].unsqueeze(-1) - if self.position_encoding_2d: - position_ids = torch.tensor([[[mask_position], [len(seq) - context_length]]], dtype=torch.long, - device=input_ids.device) - else: - position_ids = torch.tensor([[mask_position]], dtype=torch.long, device=input_ids.device) - - if past is None: - past = past_key_values - return { - "input_ids": last_token, - "past_key_values": past, - "position_ids": position_ids, - } - else: - attention_mask, position_ids = self.get_masks_and_position_ids( - seq=seq, - mask_position=mask_position, - context_length=len(seq), - device=input_ids.device, - gmask=use_gmask - ) - - return { - "input_ids": input_ids, - "past_key_values": past, - "position_ids": position_ids, - "attention_mask": attention_mask - } - - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - past_key_values: Optional[Tuple[torch.FloatTensor]] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ): - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - transformer_outputs = self.transformer( - input_ids=input_ids, - position_ids=position_ids, - attention_mask=attention_mask, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - hidden_states = transformer_outputs[0] - - lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous() - - loss = None - if labels is not None: - lm_logits = lm_logits.to(torch.float32) - - # Shift so that tokens < n predict n - shift_logits = lm_logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - loss_fct = CrossEntropyLoss() - loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) - - lm_logits = lm_logits.to(hidden_states.dtype) - loss = loss.to(hidden_states.dtype) - - if not return_dict: - output = (lm_logits,) + transformer_outputs[1:] - return ((loss,) + output) if loss is not None else output - - return CausalLMOutputWithPast( - loss=loss, - logits=lm_logits, - past_key_values=transformer_outputs.past_key_values, - 
hidden_states=transformer_outputs.hidden_states, - attentions=transformer_outputs.attentions, - ) - - @staticmethod - def _reorder_cache( - past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: - """ - This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or - [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct - beam_idx at every generation step. - - Output shares the same memory storage as `past`. - """ - return tuple( - ( - layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)), - layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)), - ) - for layer_past in past - ) - - @torch.no_grad() - def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1, - do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs): - if history is None: - history = [] - if logits_processor is None: - logits_processor = LogitsProcessorList() - logits_processor.append(InvalidScoreLogitsProcessor()) - gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p, - "temperature": temperature, "logits_processor": logits_processor, **kwargs} - if not history: - prompt = query - else: - prompt = "" - for i, (old_query, response) in enumerate(history): - prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) - prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) - input_ids = tokenizer([prompt], return_tensors="pt", padding=True) - input_ids = input_ids.to(self.device) - outputs = self.generate(**input_ids, **gen_kwargs) - outputs = outputs.tolist()[0][len(input_ids["input_ids"][0]):] - response = tokenizer.decode(outputs) - response = response.strip() - response = response.replace("[[训练时间]]", "2023年") - history = history + [(query, response)] - return response, history - - @torch.no_grad() - def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, - do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs): - if history is None: - history = [] - if logits_processor is None: - logits_processor = LogitsProcessorList() - logits_processor.append(InvalidScoreLogitsProcessor()) - gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p, - "temperature": temperature, "logits_processor": logits_processor, **kwargs} - if not history: - prompt = query - else: - prompt = "" - for i, (old_query, response) in enumerate(history): - prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) - prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) - input_ids = tokenizer([prompt], return_tensors="pt", padding=True) - input_ids = input_ids.to(self.device) - for outputs in self.stream_generate(**input_ids, **gen_kwargs): - outputs = outputs.tolist()[0][len(input_ids["input_ids"][0]):] - response = tokenizer.decode(outputs) - response = response.strip() - response = response.replace("[[训练时间]]", "2023年") - new_history = history + [(query, response)] - yield response, new_history - - @torch.no_grad() - def stream_generate( - self, - input_ids, - generation_config: Optional[GenerationConfig] = None, - logits_processor: Optional[LogitsProcessorList] = None, - stopping_criteria: Optional[StoppingCriteriaList] = None, - prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, - 
**kwargs, - ): - batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] - - if generation_config is None: - generation_config = self.generation_config - generation_config = copy.deepcopy(generation_config) - model_kwargs = generation_config.update(**kwargs) - bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id - - if isinstance(eos_token_id, int): - eos_token_id = [eos_token_id] - - has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None - if has_default_max_length and generation_config.max_new_tokens is None: - warnings.warn( - f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. " - "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" - " recommend using `max_new_tokens` to control the maximum length of the generation.", - UserWarning, - ) - elif generation_config.max_new_tokens is not None: - generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length - if not has_default_max_length: - logger.warn( - f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" - f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " - "Please refer to the documentation for more information. " - "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)", - UserWarning, - ) - - if input_ids_seq_length >= generation_config.max_length: - input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" - logger.warning( - f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" - f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" - " increasing `max_new_tokens`." - ) - - # 2. 
Set generation parameters if not already defined - logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() - stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() - - logits_processor = self._get_logits_processor( - generation_config=generation_config, - input_ids_seq_length=input_ids_seq_length, - encoder_input_ids=input_ids, - prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, - logits_processor=logits_processor, - ) - - stopping_criteria = self._get_stopping_criteria( - generation_config=generation_config, stopping_criteria=stopping_criteria - ) - logits_warper = self._get_logits_warper(generation_config) - - unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) - scores = None - while True: - model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) - # forward pass to get next token - outputs = self( - **model_inputs, - return_dict=True, - output_attentions=False, - output_hidden_states=False, - ) - - next_token_logits = outputs.logits[:, -1, :] - - # pre-process distribution - next_token_scores = logits_processor(input_ids, next_token_logits) - next_token_scores = logits_warper(input_ids, next_token_scores) - - # sample - probs = nn.functional.softmax(next_token_scores, dim=-1) - if generation_config.do_sample: - next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) - else: - next_tokens = torch.argmax(probs, dim=-1) - - # update generated ids, model inputs, and length for next step - input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) - model_kwargs = self._update_model_kwargs_for_generation( - outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder - ) - unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long()) - - # stop when each sentence is finished, or if we exceed the maximum length - if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores): - break - yield input_ids - - def quantize(self, bits: int, quantize_embeddings=False, use_quantization_cache=False, empty_init=False, **kwargs): - if bits == 0: - return - - from .quantization import quantize, QuantizedEmbedding, QuantizedLinear, load_cpu_kernel - - if self.quantized: - if self.device == torch.device("cpu"): - logger.info("Already quantized, reloading cpu kernel.") - load_cpu_kernel(**kwargs) - else: - logger.info("Already quantized.") - return self - - self.quantized = True - - self.config.quantization_bit = bits - self.config.quantization_embeddings = quantize_embeddings - - self.transformer = quantize(self.transformer, bits, use_quantization_cache=use_quantization_cache, empty_init=empty_init, **kwargs) - - if quantize_embeddings: - logger.info("Applying quantization to embeddings") - self.transformer.word_embeddings = QuantizedEmbedding( - weight_bit_width=bits, - weight_tensor=self.transformer.word_embeddings.weight.to(self.device), - num_embeddings=self.transformer.word_embeddings.num_embeddings, - embedding_dim=self.transformer.word_embeddings.embedding_dim, - dtype=torch.half, - device=self.transformer.word_embeddings.weight.device, - ) - self.lm_head = QuantizedLinear( - weight_bit_width=bits, - weight_tensor=self.lm_head.weight.to(self.device), - bias_tensor=None, - in_features=self.lm_head.in_features, - out_features=self.lm_head.out_features, - bias=False, - quantized_weight=self.transformer.word_embeddings.weight, - quantized_weight_scale=self.transformer.word_embeddings.weight_scale, - 
dtype=torch.half, - device=self.lm_head.weight.device, - ) - - return self diff --git a/spaces/jpwahle/paraphrase-type-tasks/app.py b/spaces/jpwahle/paraphrase-type-tasks/app.py deleted file mode 100644 index c48f5d7579aaff03ae1cb8d51b8aeed38dab88bb..0000000000000000000000000000000000000000 --- a/spaces/jpwahle/paraphrase-type-tasks/app.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/ -# All rights reserved. - -import os -import random -import time - -import gradio as gr -import openai - -openai.api_key = os.environ.get("OPENAI_API_KEY") - - -def create_prompt(sentence, paraphrase_type): - """ - Creates a prompt for generating a paraphrase of a given sentence with specified types. - - Args: - sentence (str): The original sentence to be paraphrased. - paraphrase_type (str): The type of paraphrase to be generated. - - Returns: - dict: A dictionary containing the prompt message. - """ - return { - "messages": [ - { - "role": "user", - "content": ( - "Given the following sentence, generate a paraphrase with" - f" the following types. Sentence: {sentence}. Paraphrase" - f" Types: {paraphrase_type}" - ), - } - ] - } - - -paraphrase_types = [ - "Derivational Changes", - "Inflectional Changes", - "Modal Verb Changes", - "Spelling changes", - "Change of format", - "Same Polarity Substitution (contextual)", - "Same Polarity Substitution (habitual)", - "Same Polarity Substitution (named ent.)", - "Converse substitution", - "Opposite polarity substitution (contextual)", - "Opposite polarity substitution (habitual)", - "Synthetic/analytic substitution", - "Coordination changes", - "Diathesis alternation", - "Ellipsis", - "Negation switching", - "Subordination and nesting changes", - "Direct/indirect style alternations", - "Punctuation changes", - "Syntax/discourse structure changes", - "Entailment", - "Identity", - "Non-paraphrase", - "Addition/Deletion", - "Change of order", - "Semantic-based", -] - -with gr.Blocks() as demo: - description = gr.Markdown( - """ - ## Paraphrase Type Generator - This demo uses a fine-tuned ChatGPT-3.5 model to generate paraphrases given specific paraphrase types. - - **How to use:** - 1. Select one or many type of paraphrase from the dropdown menu. - 2. Enter a sentence in the text box. - 3. Click the "Submit" button or hit enter. - 4. The application will generate a paraphrase of the input sentence based on the selected type. - """ - ) - chatbot = gr.Chatbot() - types = gr.Dropdown( - paraphrase_types, - value="Syntax/discourse structure changes", - multiselect=True, - allow_custom_value=True, - ) - msg = gr.Textbox() - submit = gr.Button("Submit") - clear = gr.Button("Clear") - - def user(user_message, history): - """ - This function takes in a user message and a history of previous messages, and returns an empty string and an updated history list with the user message appended to it. - - Args: - - user_message (str): The message sent by the user. - - history (list): A list of previous messages, where each message is a list containing the message text and the bot's response. - - Returns: - - A tuple containing an empty string and the updated history list. - """ - return "", history + [[user_message, None]] - - def generate_paraphrase(user_message, paraphrase_type, history): - """ - Generates a paraphrase of the user's message using OpenAI's GPT-3 model. - - Args: - user_message (str): The message to be paraphrased. - paraphrase_type (str): The type of paraphrase to generate. 
- history (list): A list of previous messages in the conversation. - - Yields: - list: A list of previous messages in the conversation, including the new paraphrase. - """ - types_as_str = ",".join(paraphrase_type) - history[-1][1] = f"[System: {types_as_str}]\n\n" - prompt = create_prompt(history[-1][0], paraphrase_type) - bot_message = openai.ChatCompletion.create( - model="ft:gpt-3.5-turbo-0613:personal::7xbU0xQ2", - messages=prompt["messages"], - ) - for character in bot_message.choices[0].message.content: - history[-1][1] += character - time.sleep(0.01) - yield history - - chatbot.value = [ - [ - ( - "These outlined a" - " theory of the photoelectric effect, explained Brownian" - " motion, introduced his special theory of relativity—a theory" - " which addressed the inability of classical mechanics to" - " account satisfactorily for the behavior of the" - " electromagnetic field—and demonstrated that if the special" - " theory is correct, mass and energy are equivalent to each" - " other." - ), - ( - "[System: Syntax/discourse structure changes]\n\nAnother of" - " the papers introduced Einstein's special theory of" - " relativity, which addressed the inability of classical" - " mechanics to account for the behavior of the electromagnetic" - " field, and demonstrated that, if the special theory is" - " correct, mass and energy are equivalent to each other." - ), - ] - ] - - msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( - generate_paraphrase, [msg, types, chatbot], chatbot - ) - submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then( - generate_paraphrase, [msg, types, chatbot], chatbot - ) - clear.click(lambda: None, None, chatbot, queue=False) - -demo.queue() -demo.launch() diff --git a/spaces/jskalbg/ChatDev01/camel/agents/tool_agents/hugging_face_tool_agent.py b/spaces/jskalbg/ChatDev01/camel/agents/tool_agents/hugging_face_tool_agent.py deleted file mode 100644 index 0bf4b7b71bb0d7b5459e42e15c680a97db27ecaa..0000000000000000000000000000000000000000 --- a/spaces/jskalbg/ChatDev01/camel/agents/tool_agents/hugging_face_tool_agent.py +++ /dev/null @@ -1,188 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from typing import Any, Optional - -from camel.agents.tool_agents import BaseToolAgent - - -# flake8: noqa :E501 -class HuggingFaceToolAgent(BaseToolAgent): - r"""Tool agent for calling HuggingFace models. This agent is a wrapper - around agents from the `transformers` library. For more information - about the available models, please see the `transformers` documentation - at https://huggingface.co/docs/transformers/transformers_agents. - - Args: - name (str): The name of the agent. - *args (Any): Additional positional arguments to pass to the underlying - Agent class. - remote (bool, optional): Flag indicating whether to run the agent - remotely. 
(default: :obj:`True`) - **kwargs (Any): Additional keyword arguments to pass to the underlying - Agent class. - """ - - def __init__( - self, - name: str, - *args: Any, - remote: bool = True, - **kwargs: Any, - ) -> None: - try: - # TODO: Support other tool agents - from transformers.tools import OpenAiAgent - except ImportError: - raise ValueError( - "Could not import transformers tool agents. " - "Please setup the environment with " - "pip install huggingface_hub==0.14.1 transformers==4.29.0 diffusers accelerate datasets torch soundfile sentencepiece opencv-python" - ) - self.agent = OpenAiAgent(*args, **kwargs) - self.name = name - self.remote = remote - self.description = f"""The `{self.name}` is a tool agent that can perform a variety of tasks including: -- Document question answering: given a document (such as a PDF) in image format, answer a question on this document -- Text question answering: given a long text and a question, answer the question in the text -- Unconditional image captioning: Caption the image! -- Image question answering: given an image, answer a question on this image -- Image segmentation: given an image and a prompt, output the segmentation mask of that prompt -- Speech to text: given an audio recording of a person talking, transcribe the speech into text -- Text to speech: convert text to speech -- Zero-shot text classification: given a text and a list of labels, identify to which label the text corresponds the most -- Text summarization: summarize a long text in one or a few sentences -- Translation: translate the text into a given language -- Text downloading: to download a text from a web URL -- Text to image: generate an image according to a prompt, leveraging stable diffusion -- Image transformation: modify an image given an initial image and a prompt, leveraging instruct pix2pix stable diffusion -- Text to video: generate a small video according to a prompt - -Here are some python code examples of what you can do with this agent: - -Single execution (step) mode, the single execution method is when using the step() method of the agent: -``` -# Text to image -rivers_and_lakes_image = {self.name}.step("Draw me a picture of rivers and lakes.") -rivers_and_lakes_image.save("./rivers_and_lakes_image.png") - -# Text to image -> Image transformation -sea_add_island_image = {self.name}.step("Draw me a picture of the sea then transform the picture to add an island") -sea_add_island_image.save("./sea_add_island_image.png") - -# If you'd like to keep a state across executions or to pass non-text objects to the agent, -# you can do so by specifying variables that you would like the agent to use. 
For example, -# you could generate the first image of rivers and lakes, and ask the model to update that picture to add an island by doing the following: -picture = {self.name}.step("Generate a picture of rivers and lakes.") -picture.save("./picture.png") -updated_picture = {self.name}.step("Transform the image in `picture` to add an island to it.", picture=picture) -updated_picture.save("./updated_picture.png") - -capybara_sea_image = {self.name}.step("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea") -capybara_sea_image.save("./capybara_sea_image.png") - -# Document question answering -answer = {self.name}.step( - "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?", - document=document, -) -print(answer) - - -# Text to image -boat_image = {self.name}.step("Generate an image of a boat in the water") -boat_image.save("./boat_image.png") - -# Unconditional image captioning -boat_image_caption = {self.name}.step("Can you caption the `boat_image`?", boat_image=boat_image) -print(boat_image_caption) - -# Text to image -> Unconditional image captioning -> Text to speech -boat_audio = {self.name}.step("Can you generate an image of a boat? Please read out loud the contents of the image afterwards") - -# Text downloading -document = {self.name}.step("Download the text from http://hf.co") -print(document) - -# Text summarization -summary = {self.name}.step("Summarize the following text: `document`", document=document) -print(summary) - -# Text downloading -> Text summarization -> Text to speech -audio = {self.name}.step("Read out loud the summary of http://hf.co") -``` - -Chat-based execution (chat), the agent also has a chat-based approach, using the chat() method: -``` -# Clean the chat history -{self.name}.reset() - -# Text to image -capybara_image = {self.name}.chat("Show me an an image of a capybara") -capybara_image.save("./capybara_image.png") - -# Image transformation -transformed_capybara_image = {self.name}.chat("Transform the image so that it snows") -transformed_capybara_image.save("./transformed_capybara_image.png") - -# Image segmentation -segmented_transformed_capybara_image = {self.name}.chat("Show me a mask of the snowy capybaras") -segmented_transformed_capybara_image.save("./segmented_transformed_capybara_image.png") -``` -""" - - def reset(self) -> None: - r"""Resets the chat history of the agent.""" - self.agent.prepare_for_new_chat() - - def step( - self, - *args: Any, - remote: Optional[bool] = None, - **kwargs: Any, - ) -> Any: - r"""Runs the agent in single execution mode. - - Args: - *args (Any): Positional arguments to pass to the agent. - remote (bool, optional): Flag indicating whether to run the agent - remotely. Overrides the default setting. (default: :obj:`None`) - **kwargs (Any): Keyword arguments to pass to the agent. - - Returns: - str: The response from the agent. - """ - if remote is None: - remote = self.remote - return self.agent.run(*args, remote=remote, **kwargs) - - def chat( - self, - *args: Any, - remote: Optional[bool] = None, - **kwargs: Any, - ) -> Any: - r"""Runs the agent in a chat conversation mode. - - Args: - *args (Any): Positional arguments to pass to the agent. - remote (bool, optional): Flag indicating whether to run the agent - remotely. Overrides the default setting. (default: :obj:`None`) - **kwargs (Any): Keyword arguments to pass to the agent. - - Returns: - str: The response from the agent. 
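-
-        Example (an illustrative sketch, not a verbatim excerpt; assumes an
-        OpenAI API key is available to the underlying ``OpenAiAgent`` and the
-        model name is only a placeholder)::
-
-            agent = HuggingFaceToolAgent("hf_agent", model="text-davinci-003")
-            capybara_image = agent.chat("Show me an image of a capybara")
-            capybara_image.save("./capybara_image.png")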
- """ - if remote is None: - remote = self.remote - return self.agent.chat(*args, remote=remote, **kwargs) diff --git a/spaces/jvde/sovits-webui/text/japanese.py b/spaces/jvde/sovits-webui/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/jvde/sovits-webui/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def 
get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/k1ngtai/MMS/app.py b/spaces/k1ngtai/MMS/app.py deleted file mode 100644 index 9e128ddcc5dab9d167d4483186dcb8a089e0e939..0000000000000000000000000000000000000000 --- a/spaces/k1ngtai/MMS/app.py +++ /dev/null @@ -1,94 +0,0 @@ -import gradio as gr -import librosa -from asr import transcribe -from tts import synthesize, TTS_EXAMPLES - -ALL_LANGUAGES = {} - -for task in ["asr", "tts", "lid"]: - ALL_LANGUAGES.setdefault(task, {}) - with open(f"data/{task}/all_langs.tsv") as f: - for line in f: - iso, name = line.split(" ", 1) - ALL_LANGUAGES[task][iso] = name - - -def identify(microphone, file_upload): - LID_SAMPLING_RATE = 16_000 - - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. 
" - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - audio_fp = microphone if microphone is not None else file_upload - inputs = librosa.load(audio_fp, sr=LID_SAMPLING_RATE, mono=True)[0] - - raw_output = {"eng": 0.9, "hin": 0.04, "heb": 0.03, "ara": 0.02, "fra": 0.01} - return {(k + ": " + ALL_LANGUAGES["lid"][k]): v for k, v in raw_output.items()} - - -demo = gr.Blocks() - -mms_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.Audio(source="microphone", type="filepath"), - gr.Audio(source="upload", type="filepath"), - gr.Dropdown( - [f"{k}: {v}" for k, v in ALL_LANGUAGES["asr"].items()], - label="Language", - value="shn: Shan", - ), - ], - outputs="text", - title="Speech-to-text", - description=("Transcribe audio!"), - allow_flagging="never", -) - -mms_synthesize = gr.Interface( - fn=synthesize, - inputs=[ - gr.Text(label="Input text"), - gr.Dropdown( - [f"{k}: {v}" for k, v in ALL_LANGUAGES["tts"].items()], - label="Language", - value="shn: Shan", - ), - gr.Slider(minimum=0.1, maximum=4.0, value=1.0, step=0.1, label="Speed"), - ], - outputs=[ - gr.Audio(label="Generated Audio", type="numpy"), - gr.Text(label="Filtered text after removing OOVs"), - ], - examples=TTS_EXAMPLES, - title="Text-to-speech", - description=("Generate audio!"), - allow_flagging="never", -) - -mms_identify = gr.Interface( - fn=identify, - inputs=[ - gr.Audio(source="microphone", type="filepath"), - gr.Audio(source="upload", type="filepath"), - ], - outputs=gr.Label(num_top_classes=10), - title="Language Identification", - description=("Identity the language of audio!"), - allow_flagging="never", -) - -with demo: - gr.TabbedInterface( - [mms_transcribe, mms_synthesize, mms_identify], - ["Speech-to-text", "Text-to-speech", "Language Identification"], - ) - -demo.launch() diff --git a/spaces/k4black/codebleu/tests.py b/spaces/k4black/codebleu/tests.py deleted file mode 100644 index 601ed757507caebec67493462d11eb4c8901c2a1..0000000000000000000000000000000000000000 --- a/spaces/k4black/codebleu/tests.py +++ /dev/null @@ -1,17 +0,0 @@ -test_cases = [ - { - "predictions": [0, 0], - "references": [1, 1], - "result": {"metric_score": 0} - }, - { - "predictions": [1, 1], - "references": [1, 1], - "result": {"metric_score": 1} - }, - { - "predictions": [1, 0], - "references": [1, 1], - "result": {"metric_score": 0.5} - } -] \ No newline at end of file diff --git a/spaces/kaicheng/ChatGPT_ad/assets/custom.css b/spaces/kaicheng/ChatGPT_ad/assets/custom.css deleted file mode 100644 index 22108488886cfc8d7772214dd9b83727b3fca6a3..0000000000000000000000000000000000000000 --- a/spaces/kaicheng/ChatGPT_ad/assets/custom.css +++ /dev/null @@ -1,468 +0,0 @@ -:root { - --chatbot-color-light: #000000; - --chatbot-color-dark: #FFFFFF; - --chatbot-background-color-light: #F3F3F3; - --chatbot-background-color-dark: #121111; - --message-user-background-color-light: #95EC69; - --message-user-background-color-dark: #26B561; - --message-bot-background-color-light: #FFFFFF; - --message-bot-background-color-dark: #2C2C2C; -} - -#app_title { - font-weight: var(--prose-header-text-weight); - font-size: var(--text-xxl); - line-height: 1.3; - text-align: left; - margin-top: 6px; - white-space: nowrap; -} -#description { - text-align: center; - margin: 32px 0 4px 0; -} - -/* gradio的页脚信息 */ -footer { - /* display: none !important; */ - margin-top: .2em 
!important; - font-size: 85%; -} -#footer { - text-align: center; -} -#footer div { - display: inline-block; -} -#footer .versions{ - font-size: 85%; - opacity: 0.60; -} - -#float_display { - position: absolute; - max-height: 30px; -} -/* user_info */ -#user_info { - white-space: nowrap; - position: absolute; left: 8em; top: .2em; - z-index: var(--layer-2); - box-shadow: var(--block-shadow); - border: none; border-radius: var(--block-label-radius); - background: var(--color-accent); - padding: var(--block-label-padding); - font-size: var(--block-label-text-size); line-height: var(--line-sm); - width: auto; min-height: 30px!important; - opacity: 1; - transition: opacity 0.3s ease-in-out; -} -#user_info .wrap { - opacity: 0; -} -#user_info p { - color: white; - font-weight: var(--block-label-text-weight); -} -#user_info.hideK { - opacity: 0; - transition: opacity 1s ease-in-out; -} - -/* status_display */ -#status_display { - display: flex; - min-height: 2em; - align-items: flex-end; - justify-content: flex-end; -} -#status_display p { - font-size: .85em; - font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace; - /* Windows下中文的monospace会fallback为新宋体,实在太丑,这里折中使用微软雅黑 */ - color: var(--body-text-color-subdued); -} - -#status_display { - transition: all 0.6s; -} -#chuanhu_chatbot { - transition: height 0.3s ease; -} - -/* usage_display */ -.insert_block { - position: relative; - margin: 0; - padding: .5em 1em; - box-shadow: var(--block-shadow); - border-width: var(--block-border-width); - border-color: var(--block-border-color); - border-radius: var(--block-radius); - background: var(--block-background-fill); - width: 100%; - line-height: var(--line-sm); - min-height: 2em; -} -#usage_display p, #usage_display span { - margin: 0; - font-size: .85em; - color: var(--body-text-color-subdued); -} -.progress-bar { - background-color: var(--input-background-fill);; - margin: .5em 0 !important; - height: 20px; - border-radius: 10px; - overflow: hidden; -} -.progress { - background-color: var(--block-title-background-fill); - height: 100%; - border-radius: 10px; - text-align: right; - transition: width 0.5s ease-in-out; -} -.progress-text { - /* color: white; */ - color: var(--color-accent) !important; - font-size: 1em !important; - font-weight: bold; - padding-right: 10px; - line-height: 20px; -} - -.apSwitch { - top: 2px; - display: inline-block; - height: 24px; - position: relative; - width: 48px; - border-radius: 12px; -} -.apSwitch input { - display: none !important; -} -.apSlider { - background-color: var(--neutral-200); - bottom: 0; - cursor: pointer; - left: 0; - position: absolute; - right: 0; - top: 0; - transition: .4s; - font-size: 18px; - border-radius: 12px; -} -.apSlider::before { - bottom: -1.5px; - left: 1px; - position: absolute; - transition: .4s; - content: "🌞"; -} -input:checked + .apSlider { - background-color: var(--primary-600); -} -input:checked + .apSlider::before { - transform: translateX(23px); - content:"🌚"; -} - -/* Override Slider Styles (for webkit browsers like Safari and Chrome) - * 好希望这份提案能早日实现 https://github.com/w3c/csswg-drafts/issues/4410 - * 进度滑块在各个平台还是太不统一了 - */ -input[type="range"] { - -webkit-appearance: none; - height: 4px; - background: var(--input-background-fill); - border-radius: 5px; - background-image: linear-gradient(var(--primary-500),var(--primary-500)); - background-size: 0% 100%; - background-repeat: no-repeat; -} -input[type="range"]::-webkit-slider-thumb { - 
-webkit-appearance: none; - height: 20px; - width: 20px; - border-radius: 50%; - border: solid 0.5px #ddd; - background-color: white; - cursor: ew-resize; - box-shadow: var(--input-shadow); - transition: background-color .1s ease; -} -input[type="range"]::-webkit-slider-thumb:hover { - background: var(--neutral-50); -} -input[type=range]::-webkit-slider-runnable-track { - -webkit-appearance: none; - box-shadow: none; - border: none; - background: transparent; -} - -#submit_btn, #cancel_btn { - height: 42px !important; -} -#submit_btn::before { - content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E"); - height: 21px; -} -#cancel_btn::before { - content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 
C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E"); - height: 21px; -} -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} - -/* 亮色(默认) */ -#chuanhu_chatbot { - background-color: var(--chatbot-background-color-light) !important; - color: var(--chatbot-color-light) !important; -} -[data-testid = "bot"] { - background-color: var(--message-bot-background-color-light) !important; -} -[data-testid = "user"] { - background-color: var(--message-user-background-color-light) !important; -} -/* 暗色 */ -.dark #chuanhu_chatbot { - background-color: var(--chatbot-background-color-dark) !important; - color: var(--chatbot-color-dark) !important; -} -.dark [data-testid = "bot"] { - background-color: var(--message-bot-background-color-dark) !important; -} -.dark [data-testid = "user"] { - background-color: var(--message-user-background-color-dark) !important; -} - -/* 屏幕宽度大于等于500px的设备 */ -/* update on 2023.4.8: 高度的细致调整已写入JavaScript */ -@media screen and (min-width: 500px) { - #chuanhu_chatbot { - height: calc(100vh - 200px); - } - #chuanhu_chatbot .wrap { - max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } -} -/* 屏幕宽度小于500px的设备 */ -@media screen and (max-width: 499px) { - #chuanhu_chatbot { - height: calc(100vh - 140px); - } - #chuanhu_chatbot .wrap { - max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } - [data-testid = "bot"] { - max-width: 95% !important; - } - #app_title h1{ - letter-spacing: -1px; font-size: 22px; - } -} -#chuanhu_chatbot .wrap { - overflow-x: hidden; -} -/* 对话气泡 */ -.message { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: var(--text-md) !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - border-bottom-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; -} - -.message.user p { - white-space: pre-wrap; -} -.message .user-message { - display: block; - padding: 0 !important; - white-space: pre-wrap; -} - -.message .md-message p { - margin-top: 0.6em !important; - margin-bottom: 0.6em 
!important; -} -.message .md-message p:first-child { margin-top: 0 !important; } -.message .md-message p:last-of-type { margin-bottom: 0 !important; } - -.message .md-message { - display: block; - padding: 0 !important; -} -.message .raw-message p { - margin:0 !important; -} -.message .raw-message { - display: block; - padding: 0 !important; - white-space: pre-wrap; -} -.raw-message.hideM, .md-message.hideM { - display: none; -} - -/* custom buttons */ -.chuanhu-btn { - border-radius: 5px; - /* background-color: #E6E6E6 !important; */ - color: rgba(120, 120, 120, 0.64) !important; - padding: 4px !important; - position: absolute; - right: -22px; - cursor: pointer !important; - transition: color .2s ease, background-color .2s ease; -} -.chuanhu-btn:hover { - background-color: rgba(167, 167, 167, 0.25) !important; - color: unset !important; -} -.chuanhu-btn:active { - background-color: rgba(167, 167, 167, 0.5) !important; -} -.chuanhu-btn:focus { - outline: none; -} -.copy-bot-btn { - /* top: 18px; */ - bottom: 0; -} -.toggle-md-btn { - /* top: 0; */ - bottom: 20px; -} -.copy-code-btn { - position: relative; - float: right; - font-size: 1em; - cursor: pointer; -} - -.message-wrap>div img{ - border-radius: 10px !important; -} - -/* history message */ -.wrap>.history-message { - padding: 10px !important; -} -.history-message { - /* padding: 0 !important; */ - opacity: 80%; - display: flex; - flex-direction: column; -} -.history-message>.history-message { - padding: 0 !important; -} -.history-message>.message-wrap { - padding: 0 !important; - margin-bottom: 16px; -} -.history-message>.message { - margin-bottom: 16px; -} -.wrap>.history-message::after { - content: ""; - display: block; - height: 2px; - background-color: var(--body-text-color-subdued); - margin-bottom: 10px; - margin-top: -10px; - clear: both; -} -.wrap>.history-message>:last-child::after { - content: "仅供查看"; - display: block; - text-align: center; - color: var(--body-text-color-subdued); - font-size: 0.8em; -} - -/* 表格 */ -table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -td,th { - border: 1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -thead { - background-color: rgba(175,184,193,0.2); -} -thead th { - padding: .5em .2em; -} -/* 行内代码 */ -.message :not(pre) code { - display: inline; - white-space: break-spaces; - font-family: var(--font-mono); - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -.message pre, -.message pre[class*=language-] { - color: #fff; - overflow-x: auto; - overflow-y: hidden; - margin: .8em 1em 1em 0em !important; - padding: var(--spacing-xl) 1.2em !important; - border-radius: var(--radius-lg) !important; -} -.message pre code, -.message pre code[class*=language-] { - color: #fff; - padding: 0; - margin: 0; - background-color: unset; - text-shadow: none; - font-family: var(--font-mono); -} -/* 覆盖 gradio 丑陋的复制按钮样式 */ -pre button[title="copy"] { - border-radius: 5px; - transition: background-color .2s ease; -} -pre button[title="copy"]:hover { - background-color: #333232; -} -pre button .check { - color: #fff !important; - background: var(--neutral-950) !important; -} - -/* 覆盖prism.css */ -.language-css .token.string, -.style .token.string, -.token.entity, -.token.operator, -.token.url { - background: none !important; -} diff --git a/spaces/karolmajek/YOLOR/train.py b/spaces/karolmajek/YOLOR/train.py deleted file mode 100644 index 
b920bf268cc2208c0300946f563a775a0be1e3e7..0000000000000000000000000000000000000000 --- a/spaces/karolmajek/YOLOR/train.py +++ /dev/null @@ -1,619 +0,0 @@ -import argparse -import logging -import math -import os -import random -import time -from pathlib import Path -from warnings import warn - -import numpy as np -import torch.distributed as dist -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch.optim.lr_scheduler as lr_scheduler -import torch.utils.data -import yaml -from torch.cuda import amp -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -import test # import test.py to get mAP after each epoch -#from models.yolo import Model -from models.models import * -from utils.autoanchor import check_anchors -from utils.datasets import create_dataloader -from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ - fitness, fitness_p, fitness_r, fitness_ap50, fitness_ap, fitness_f, strip_optimizer, get_latest_run,\ - check_dataset, check_file, check_git_status, check_img_size, print_mutation, set_logging -from utils.google_utils import attempt_download -from utils.loss import compute_loss -from utils.plots import plot_images, plot_labels, plot_results, plot_evolution -from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first - -logger = logging.getLogger(__name__) - -try: - import wandb -except ImportError: - wandb = None - logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)") - -def train(hyp, opt, device, tb_writer=None, wandb=None): - logger.info(f'Hyperparameters {hyp}') - save_dir, epochs, batch_size, total_batch_size, weights, rank = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank - - # Directories - wdir = save_dir / 'weights' - wdir.mkdir(parents=True, exist_ok=True) # make dir - last = wdir / 'last.pt' - best = wdir / 'best.pt' - results_file = save_dir / 'results.txt' - - # Save run settings - with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.dump(hyp, f, sort_keys=False) - with open(save_dir / 'opt.yaml', 'w') as f: - yaml.dump(vars(opt), f, sort_keys=False) - - # Configure - plots = not opt.evolve # create plots - cuda = device.type != 'cpu' - init_seeds(2 + rank) - with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict - with torch_distributed_zero_first(rank): - check_dataset(data_dict) # check - train_path = data_dict['train'] - test_path = data_dict['val'] - nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names - assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check - - # Model - pretrained = weights.endswith('.pt') - if pretrained: - with torch_distributed_zero_first(rank): - attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location=device) # load checkpoint - model = Darknet(opt.cfg).to(device) # create - state_dict = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()} - model.load_state_dict(state_dict, strict=False) - print('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report - else: - model = Darknet(opt.cfg).to(device) # create - - # Optimizer - nbs = 64 # nominal batch size - accumulate = 
max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay - - pg0, pg1, pg2 = [], [], [] # optimizer parameter groups - for k, v in dict(model.named_parameters()).items(): - if '.bias' in k: - pg2.append(v) # biases - elif 'Conv2d.weight' in k: - pg1.append(v) # apply weight_decay - elif 'm.weight' in k: - pg1.append(v) # apply weight_decay - elif 'w.weight' in k: - pg1.append(v) # apply weight_decay - else: - pg0.append(v) # all else - - if opt.adam: - optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum - else: - optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) - - optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay - optimizer.add_param_group({'params': pg2}) # add pg2 (biases) - logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) - del pg0, pg1, pg2 - - # Scheduler https://arxiv.org/pdf/1812.01187.pdf - # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR - lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) - # plot_lr_scheduler(optimizer, scheduler, epochs) - - # Logging - if wandb and wandb.run is None: - opt.hyp = hyp # add hyperparameters - wandb_run = wandb.init(config=opt, resume="allow", - project='YOLOR' if opt.project == 'runs/train' else Path(opt.project).stem, - name=save_dir.stem, - id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) - - # Resume - start_epoch, best_fitness = 0, 0.0 - best_fitness_p, best_fitness_r, best_fitness_ap50, best_fitness_ap, best_fitness_f = 0.0, 0.0, 0.0, 0.0, 0.0 - if pretrained: - # Optimizer - if ckpt['optimizer'] is not None: - optimizer.load_state_dict(ckpt['optimizer']) - best_fitness = ckpt['best_fitness'] - best_fitness_p = ckpt['best_fitness_p'] - best_fitness_r = ckpt['best_fitness_r'] - best_fitness_ap50 = ckpt['best_fitness_ap50'] - best_fitness_ap = ckpt['best_fitness_ap'] - best_fitness_f = ckpt['best_fitness_f'] - - # Results - if ckpt.get('training_results') is not None: - with open(results_file, 'w') as file: - file.write(ckpt['training_results']) # write results.txt - - # Epochs - start_epoch = ckpt['epoch'] + 1 - if opt.resume: - assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) - if epochs < start_epoch: - logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
% - (weights, ckpt['epoch'], epochs)) - epochs += ckpt['epoch'] # finetune additional epochs - - del ckpt, state_dict - - # Image sizes - gs = 64 #int(max(model.stride)) # grid size (max stride) - imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples - - # DP mode - if cuda and rank == -1 and torch.cuda.device_count() > 1: - model = torch.nn.DataParallel(model) - - # SyncBatchNorm - if opt.sync_bn and cuda and rank != -1: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - logger.info('Using SyncBatchNorm()') - - # EMA - ema = ModelEMA(model) if rank in [-1, 0] else None - - # DDP mode - if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) - - # Trainloader - dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, - rank=rank, world_size=opt.world_size, workers=opt.workers) - mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class - nb = len(dataloader) # number of batches - assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) - - # Process 0 - if rank in [-1, 0]: - ema.updates = start_epoch * nb // accumulate # set EMA updates - testloader = create_dataloader(test_path, imgsz_test, batch_size*2, gs, opt, - hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, - rank=-1, world_size=opt.world_size, workers=opt.workers)[0] # testloader - - if not opt.resume: - labels = np.concatenate(dataset.labels, 0) - c = torch.tensor(labels[:, 0]) # classes - # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency - # model._initialize_biases(cf.to(device)) - if plots: - plot_labels(labels, save_dir=save_dir) - if tb_writer: - tb_writer.add_histogram('classes', c, 0) - if wandb: - wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.png')]}) - - # Anchors - # if not opt.noautoanchor: - # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) - - # Model parameters - hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset - model.nc = nc # attach number of classes to model - model.hyp = hyp # attach hyperparameters to model - model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) - model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights - model.names = names - - # Start training - t0 = time.time() - nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) - # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training - maps = np.zeros(nc) # mAP per class - results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = amp.GradScaler(enabled=cuda) - logger.info('Image sizes %g train, %g test\n' - 'Using %g dataloader workers\nLogging results to %s\n' - 'Starting training for %g epochs...' 
% (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs)) - - torch.save(model, wdir / 'init.pt') - - for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - model.train() - - # Update image weights (optional) - if opt.image_weights: - # Generate indices - if rank in [-1, 0]: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - # Broadcast if DDP - if rank != -1: - indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() - dist.broadcast(indices, 0) - if rank != 0: - dataset.indices = indices.cpu().numpy() - - # Update mosaic border - # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) - # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - - mloss = torch.zeros(4, device=device) # mean losses - if rank != -1: - dataloader.sampler.set_epoch(epoch) - pbar = enumerate(dataloader) - logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) - if rank in [-1, 0]: - pbar = tqdm(pbar, total=nb) # progress bar - optimizer.zero_grad() - for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- - ni = i + nb * epoch # number integrated batches (since train start) - imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 - - # Warmup - if ni <= nw: - xi = [0, nw] # x interp - # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) - for j, x in enumerate(optimizer.param_groups): - # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) - if 'momentum' in x: - x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) - - # Multi-scale - if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size - sf = sz / max(imgs.shape[2:]) # scale factor - if sf != 1: - ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) - - # Forward - with amp.autocast(enabled=cuda): - pred = model(imgs) # forward - loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size - if rank != -1: - loss *= opt.world_size # gradient averaged between devices in DDP mode - - # Backward - scaler.scale(loss).backward() - - # Optimize - if ni % accumulate == 0: - scaler.step(optimizer) # optimizer.step - scaler.update() - optimizer.zero_grad() - if ema: - ema.update(model) - - # Print - if rank in [-1, 0]: - mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) - s = ('%10s' * 2 + '%10.4g' * 6) % ( - '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) - pbar.set_description(s) - - # Plot - if plots and ni < 3: - f = save_dir / f'train_batch{ni}.jpg' # filename - plot_images(images=imgs, targets=targets, paths=paths, fname=f) - # if tb_writer: - # tb_writer.add_image(f, result, dataformats='HWC', 
global_step=epoch) - # tb_writer.add_graph(model, imgs) # add model to tensorboard - elif plots and ni == 3 and wandb: - wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]}) - - # end batch ------------------------------------------------------------------------------------------------ - # end epoch ---------------------------------------------------------------------------------------------------- - - # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard - scheduler.step() - - # DDP process 0 or single-GPU - if rank in [-1, 0]: - # mAP - if ema: - ema.update_attr(model) - final_epoch = epoch + 1 == epochs - if not opt.notest or final_epoch: # Calculate mAP - if epoch >= 3: - results, maps, times = test.test(opt.data, - batch_size=batch_size*2, - imgsz=imgsz_test, - model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema, - single_cls=opt.single_cls, - dataloader=testloader, - save_dir=save_dir, - plots=plots and final_epoch, - log_imgs=opt.log_imgs if wandb else 0) - - # Write - with open(results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - if len(opt.name) and opt.bucket: - os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) - - # Log - tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params - for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): - if tb_writer: - tb_writer.add_scalar(tag, x, epoch) # tensorboard - if wandb: - wandb.log({tag: x}) # W&B - - # Update best mAP - fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - fi_p = fitness_p(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - fi_r = fitness_r(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - fi_ap50 = fitness_ap50(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - fi_ap = fitness_ap(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - if (fi_p > 0.0) or (fi_r > 0.0): - fi_f = fitness_f(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - else: - fi_f = 0.0 - if fi > best_fitness: - best_fitness = fi - if fi_p > best_fitness_p: - best_fitness_p = fi_p - if fi_r > best_fitness_r: - best_fitness_r = fi_r - if fi_ap50 > best_fitness_ap50: - best_fitness_ap50 = fi_ap50 - if fi_ap > best_fitness_ap: - best_fitness_ap = fi_ap - if fi_f > best_fitness_f: - best_fitness_f = fi_f - - # Save model - save = (not opt.nosave) or (final_epoch and not opt.evolve) - if save: - with open(results_file, 'r') as f: # create checkpoint - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'best_fitness_p': best_fitness_p, - 'best_fitness_r': best_fitness_r, - 'best_fitness_ap50': best_fitness_ap50, - 'best_fitness_ap': best_fitness_ap, - 'best_fitness_f': best_fitness_f, - 'training_results': f.read(), - 'model': ema.ema.module.state_dict() if hasattr(ema, 'module') else ema.ema.state_dict(), - 'optimizer': None if final_epoch else optimizer.state_dict(), - 'wandb_id': wandb_run.id if wandb else None} - - # Save last, best and delete - torch.save(ckpt, last) - if best_fitness == fi: - torch.save(ckpt, 
best) - if (best_fitness == fi) and (epoch >= 200): - torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch)) - if best_fitness == fi: - torch.save(ckpt, wdir / 'best_overall.pt') - if best_fitness_p == fi_p: - torch.save(ckpt, wdir / 'best_p.pt') - if best_fitness_r == fi_r: - torch.save(ckpt, wdir / 'best_r.pt') - if best_fitness_ap50 == fi_ap50: - torch.save(ckpt, wdir / 'best_ap50.pt') - if best_fitness_ap == fi_ap: - torch.save(ckpt, wdir / 'best_ap.pt') - if best_fitness_f == fi_f: - torch.save(ckpt, wdir / 'best_f.pt') - if epoch == 0: - torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) - if ((epoch+1) % 25) == 0: - torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) - if epoch >= (epochs-5): - torch.save(ckpt, wdir / 'last_{:03d}.pt'.format(epoch)) - elif epoch >= 420: - torch.save(ckpt, wdir / 'last_{:03d}.pt'.format(epoch)) - del ckpt - # end epoch ---------------------------------------------------------------------------------------------------- - # end training - - if rank in [-1, 0]: - # Strip optimizers - n = opt.name if opt.name.isnumeric() else '' - fresults, flast, fbest = save_dir / f'results{n}.txt', wdir / f'last{n}.pt', wdir / f'best{n}.pt' - for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', results_file], [flast, fbest, fresults]): - if f1.exists(): - os.rename(f1, f2) # rename - if str(f2).endswith('.pt'): # is *.pt - strip_optimizer(f2) # strip optimizer - os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None # upload - # Finish - if plots: - plot_results(save_dir=save_dir) # save as results.png - if wandb: - wandb.log({"Results": [wandb.Image(str(save_dir / x), caption=x) for x in - ['results.png', 'precision-recall_curve.png']]}) - logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) - else: - dist.destroy_process_group() - - wandb.run.finish() if wandb and wandb.run else None - torch.cuda.empty_cache() - return results - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='yolor_p6.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path') - parser.add_argument('--hyp', type=str, default='data/hyp.scratch.1280.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300) - parser.add_argument('--batch-size', type=int, default=8, help='total batch size for all GPUs') - parser.add_argument('--img-size', nargs='+', type=int, default=[1280, 1280], help='[train, test] image sizes') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--notest', action='store_true', help='only test final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') - parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - 
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') - parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100') - parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') - parser.add_argument('--project', default='runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - opt = parser.parse_args() - - # Set DDP variables - opt.total_batch_size = opt.batch_size - opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 - opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 - set_logging(opt.global_rank) - if opt.global_rank in [-1, 0]: - check_git_status() - - # Resume - if opt.resume: # resume an interrupted run - ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path - assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' - with open(Path(ckpt).parent.parent / 'opt.yaml') as f: - opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace - opt.cfg, opt.weights, opt.resume = '', ckpt, True - logger.info('Resuming training from %s' % ckpt) - else: - # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') - opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files - assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) - opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run - - # DDP mode - device = select_device(opt.device, batch_size=opt.batch_size) - if opt.local_rank != -1: - assert torch.cuda.device_count() > opt.local_rank - torch.cuda.set_device(opt.local_rank) - device = torch.device('cuda', opt.local_rank) - dist.init_process_group(backend='nccl', init_method='env://') # distributed backend - assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' - opt.batch_size = opt.total_batch_size // opt.world_size - - # Hyperparameters - with open(opt.hyp) as f: - hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps - if 'box' not in hyp: - warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' % - (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120')) - hyp['box'] = hyp.pop('giou') - - # Train - logger.info(opt) - if not opt.evolve: - tb_writer = None # init loggers - if opt.global_rank in [-1, 0]: - logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') - tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(hyp, opt, device, tb_writer, wandb) - - 
# Evolve hyperparameters (optional) - else: - # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 - 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay - 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) - 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum - 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr - 'box': (1, 0.02, 0.2), # box loss gain - 'cls': (1, 0.2, 4.0), # cls loss gain - 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight - 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) - 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight - 'iou_t': (0, 0.1, 0.7), # IoU training threshold - 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold - 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) - 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) - 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) - 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) - 'scale': (1, 0.0, 0.9), # image scale (+/- gain) - 'shear': (1, 0.0, 10.0), # image shear (+/- deg) - 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) - 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0)} # image mixup (probability) - - assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' - opt.notest, opt.nosave = True, True # only test/save final epoch - # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here - if opt.bucket: - os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists - - for _ in range(300): # generations to evolve - if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate - # Select parent(s) - parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt('evolve.txt', ndmin=2) - n = min(5, len(x)) # number of previous results to consider - x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() # weights - if parent == 'single' or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[random.choices(range(n), weights=w)[0]] # weighted selection - elif parent == 'weighted': - x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination - - # Mutate - mp, s = 0.8, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([x[0] for x in meta.values()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all(v == 1): # mutate until a change occurs (prevent duplicates) - v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device, wandb=wandb) - - # Write mutation results - print_mutation(hyp.copy(), results, yaml_file, opt.bucket) - - # Plot results - plot_evolution(yaml_file) - print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' - f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') diff --git a/spaces/katanaml-org/sparrow-ui/Dockerfile b/spaces/katanaml-org/sparrow-ui/Dockerfile deleted file mode 100644 index 6ac220a6985533e2af69a685130ead53e689174a..0000000000000000000000000000000000000000 --- a/spaces/katanaml-org/sparrow-ui/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -FROM python:3.11-slim - -WORKDIR /code - -COPY requirements.txt ./ - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -RUN useradd -m -u 1000 user - -USER user - -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -WORKDIR $HOME/app - -COPY --chown=user . 
$HOME/app
-
-COPY --chown=user config/config.toml $HOME/app/.streamlit/config.toml
-
-CMD ["streamlit", "run", "main.py", "--server.port=7860", "--server.address=0.0.0.0"]
diff --git a/spaces/kcagle/AutoGPT/autogpt/__main__.py b/spaces/kcagle/AutoGPT/autogpt/__main__.py
deleted file mode 100644
index 128f9eea4900429e88276abdde3419b806001ac7..0000000000000000000000000000000000000000
--- a/spaces/kcagle/AutoGPT/autogpt/__main__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Auto-GPT: A GPT powered AI Assistant"""
-import autogpt.cli
-
-if __name__ == "__main__":
-    autogpt.cli.main()
diff --git a/spaces/kdrkdrkdr/AzusaTTS/README.md b/spaces/kdrkdrkdr/AzusaTTS/README.md
deleted file mode 100644
index 09da013000a9785607fd3e86cc8f72302d7c6381..0000000000000000000000000000000000000000
--- a/spaces/kdrkdrkdr/AzusaTTS/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AzusaTTS
-emoji: 😄
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.10.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kepl/gpt/g4f/Provider/Providers/Bard.py b/spaces/kepl/gpt/g4f/Provider/Providers/Bard.py
deleted file mode 100644
index 4c37c4b719430031fce41ce49946f0e6ac93d155..0000000000000000000000000000000000000000
--- a/spaces/kepl/gpt/g4f/Provider/Providers/Bard.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import os, requests, json, browser_cookie3, re, random
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://bard.google.com'
-model = ['Palm2']
-supports_stream = False
-needs_auth = True
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
-        domain_name='.google.com')}['__Secure-1PSID']
-
-    formatted = '\n'.join([
-        '%s: %s' % (message['role'], message['content']) for message in messages
-    ])
-    prompt = f'{formatted}\nAssistant:'
-
-    proxy = kwargs.get('proxy', False)
-    if proxy == False:
-        print('warning!, you did not give a proxy, a lot of countries are banned from Google Bard, so it may not work')
-
-    snlm0e = None
-    conversation_id = None
-    response_id = None
-    choice_id = None
-
-    client = requests.Session()
-    client.proxies = {
-        'http': f'http://{proxy}',
-        'https': f'http://{proxy}'} if proxy else None
-
-    client.headers = {
-        'authority': 'bard.google.com',
-        'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
-        'origin': 'https://bard.google.com',
-        'referer': 'https://bard.google.com/',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
-        'x-same-domain': '1',
-        'cookie': f'__Secure-1PSID={psid}'
-    }
-
-    snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
-        client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
-
-    params = {
-        'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
-        '_reqid': random.randint(1111, 9999),
-        'rt': 'c'
-    }
-
-    data = {
-        'at': snlm0e,
-        'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
-
-    intents = '.'.join([
-        'assistant',
-        'lamda',
-        'BardFrontendService'
-    ])
-
-    response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
-        data=data, params=params)
-
-    chat_data = json.loads(response.content.splitlines()[3])[0][2]
-    if chat_data:
-        json_chat_data = json.loads(chat_data)
-
-        yield json_chat_data[0][0]
-
-    else:
-        yield 'error'
-
-params = 
f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/keras-dreambooth/Pokemon-dreambooth/README.md b/spaces/keras-dreambooth/Pokemon-dreambooth/README.md deleted file mode 100644 index 42a0a55d2102c7e6cb311181d30927460752dd24..0000000000000000000000000000000000000000 --- a/spaces/keras-dreambooth/Pokemon-dreambooth/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Pokemon Dreambooth -emoji: 🌖 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -tags: - - keras-dreambooth - - scifi -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/speaker_encoder/preprocess.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/speaker_encoder/preprocess.py deleted file mode 100644 index fe5ab25ef7cb4adeb76cad11962f179d6a38edcc..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/speaker_encoder/preprocess.py +++ /dev/null @@ -1,285 +0,0 @@ -from multiprocess.pool import ThreadPool -from speaker_encoder.params_data import * -from speaker_encoder.config import librispeech_datasets, anglophone_nationalites -from datetime import datetime -from speaker_encoder import audio -from pathlib import Path -from tqdm import tqdm -import numpy as np - - -class DatasetLog: - """ - Registers metadata about the dataset in a text file. - """ - def __init__(self, root, name): - self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w") - self.sample_data = dict() - - start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) - self.write_line("Creating dataset %s on %s" % (name, start_time)) - self.write_line("-----") - self._log_params() - - def _log_params(self): - from speaker_encoder import params_data - self.write_line("Parameter values:") - for param_name in (p for p in dir(params_data) if not p.startswith("__")): - value = getattr(params_data, param_name) - self.write_line("\t%s: %s" % (param_name, value)) - self.write_line("-----") - - def write_line(self, line): - self.text_file.write("%s\n" % line) - - def add_sample(self, **kwargs): - for param_name, value in kwargs.items(): - if not param_name in self.sample_data: - self.sample_data[param_name] = [] - self.sample_data[param_name].append(value) - - def finalize(self): - self.write_line("Statistics:") - for param_name, values in self.sample_data.items(): - self.write_line("\t%s:" % param_name) - self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values))) - self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values))) - self.write_line("-----") - end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) - self.write_line("Finished on %s" % end_time) - self.text_file.close() - - -def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> (Path, DatasetLog): - dataset_root = datasets_root.joinpath(dataset_name) - if not dataset_root.exists(): - print("Couldn\'t find %s, skipping this dataset." 
% dataset_root) - return None, None - return dataset_root, DatasetLog(out_dir, dataset_name) - - -def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, extension, - skip_existing, logger): - print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs))) - - # Function to preprocess utterances for one speaker - def preprocess_speaker(speaker_dir: Path): - # Give a name to the speaker that includes its dataset - speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) - - # Create an output directory with that name, as well as a txt file containing a - # reference to each source file. - speaker_out_dir = out_dir.joinpath(speaker_name) - speaker_out_dir.mkdir(exist_ok=True) - sources_fpath = speaker_out_dir.joinpath("_sources.txt") - - # There's a possibility that the preprocessing was interrupted earlier, check if - # there already is a sources file. - if sources_fpath.exists(): - try: - with sources_fpath.open("r") as sources_file: - existing_fnames = {line.split(",")[0] for line in sources_file} - except: - existing_fnames = {} - else: - existing_fnames = {} - - # Gather all audio files for that speaker recursively - sources_file = sources_fpath.open("a" if skip_existing else "w") - for in_fpath in speaker_dir.glob("**/*.%s" % extension): - # Check if the target output file already exists - out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) - out_fname = out_fname.replace(".%s" % extension, ".npy") - if skip_existing and out_fname in existing_fnames: - continue - - # Load and preprocess the waveform - wav = audio.preprocess_wav(in_fpath) - if len(wav) == 0: - continue - - # Create the mel spectrogram, discard those that are too short - frames = audio.wav_to_mel_spectrogram(wav) - if len(frames) < partials_n_frames: - continue - - out_fpath = speaker_out_dir.joinpath(out_fname) - np.save(out_fpath, frames) - logger.add_sample(duration=len(wav) / sampling_rate) - sources_file.write("%s,%s\n" % (out_fname, in_fpath)) - - sources_file.close() - - # Process the utterances for each speaker - with ThreadPool(8) as pool: - list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs), - unit="speakers")) - logger.finalize() - print("Done preprocessing %s.\n" % dataset_name) - - -# Function to preprocess utterances for one speaker -def __preprocess_speaker(speaker_dir: Path, datasets_root: Path, out_dir: Path, extension: str, skip_existing: bool): - # Give a name to the speaker that includes its dataset - speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) - - # Create an output directory with that name, as well as a txt file containing a - # reference to each source file. - speaker_out_dir = out_dir.joinpath(speaker_name) - speaker_out_dir.mkdir(exist_ok=True) - sources_fpath = speaker_out_dir.joinpath("_sources.txt") - - # There's a possibility that the preprocessing was interrupted earlier, check if - # there already is a sources file. 
- # if sources_fpath.exists(): - # try: - # with sources_fpath.open("r") as sources_file: - # existing_fnames = {line.split(",")[0] for line in sources_file} - # except: - # existing_fnames = {} - # else: - # existing_fnames = {} - existing_fnames = {} - # Gather all audio files for that speaker recursively - sources_file = sources_fpath.open("a" if skip_existing else "w") - - for in_fpath in speaker_dir.glob("**/*.%s" % extension): - # Check if the target output file already exists - out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) - out_fname = out_fname.replace(".%s" % extension, ".npy") - if skip_existing and out_fname in existing_fnames: - continue - - # Load and preprocess the waveform - wav = audio.preprocess_wav(in_fpath) - if len(wav) == 0: - continue - - # Create the mel spectrogram, discard those that are too short - frames = audio.wav_to_mel_spectrogram(wav) - if len(frames) < partials_n_frames: - continue - - out_fpath = speaker_out_dir.joinpath(out_fname) - np.save(out_fpath, frames) - # logger.add_sample(duration=len(wav) / sampling_rate) - sources_file.write("%s,%s\n" % (out_fname, in_fpath)) - - sources_file.close() - return len(wav) - -def _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, extension, - skip_existing, logger): - # from multiprocessing import Pool, cpu_count - from pathos.multiprocessing import ProcessingPool as Pool - # Function to preprocess utterances for one speaker - def __preprocess_speaker(speaker_dir: Path): - # Give a name to the speaker that includes its dataset - speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) - - # Create an output directory with that name, as well as a txt file containing a - # reference to each source file. - speaker_out_dir = out_dir.joinpath(speaker_name) - speaker_out_dir.mkdir(exist_ok=True) - sources_fpath = speaker_out_dir.joinpath("_sources.txt") - - existing_fnames = {} - # Gather all audio files for that speaker recursively - sources_file = sources_fpath.open("a" if skip_existing else "w") - wav_lens = [] - for in_fpath in speaker_dir.glob("**/*.%s" % extension): - # Check if the target output file already exists - out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) - out_fname = out_fname.replace(".%s" % extension, ".npy") - if skip_existing and out_fname in existing_fnames: - continue - - # Load and preprocess the waveform - wav = audio.preprocess_wav(in_fpath) - if len(wav) == 0: - continue - - # Create the mel spectrogram, discard those that are too short - frames = audio.wav_to_mel_spectrogram(wav) - if len(frames) < partials_n_frames: - continue - - out_fpath = speaker_out_dir.joinpath(out_fname) - np.save(out_fpath, frames) - # logger.add_sample(duration=len(wav) / sampling_rate) - sources_file.write("%s,%s\n" % (out_fname, in_fpath)) - wav_lens.append(len(wav)) - sources_file.close() - return wav_lens - - print("%s: Preprocessing data for %d speakers." 
% (dataset_name, len(speaker_dirs))) - # Process the utterances for each speaker - # with ThreadPool(8) as pool: - # list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs), - # unit="speakers")) - pool = Pool(processes=20) - for i, wav_lens in enumerate(pool.map(__preprocess_speaker, speaker_dirs), 1): - for wav_len in wav_lens: - logger.add_sample(duration=wav_len / sampling_rate) - print(f'{i}/{len(speaker_dirs)} \r') - - logger.finalize() - print("Done preprocessing %s.\n" % dataset_name) - - -def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False): - for dataset_name in librispeech_datasets["train"]["other"]: - # Initialize the preprocessing - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Preprocess all speakers - speaker_dirs = list(dataset_root.glob("*")) - _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "flac", - skip_existing, logger) - - -def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False): - # Initialize the preprocessing - dataset_name = "VoxCeleb1" - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Get the contents of the meta file - with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile: - metadata = [line.split("\t") for line in metafile][1:] - - # Select the ID and the nationality, filter out non-anglophone speakers - nationalities = {line[0]: line[3] for line in metadata} - # keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if - # nationality.lower() in anglophone_nationalites] - keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items()] - print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." % - (len(keep_speaker_ids), len(nationalities))) - - # Get the speaker directories for anglophone speakers only - speaker_dirs = dataset_root.joinpath("wav").glob("*") - speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if - speaker_dir.name in keep_speaker_ids] - print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." 
%
-          (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs)))
-
-    # Preprocess all speakers
-    _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "wav",
-                             skip_existing, logger)
-
-
-def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False):
-    # Initialize the preprocessing
-    dataset_name = "VoxCeleb2"
-    dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir)
-    if not dataset_root:
-        return
-
-    # Get the speaker directories
-    # Preprocess all speakers
-    speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*"))
-    _preprocess_speaker_dirs_vox2(speaker_dirs, dataset_name, datasets_root, out_dir, "m4a",
-                                  skip_existing, logger)
diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/speaker_encoder/data_objects/random_cycler.py b/spaces/kevinwang676/ChatGLM2-SadTalker/speaker_encoder/data_objects/random_cycler.py
deleted file mode 100644
index c405db6b27f46d874d8feb37e3f9c1e12c251109..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-SadTalker/speaker_encoder/data_objects/random_cycler.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import random
-
-class RandomCycler:
-    """
-    Creates an internal copy of a sequence and allows access to its items in a constrained random
-    order. For a source sequence of n items and one or several consecutive queries of a total
-    of m items, the following guarantees hold (one implies the other):
-        - Each item will be returned between m // n and ((m - 1) // n) + 1 times.
-        - Between two appearances of the same item, there may be at most 2 * (n - 1) other items.
-    """
-
-    def __init__(self, source):
-        if len(source) == 0:
-            raise Exception("Can't create RandomCycler from an empty collection")
-        self.all_items = list(source)
-        self.next_items = []
-
-    def sample(self, count: int):
-        shuffle = lambda l: random.sample(l, len(l))
-
-        out = []
-        while count > 0:
-            if count >= len(self.all_items):
-                out.extend(shuffle(list(self.all_items)))
-                count -= len(self.all_items)
-                continue
-            n = min(count, len(self.next_items))
-            out.extend(self.next_items[:n])
-            count -= n
-            self.next_items = self.next_items[n:]
-            if len(self.next_items) == 0:
-                self.next_items = shuffle(list(self.all_items))
-        return out
-
-    def __next__(self):
-        return self.sample(1)[0]
-
diff --git a/spaces/king007/biogpt-testing/README.md b/spaces/king007/biogpt-testing/README.md
deleted file mode 100644
index f65f5c8592678be9fa869181e2fac6ec0da96a2a..0000000000000000000000000000000000000000
--- a/spaces/king007/biogpt-testing/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: BioGpt
-emoji: 🌖
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: flash64/biogpt-testing
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/eruda.min.js b/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/eruda.min.js
deleted file mode 100644
index 0609b9e8f15d39918a3818abaf979cdb7238b3d5..0000000000000000000000000000000000000000
--- a/spaces/kira4424/Tacotron-zero-short-voice-clone/web/static/js/eruda.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*!
eruda v1.5.4 https://eruda.liriliri.io/ */ -!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.eruda=t():e.eruda=t()}("undefined"!=typeof self?self:this,function(){return function(e){function t(r){if(n[r])return n[r].exports;var i=n[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,t),i.l=!0,i.exports}var n={};return t.m=e,t.c=n,t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:r})},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="/assets/",t(t.s=82)}([function(e,t,n){"use strict";(function(e,r){function i(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(t,"__esModule",{value:!0}),t.wrap=t.viewportScale=t.unique=t.uniqId=t.tryIt=t.stripHtmlTag=t.LocalStore=t.stringify=t.type=t.ajax=t.Url=t.query=t.getFileName=t.trim=t.rtrim=t.rmCookie=t.pxToNum=t.perfNow=t.orientation=t.Store=t.Logger=t.Emitter=t.once=t.partial=t.restArgs=t.now=t.nextTick=t.detectBrowser=t.toInt=t.ms=t.toNum=t.meta=t.safeStorage=t.memStorage=t.$=t.$class=t.some=t.cloneDeep=t.mapObj=void 0,t.concat=t.$event=t.delegate=t.$show=t.$remove=t.$property=t.$offset=t.$insert=t.$css=t.$data=t.$attr=t.$safeEls=t.Select=t.MutationObserver=t.Enum=t.Class=t.toArr=t.cookie=t.decodeUriComponent=t.map=t.evalCss=t.filter=t.safeCb=t.matcher=t.ltrim=t.dateFormat=t.lpad=t.repeat=t.loadJs=t.isRegExp=t.isNull=t.isNative=t.toSrc=t.isNil=t.isNaN=t.prefix=t.isMobile=t.memoize=t.isMatch=t.isErudaEl=t.isErr=t.isEl=t.isCrossOrig=t.startWith=t.isBool=t.isEmpty=t.isStr=t.contain=t.values=t.extendOwn=t.clone=t.extend=t.defaults=t.createAssigner=t.each=t.isArrLike=t.isNum=t.isMiniProgram=t.isFn=t.isDate=t.safeGet=t.castPath=t.isArr=t.isArgs=t.objToStr=t.identity=t.getObjType=t.upperFirst=t.fullUrl=t.fileSize=t.escapeRegExp=t.escapeJsonStr=t.escapeJsStr=t.escape=t.endWith=t.optimizeCb=t.detectOs=t.freeze=t.keys=t.detectMocha=t.root=t.utf8=t.ucs2=t.toStr=t.idxOf=t.clamp=t.chunk=t.kebabCase=t.camelCase=t.splitCase=t.before=t.allKeys=t.noop=t.isBrowser=t.slice=t.has=t.inherits=t.isObj=t.isUndef=t.last=void 0;var o=n(28),a=i(o),s=n(123),u=i(s),l=n(66),c=i(l),d=n(34),f=i(d),p=n(130),h=i(p),v=n(35),g=i(v),m=n(135),_=i(m),b=n(73),y=i(b),x=n(25),w=i(x),k={},E=k.last=function(){function e(e){var t=e?e.length:0;if(t)return e[t-1]}return e}();t.last=E;var S=t.isUndef=k.isUndef=function(){function e(e){return void 0===e}return e}(),T=t.isObj=k.isObj=function(){function e(e){var t=void 0===e?"undefined":(0,w.default)(e);return!!e&&("function"===t||"object"===t)}return e}(),O=t.inherits=k.inherits=function(){function e(e,r){if(n)return e.prototype=n(r.prototype);t.prototype=r.prototype,e.prototype=new t}function t(){}var n=y.default;return e}(),A=t.has=k.has=function(){function e(e,n){return t.call(e,n)}var t=Object.prototype.hasOwnProperty;return e}(),C=t.slice=k.slice=function(){function e(e,t,n){var r=e.length;t=null==t?0:t<0?Math.max(r+t,0):Math.min(t,r),n=null==n?r:n<0?Math.max(r+n,0):Math.min(n,r);for(var i=[];t0&&(n=t.apply(this,arguments)),e<=1&&(t=null),n}}return e}(),L=t.splitCase=k.splitCase=function(){function e(e){return e=e.replace(t,"-$1").toLowerCase().replace(n,"-").replace(r,""),e.split("-")}var t=/([A-Z])/g,n=/[_.\- ]+/g,r=/(^-)|(-$)/g;return e}(),N=t.camelCase=k.camelCase=function(){function e(e){var n=L(e),r=n[0];return 
n.shift(),n.forEach(t,n),r+=n.join("")}function t(e,t){this[t]=e.replace(/\w/,function(e){return e.toUpperCase()})}return e}(),D=t.kebabCase=k.kebabCase=function(){function e(e){return L(e).join("-")}return e}(),I=(t.chunk=k.chunk=function(){function e(e,t){var n=[];t=t||1;for(var r=0,i=Math.ceil(e.length/t);rn?n:e}return e}()),K=t.idxOf=k.idxOf=function(){function e(e,t,n){return Array.prototype.indexOf.call(e,t,n)}return e}(),z=t.toStr=k.toStr=function(){function e(e){return null==e?"":e.toString()}return e}(),F=t.ucs2=k.ucs2=function(e){return{encode:function(e){return _.default.apply(String,e)},decode:function(e){for(var t=[],n=0,r=e.length;n=55296&&i<=56319&&n>6*t)+n);t>0;){r+=f(128|63&e>>6*(t-1)),t--}return r}function n(e){for(;;){if(o>=a&&l){if(e)return r();throw new Error("Invalid byte index")}if(o===a)return!1;var t=i[o];if(o++,l){if(td){if(e)return o--,r();throw new Error("Invalid continuation byte")}if(c=128,d=191,s=s<<6|63&t,++u===l){var n=s;return s=0,l=0,u=0,n}}else{if(0==(128&t))return t;if(192==(224&t))l=1,s=31&t;else if(224==(240&t))224===t&&(c=160),237===t&&(d=159),l=2,s=15&t;else{if(240!=(248&t)){if(e)return r();throw new Error("Invalid UTF-8 detected")}240===t&&(c=144),244===t&&(d=143),l=3,s=7&t}}}}function r(){var e=o-u-1;return o=e+1,s=0,l=0,u=0,c=128,d=191,i[e]}e={encode:function(e){for(var n=F.decode(e),r="",i=0,o=n.length;i-1}return e=e||(j?navigator.userAgent:""),e=e.toLowerCase(),t("windows phone")?"windows phone":t("win")?"windows":t("android")?"android":t("ipad")||t("iphone")||t("ipod")?"ios":t("mac")?"os x":t("linux")?"linux":"unknown"}return e}(),t.optimizeCb=k.optimizeCb=function(){function e(e,t,n){if(S(t))return e;switch(null==n?3:n){case 1:return function(n){return e.call(t,n)};case 3:return function(n,r,i){return e.call(t,n,r,i)};case 4:return function(n,r,i,o){return e.call(t,n,r,i,o)}}return function(){return e.apply(t,arguments)}}return e}()),G=(t.endWith=k.endWith=function(){function e(e,t){var n=e.length-t.length;return n>=0&&e.indexOf(t,n)===n}return e}(),t.escape=k.escape=function(){function e(e){return i.test(e)?e.replace(o,t):e}function t(e){return n[e]}var n=e.map={"&":"&","<":"<",">":">",'"':""","'":"'","`":"`"},r="(?:"+U(n).join("|")+")",i=new RegExp(r),o=new RegExp(r,"g");return e}(),t.escapeJsStr=k.escapeJsStr=function(){function e(e){return z(e).replace(t,function(e){switch(e){case'"':case"'":case"\\":return"\\"+e;case"\n":return"\\n";case"\r":return"\\r";case"\u2028":return"\\u2028";case"\u2029":return"\\u2029"}})}var t=/["'\\\n\r\u2028\u2029]/g;return e}()),q=(t.escapeJsonStr=k.escapeJsonStr=function(){function e(e){return G(e).replace(/\\'/g,"'").replace(/\t/g,"\\t")}return e}(),t.escapeRegExp=k.escapeRegExp=function(){function e(e){return e.replace(/\W/g,"\\$&")}return e}(),t.fileSize=k.fileSize=function(){function e(e){if(e<=0)return"0";var n=Math.floor(Math.log(e)/Math.log(1024));return+(e/Math.pow(2,10*n)).toFixed(2)+t[n]}var t=["","K","M","G","T"];return e}(),t.fullUrl=k.fullUrl=function(){function e(e){return t.href=e,t.protocol+"//"+t.host+t.pathname+t.search+t.hash}var t=document.createElement("a");return e}(),t.upperFirst=k.upperFirst=function(){function e(e){return e.length<1?e:e[0].toUpperCase()+e.slice(1)}return e}()),J=(t.getObjType=k.getObjType=function(){function e(e){return e.constructor&&e.constructor.name?e.constructor.name:q({}.toString.call(e).replace(/(\[object )|]/g,""))}return e}(),t.identity=k.identity=function(){function e(e){return e}return e}()),Y=t.objToStr=k.objToStr=function(){function e(e){return 
t.call(e)}var t=Object.prototype.toString;return e}(),Q=t.isArgs=k.isArgs=function(){function e(e){return"[object Arguments]"===Y(e)}return e}(),X=t.isArr=k.isArr=function(e){return Array.isArray||function(e){return"[object Array]"===Y(e)}}(),Z=t.castPath=k.castPath=function(){function e(e,r){if(X(e))return e;if(r&&A(r,e))return[e];var i=[];return e.replace(t,function(e,t,r,o){i.push(r?o.replace(n,"$1"):t||e)}),i}var t=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,n=/\\(\\)?/g;return e}(),ee=t.safeGet=k.safeGet=function(){function e(e,t){t=Z(t,e);var n;for(n=t.shift();!S(n);){if(null==(e=e[n]))return;n=t.shift()}return e}return e}(),te=t.isDate=k.isDate=function(){function e(e){return"[object Date]"===Y(e)}return e}(),ne=t.isFn=k.isFn=function(){function e(e){var t=Y(e);return"[object Function]"===t||"[object GeneratorFunction]"===t}return e}(),re=t.isMiniProgram=k.isMiniProgram=function(e){return"undefined"!=typeof wx&&ne(wx.openLocation)}(),ie=t.isNum=k.isNum=function(){function e(e){return"[object Number]"===Y(e)}return e}(),oe=t.isArrLike=k.isArrLike=function(){function e(e){if(!e)return!1;var n=e.length;return ie(n)&&n>=0&&n<=t&&!ne(e)}var t=Math.pow(2,53)-1;return e}(),ae=k.each=function(){function e(e,t,n){t=W(t,n);var r,i;if(oe(e))for(r=0,i=e.length;r=0}return e}(),he=t.isStr=k.isStr=function(){function e(e){return"[object String]"===Y(e)}return e}(),ve=t.isEmpty=k.isEmpty=function(){function e(e){return null==e||(oe(e)&&(X(e)||he(e)||Q(e))?0===e.length:0===U(e).length)}return e}(),ge=(t.isBool=k.isBool=function(){function e(e){return!0===e||!1===e}return e}(),t.startWith=k.startWith=function(){function e(e,t){return 0===e.indexOf(t)}return e}()),me=(t.isCrossOrig=k.isCrossOrig=function(){function e(e){return!ge(e,t)}var t=window.location.origin;return e}(),t.isEl=k.isEl=function(){function e(e){return!(!e||1!==e.nodeType)}return e}(),t.isErr=k.isErr=function(){function e(e){return"[object Error]"===Y(e)}return e}(),t.isErudaEl=k.isErudaEl=function(){function e(e){var t=e.parentNode;if(!t)return!1;for(;t;)if((t=t.parentNode)&&"eruda"===t.id)return!0;return!1}return e}(),t.isMatch=k.isMatch=function(){function e(e,t){var n=U(t),r=n.length;if(null==e)return!r;e=Object(e);for(var i=0;i0;)1&t&&(n+=e),t>>=1,e+=e;return n}}()),Se=t.lpad=k.lpad=function(){function e(e,t,n){e=z(e);var r=e.length;return n=n||" ",r0?"-":"+")+t(100*Math.floor(Math.abs(y)/60)+Math.abs(y)%60,4),S:["th","st","nd","rd"][f%10>3?0:(f%100-f%10!=10)*f%10]};return s.replace(n,function(e){return e in x?x[e]:e.slice(1,e.length-1)})}function t(e,t){return Se(z(e),t||2,"0")}var n=/d{1,4}|m{1,4}|yy(?:yy)?|([HhMsTt])\1?|[LloSZWN]|'[^']*'|'[^']*'/g,r=/\b(?:[PMCEA][SDP]T|(?:Pacific|Mountain|Central|Eastern|Atlantic) (?:Standard|Daylight|Prevailing) Time|(?:GMT|UTC)(?:[-+]\d{4})?)\b/g,i=/\d/,o=/[^-+\dA-Z]/g;return e.masks={default:"ddd mmm dd yyyy HH:MM:ss",shortDate:"m/d/yy",mediumDate:"mmm d, yyyy",longDate:"mmmm d, yyyy",fullDate:"dddd, mmmm d, yyyy",shortTime:"h:MM TT",mediumTime:"h:MM:ss TT",longTime:"h:MM:ss TT Z",isoDate:"yyyy-mm-dd",isoTime:"HH:MM:ss",isoDateTime:"yyyy-mm-dd'T'HH:MM:sso",isoUtcDateTime:"UTC:yyyy-mm-dd'T'HH:MM:ss'Z'",expiresHeaderFormat:"ddd, dd mmm yyyy HH:MM:ss 
Z"},e.i18n={dayNames:["Sun","Mon","Tue","Wed","Thu","Fri","Sat","Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],monthNames:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec","January","February","March","April","May","June","July","August","September","October","November","December"]},e}(),t.ltrim=k.ltrim=function(){function e(e,n){if(null==n)return e.replace(t,"");for(var r,i,o=0,a=e.length,s=n.length,u=!0;u&&o=a?"":e.substr(o,a)}var t=/^\s+/;return e}()),Oe=t.matcher=k.matcher=function(){function e(e){return e=de({},e),function(t){return me(t,e)}}return e}(),Ae=t.safeCb=k.safeCb=function(e){return function(e,t,n){return null==e?J:ne(e)?W(e,t,n):T(e)?Oe(e):function(e){return function(t){return null==t?void 0:t[e]}}}}(),Ce=t.filter=k.filter=function(){function e(e,t,n){var r=[];return t=Ae(t,n),ae(e,function(e,n,i){t(e,n,i)&&r.push(e)}),r}return e}(),je=(t.evalCss=k.evalCss=function(){function e(r,i){r=z(r);for(var o=0,a=n.length;o=0&&e=t[n[s]]){a=n[s];break}return+(o/t[a]).toFixed(2)+a}var t={ms:1,s:1e3};t.m=60*t.s,t.h=60*t.m,t.d=24*t.h,t.y=365.25*t.d;var n=["y","d","h","m","s"],r=/^((?:\d+)?\.?\d+) *(s|m|h|d|y)?$/;return e}(),t.toInt=k.toInt=function(){function e(e){return e?(e=et(e))-e%1:0===e?e:0}return e}()),nt=(t.detectBrowser=k.detectBrowser=function(){function e(e){e=e||(j?navigator.userAgent:""),e=e.toLowerCase();var o=t(e,"msie ");if(o)return{version:o,name:"ie"};if(r.test(e))return{version:11,name:"ie"};for(var a=0,s=i.length;a-1)return tt(e.substring(n+t.length,e.indexOf(".",n)))}var n={edge:/edge\/([0-9._]+)/,firefox:/firefox\/([0-9.]+)(?:\s|$)/,opera:/opera\/([0-9.]+)(?:\s|$)/,android:/android\s([0-9.]+)/,ios:/version\/([0-9._]+).*mobile.*safari.*/,safari:/version\/([0-9._]+).*safari/,chrome:/(?!chrom.*opr)chrom(?:e|ium)\/([0-9.]+)(:?\s|$)/},r=/trident\/7\./,i=U(n);return e}(),t.nextTick=k.nextTick=function(e){function t(e){if("function"!=typeof e)throw new TypeError(e+" is not a function");return e}return"object"===(void 0===r?"undefined":(0,w.default)(r))&&r.nextTick?r.nextTick:"function"==typeof u.default?function(e){(0,u.default)(t(e))}:function(e){setTimeout(t(e),0)}}(),t.now=k.now=function(e){return Date.now||function(){return(new Date).getTime()}}()),rt=t.restArgs=k.restArgs=function(){function e(e,t){return t=null==t?e.length-1:+t,function(){var n,r=Math.max(arguments.length-t,0),i=new Array(r);for(n=0;nwindow.innerHeight?"landscape":"portrait"}},at.mixin(e),window.addEventListener("orientationchange",function(){setTimeout(function(){e.emit("change",e.get())},200)},!1),e}({}),t.perfNow=k.perfNow=function(e){var t,n=H.performance,r=H.process;if(n&&n.now)e=function(){return n.now()};else if(r&&r.hrtime){var i=function(){var e=r.hrtime();return 1e9*e[0]+e[1]};t=i()-1e9*r.uptime(),e=function(){return(i()-t)/1e6}}else t=nt(),e=function(){return nt()-t};return e}({}),t.pxToNum=k.pxToNum=function(){function e(e){return et(e.replace("px",""))}return e}(),t.rmCookie=k.rmCookie=function(){function e(e){function t(t){return t=t||{},Pe.remove(e,t),!Pe.get(e)}var n,r=window.location,i=r.hostname,o=r.pathname,a=i.split("."),s=o.split("/"),u="",l=s.length;if(!t())for(var c=a.length-1;c>=0;c--){var d=a[c];if(""!==d){if(u=""===u?d:d+"."+u,n="/",t({domain:u,path:n})||t({domain:u}))return;for(var f=0;f=0;)for(s=!1,r=-1,i=e.charAt(o);++r=0?e.substring(0,o+1):""}var t=/\s+$/;return e}()),lt=t.trim=k.trim=function(){function e(e,n){return null==n?e.replace(t,""):Te(ut(e,n),n)}var t=/^\s+|\s+$/g;return 
e}(),ct=(t.getFileName=k.getFileName=function(){function e(e){var t=E(e.split("/"));return t.indexOf("?")>-1&&(t=lt(t.split("?")[0])),""===t?"unknown":t}return e}(),t.query=k.query=function(e){e={parse:function(e){var n={};return e=lt(e).replace(t,""),ae(e.split("&"),function(e){var t=e.split("="),r=t.shift(),i=t.length>0?t.join("="):null;r=decodeURIComponent(r),i=decodeURIComponent(i),S(n[r])?n[r]=i:X(n[r])?n[r].push(i):n[r]=[n[r],i]}),n},stringify:function(t,n){return Ce(je(t,function(t,r){return T(t)&&ve(t)?"":X(t)?e.stringify(t,r):(n?encodeURIComponent(n):encodeURIComponent(r))+"="+encodeURIComponent(t)}),function(e){return e.length>0}).join("&")}};var t=/^(\?|#|&)/g;return e}({})),dt=(t.Url=k.Url=function(e){e=Le({className:"Url",initialize:function(t){!t&&j&&(t=window.location.href),le(this,e.parse(t||""))},setQuery:function(e,t){var n=this.query;return T(e)?ae(e,function(e,t){n[t]=e}):n[e]=t,this},rmQuery:function(e){var t=this.query;return X(e)||(e=Re(e)),ae(e,function(e){delete t[e]}),this},toString:function(){return e.stringify(this)}},{parse:function(e){var i={protocol:"",auth:"",hostname:"",hash:"",query:{},port:"",pathname:"",slashes:!1},o=lt(e),a=o.match(t);if(a&&(a=a[0],i.protocol=a.toLowerCase(),o=o.substr(a.length)),a){var s="//"===o.substr(0,2);s&&(o=o.slice(2),i.slashes=!0)}if(s){for(var u=-1,l=0,c=r.length;l=200&&t<300||304===t){e=f.responseText,"xml"===s&&(e=f.responseXML);try{"json"===s&&(e=JSON.parse(e))}catch(e){}u(e,f)}else l(f);d(f)}},"GET"===r?(o=ct.stringify(o),i+=i.indexOf("?")>-1?"&"+o:"?"+o):"application/x-www-form-urlencoded"===t.contentType?T(o)&&(o=ct.stringify(o)):"application/json"===t.contentType&&T(o)&&(o=(0,a.default)(o)),f.open(r,i,!0),f.setRequestHeader("Content-Type",t.contentType),c>0&&(n=setTimeout(function(){f.onreadystatechange=M,f.abort(),l(f,"timeout"),d(f)},c)),f.send("GET"===r?null:o),f}function t(e,t,n,r){return ne(t)&&(r=n,n=t,t={}),{url:e,data:t,success:n,dataType:r}}return e.setting={type:"GET",success:M,error:M,complete:M,dataType:"json",contentType:"application/x-www-form-urlencoded",data:{},xhr:function(){return new XMLHttpRequest},timeout:0},e.get=function(){return e(t.apply(null,arguments))},e.post=function(){var n=t.apply(null,arguments);return n.type="POST",e(n)},e}(),t.type=k.type=function(){function e(e){if(null===e)return"null";if(void 0===e)return"undefined";if(ye(e))return"nan";var n=Y(e).match(t);return n?n[1].toLowerCase():""}var t=/^\[object\s+(.*?)]$/;return e}()),ft=t.stringify=k.stringify=function(){function e(e,n){return(0,a.default)(e,t(),n)}function t(){var e=[],t=[];return function(n,r){if(e.length>0){var i=e.indexOf(this);i>-1?(e.splice(i+1),t.splice(i,1/0,n)):(e.push(this),t.push(n));var o=e.indexOf(r);o>-1&&(r=e[0]===r?"[Circular ~]":"[Circular ~."+t.slice(0,o).join(".")+"]")}else e.push(r);return ke(r)||ne(r)?r="["+q(dt(r))+" "+z(r)+"]":S(r)&&(r=null),r}}return e}();t.LocalStore=k.LocalStore=function(e){var t=Xe("local");return st.extend({initialize:function(e,n){this._name=e;var r=t.getItem(e);try{r=JSON.parse(r)}catch(e){r={}}T(r)||(r={}),n=ue(r,n),this.callSuper(st,"initialize",[n])},save:function(e){if(ve(e))return t.removeItem(this._name);t.setItem(this._name,ft(e))}})}(),t.stripHtmlTag=k.stripHtmlTag=function(){function e(e){return e.replace(t,"")}var t=/<[^>]*>/g;return e}(),t.tryIt=k.tryIt=function(){function e(e,t){t=t||M;try{t(null,e())}catch(e){return void t(e)}}return e}(),t.uniqId=k.uniqId=function(){function e(e){var n=++t+"";return e?e+n:n}var t=0;return e}(),t.unique=k.unique=function(){function 
e(e,n){return n=n||t,Ce(e,function(e,t,r){for(var i=r.length;++t= 2.0.0-beta.1",7:">= 4.0.0"};t.REVISION_CHANGES=f;r.prototype={constructor:r,logger:d.default,log:d.default.log,registerHelper:function(e,t){if("[object Object]"===o.toString.call(e)){if(t)throw new s.default("Arg not supported with multiple helpers");o.extend(this.helpers,e)}else this.helpers[e]=t},unregisterHelper:function(e){delete this.helpers[e]},registerPartial:function(e,t){if("[object Object]"===o.toString.call(e))o.extend(this.partials,e);else{if(void 0===t)throw new s.default('Attempting to register a partial called "'+e+'" as undefined');this.partials[e]=t}},unregisterPartial:function(e){delete this.partials[e]},registerDecorator:function(e,t){if("[object Object]"===o.toString.call(e)){if(t)throw new s.default("Arg not supported with multiple decorators");o.extend(this.decorators,e)}else this.decorators[e]=t},unregisterDecorator:function(e){delete this.decorators[e]}};var p=d.default.log;t.log=p,t.createFrame=o.createFrame,t.logger=d.default},function(e,t){"use strict";function n(e){return c[e]}function r(e){for(var t=1;t":">",'"':""","'":"'","`":"`","=":"="},d=/[&<>"'`=]/g,f=/[&<>"'`=]/,p=Object.prototype.toString;t.toString=p;var h=function(e){return"function"==typeof e};h(/x/)&&(t.isFunction=h=function(e){return"function"==typeof e&&"[object Function]"===p.call(e)}),t.isFunction=h;var v=Array.isArray||function(e){return!(!e||"object"!=typeof e)&&"[object Array]"===p.call(e)};t.isArray=v},function(e,t,n){"use strict";function r(e,t){var n=t&&t.loc,a=void 0,s=void 0;n&&(a=n.start.line,s=n.start.column,e+=" - "+a+":"+s);for(var u=Error.prototype.constructor.call(this,e),l=0;l0?(n.ids&&(n.ids=[n.name]),e.helpers.each(t,n)):i(this);if(n.data&&n.ids){var a=r.createFrame(n.data);a.contextPath=r.appendContextPath(n.data.contextPath,n.name),n={data:a}}return o(t,n)})},e.exports=t.default},function(e,t,n){"use strict";var r=n(2).default;t.__esModule=!0;var i=n(4),o=n(5),a=r(o);t.default=function(e){e.registerHelper("each",function(e,t){function n(t,n,o){l&&(l.key=t,l.index=n,l.first=0===n,l.last=!!o,c&&(l.contextPath=c+t)),u+=r(e[t],{data:l,blockParams:i.blockParams([e[t],t],[c+t,null])})}if(!t)throw new a.default("Must pass iterator to #each");var r=t.fn,o=t.inverse,s=0,u="",l=void 0,c=void 0;if(t.data&&t.ids&&(c=i.appendContextPath(t.data.contextPath,t.ids[0])+"."),i.isFunction(e)&&(e=e.call(this)),t.data&&(l=i.createFrame(t.data)),e&&"object"==typeof e)if(i.isArray(e))for(var d=e.length;s=0?t:parseInt(e,10)}return e},log:function(e){if(e=i.lookupLevel(e),"undefined"!=typeof console&&i.lookupLevel(i.level)<=e){var t=i.methodMap[e];console[t]||(t="log");for(var n=arguments.length,r=Array(n>1?n-1:0),o=1;o3&&void 0!==arguments[3]?arguments[3]:["#2196f3","#707d8b","#f44336","#009688","#ffc107"],i=this._genId("settings");return this._settings.push({config:e,key:t,id:i}),this._$el.append(this._colorTpl({desc:n,colors:r,id:i,val:e.get(t)})),this}},{key:"select",value:function(e,t,n,r){var i=this._genId("settings");return this._settings.push({config:e,key:t,id:i}),this._$el.append(this._selectTpl({desc:n,selections:r,id:i,val:e.get(t)})),this}},{key:"range",value:function(e,t,n,r){var i=r.min,o=void 0===i?0:i,a=r.max,s=void 0===a?1:a,u=r.step,l=void 0===u?.1:u,c=this._genId("settings");this._settings.push({config:e,key:t,min:o,max:s,step:l,id:c});var d=e.get(t);return this._$el.append(this._rangeTpl({desc:n,min:o,max:s,step:l,val:d,progress:y(d,o,s),id:c})),this}},{key:"separator",value:function(){return this._$el.append('
    '),this}},{key:"text",value:function(e){return this._$el.append('
    '+e+"
    "),this}},{key:"_cleanSeparator",value:function(){function e(e){return"eruda-separator"===e.getAttribute("class")}for(var t=(0,_.clone)(this._$el.get(0).children),n=0,r=t.length;n0?r:n)(e)}},function(e,t,n){var r=n(44)("keys"),i=n(31);e.exports=function(e){return r[e]||(r[e]=i(e))}},function(e,t,n){var r=n(11),i=r["__core-js_shared__"]||(r["__core-js_shared__"]={});e.exports=function(e){return i[e]||(i[e]={})}},function(e,t){e.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(e,t){t.f=Object.getOwnPropertySymbols},function(e,t,n){"use strict";var r=n(96)(!0);n(67)(String,"String",function(e){this._t=String(e),this._i=0},function(){var e,t=this._t,n=this._i;return n>=t.length?{value:void 0,done:!0}:(e=r(t,n),this._i+=e.length,{value:e,done:!1})})},function(e,t){e.exports=!0},function(e,t,n){var r=n(21),i=n(98),o=n(45),a=n(43)("IE_PROTO"),s=function(){},u=function(){var e,t=n(39)("iframe"),r=o.length;for(t.style.display="none",n(69).appendChild(t),t.src="javascript:",e=t.contentWindow.document,e.open(),e.write("' - if render_latex: - js += """\ - - - """ - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/tonyassi/video-face-swap/DeepFakeAI/predictor.py b/spaces/tonyassi/video-face-swap/DeepFakeAI/predictor.py deleted file mode 100644 index 11acb527b039807fd8e01035a1fc8e4e28433da3..0000000000000000000000000000000000000000 --- a/spaces/tonyassi/video-face-swap/DeepFakeAI/predictor.py +++ /dev/null @@ -1,46 +0,0 @@ -import threading -import numpy -import opennsfw2 -from PIL import Image -from keras import Model - -from DeepFakeAI.typing import Frame - -PREDICTOR = None -THREAD_LOCK = threading.Lock() -MAX_PROBABILITY = 0.75 - - -def get_predictor() -> Model: - global PREDICTOR - - with THREAD_LOCK: - if PREDICTOR is None: - PREDICTOR = opennsfw2.make_open_nsfw_model() - return PREDICTOR - - -def clear_predictor() -> None: - global PREDICTOR - - PREDICTOR = None - - -def predict_frame(target_frame : Frame) -> bool: - return False - #image = Image.fromarray(target_frame) - #image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO) - #views = numpy.expand_dims(image, axis = 0) - #_, probability = get_predictor().predict(views)[0] - #return probability > MAX_PROBABILITY - - -def predict_image(target_path : str) -> bool: - return False - #return opennsfw2.predict_image(target_path) > MAX_PROBABILITY - - -def predict_video(target_path : str) -> bool: - return False - #_, probabilities = opennsfw2.predict_video_frames(video_path = target_path, frame_interval = 100) - #return any(probability > MAX_PROBABILITY for probability in probabilities) diff --git a/spaces/towardsai-buster/buster/cfg.py b/spaces/towardsai-buster/buster/cfg.py deleted file mode 100644 index d4e0601f160280d6842bd06db63939d070188e94..0000000000000000000000000000000000000000 --- a/spaces/towardsai-buster/buster/cfg.py +++ /dev/null @@ -1,159 +0,0 @@ -import logging -import os - -from buster.busterbot import Buster, BusterConfig -from buster.completers import ChatGPTCompleter, DocumentAnswerer -from buster.formatters.documents import DocumentsFormatterJSON -from buster.formatters.prompts import PromptFormatter -from 
buster.retriever import DeepLakeRetriever, Retriever -from buster.tokenizers import GPTTokenizer -from buster.validators import QuestionAnswerValidator, Validator - -from utils import init_mongo_db - -MONGODB_URI = os.getenv("MONGODB_URI") -mongo_db = init_mongo_db(uri=MONGODB_URI, db_name="towardsai-buster") - - -logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO) - -# required -ACTIVELOOP_TOKEN = os.getenv("ACTIVELOOP_TOKEN") -if ACTIVELOOP_TOKEN is None: - logger.warning("No activeloop token found, you will not be able to fetch data.") - -DEEPLAKE_DATASET = os.getenv("DEEPLAKE_DATASET", "ai-tutor-dataset") -DEEPLAKE_ORG = os.getenv("DEEPLAKE_ORG", "towards_ai") - -# if you want to use a local dataset, set the env. variable, it overrides all others -DEEPLAKE_DATASET_PATH = os.getenv( - "DEEPLAKE_DATASET_PATH", f"hub://{DEEPLAKE_ORG}/{DEEPLAKE_DATASET}" -) -logger.info(f"{DEEPLAKE_DATASET_PATH=}") - -example_questions = [ - "What is the LLama model?", - "What is a Large Language Model?", - "What is an embedding?", -] - - -buster_cfg = BusterConfig( - validator_cfg={ - "unknown_response_templates": [ - "I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the library or its usage. Is there anything else I can assist you with?", - ], - "unknown_threshold": 0.85, - "embedding_model": "text-embedding-ada-002", - "use_reranking": True, - "invalid_question_response": "This question does not seem relevant my AI knowledge. If the question is related to AI, please send us feedback! \n PS: I'm still learning, so I might not know the answer to your question, you can also try without acronyms in your question. Email us at louis@towardsai.net for any issue with the bot!", - "check_question_prompt": """You are a chatbot, answering questions about large language models and artificial intelligence. -Your job is to determine whether user's question is valid or not. Users will not always submit a question either. -Users will ask all sorts of questions, and some might be tangentially related to artificial intelligence (AI), machine learning (ML) and natural language processing (NLP). -Users will learn to build LLM-powered apps, with LangChain & Deep Lake among other technologies including OpenAI, RAG and more. -As long as a question is somewhat related to the topic of AI, ML, NLP and techniques used in AI like vectors, memories, embeddings, tokenization, encoding, etc., respond 'true'. If a question is on a different subject or unrelated, respond 'false'. -Make sure the question is a valid question. - -Here is a list of acronyms and concepts related to Artificial Intelligence AI that you can accept from users, they can be uppercase or lowercase: -[TQL, Deep Memory, LLM, Llama, GPT, NLP, RLHF, RLAIF, Mistral, SFT, Cohere, NanoGPT, ReAct, LoRA, QLoRA, LMMOps, Alpaca, Flan, Weights and Biases, W&B, IDEFICS, Flamingo, LLaVA, BLIP, Falcon] - -Here are some examples: - -Q: How can I setup my own chatbot? -true - -Q: What is the meaning of life? -false - -Q: What is rlhf? 
-true - -Q: -""", - "completion_kwargs": { - "model": "gpt-3.5-turbo", - "stream": False, - "temperature": 0, - }, - }, - retriever_cfg={ - "path": f"{DEEPLAKE_DATASET_PATH}", - "top_k": 5, - "thresh": 0.55, - "max_tokens": 13000, - "embedding_model": "text-embedding-ada-002", - "exec_option": "compute_engine", - "use_tql": True, - "deep_memory": False, - "activeloop_token": ACTIVELOOP_TOKEN, - }, - documents_answerer_cfg={ - "no_documents_message": "No blog posts are available for this question.", - }, - completion_cfg={ - "completion_kwargs": { - "model": "gpt-3.5-turbo-16k", - "stream": True, - "temperature": 0, - }, - }, - tokenizer_cfg={ - "model_name": "gpt-3.5-turbo-16k", - }, - documents_formatter_cfg={ - "max_tokens": 13500, - "columns": ["content", "source", "title"], - }, - prompt_formatter_cfg={ - "max_tokens": 13500, - "text_before_docs": ( - "You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine tuning models, giving memory to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, Langchain, making LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context." - "You are provided information found in the json documentation. " - "Only respond with information inside the json documentation. DO NOT use additional information, even if you know the answer. " - "If the answer is in the documentation, answer the question (depending on the questions and the variety of relevant information in the json documentation, answer in 5 paragraphs." - "If the documentation does not discuss the topic related to the question, kindly respond that you cannot answer the question because it is not part of your knowledge. " - "Here is the information you can use (json documentation) in order: " - ), - "text_after_docs": ( - "REMEMBER:\n" - "You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine tuning models, giving memory to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, Langchain, making LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context." - "You are provided information found in the json documentation. " - "Here are the rules you must follow:\n" - "* Only respond with information inside the json documentation. DO NOT provide additional information, even if you know the answer. " - "* If the answer is in the documentation, answer the question (depending on the questions and the variety of relevant information in the json documentation. Your answer needs to be pertinent and not redundant giving a clear explanation as if you were a teacher. " - "* If the documentation does not discuss the topic related to the question, kindly respond that you cannot answer the question because it is not part of your knowledge. " - "* Only use information summarized from the json documentation, do not respond otherwise. " - "* Do not refer to the json documentation directly, but use the instructions provided within it to answer questions. 
" - "* Do not reference any links, urls or hyperlinks in your answers.\n" - "* Make sure to format your answers in Markdown format, including code block and snippets.\n" - "* If you do not know the answer to a question, or if it is completely irrelevant to the AI courses, simply reply with:\n" - "'I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?'" - "For example:\n" - "What is the meaning of life for a qa bot?\n" - "I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?" - "Now answer the following question:\n" - ), - }, -) - - -def setup_buster(buster_cfg): - retriever: Retriever = DeepLakeRetriever(**buster_cfg.retriever_cfg) - tokenizer = GPTTokenizer(**buster_cfg.tokenizer_cfg) - document_answerer: DocumentAnswerer = DocumentAnswerer( - completer=ChatGPTCompleter(**buster_cfg.completion_cfg), - documents_formatter=DocumentsFormatterJSON( - tokenizer=tokenizer, **buster_cfg.documents_formatter_cfg - ), - prompt_formatter=PromptFormatter( - tokenizer=tokenizer, **buster_cfg.prompt_formatter_cfg - ), - **buster_cfg.documents_answerer_cfg, - ) - validator: Validator = QuestionAnswerValidator(**buster_cfg.validator_cfg) - buster: Buster = Buster( - retriever=retriever, document_answerer=document_answerer, validator=validator - ) - - return buster diff --git a/spaces/ucalyptus/PTI/scripts/latent_creators/sg2_plus_latent_creator.py b/spaces/ucalyptus/PTI/scripts/latent_creators/sg2_plus_latent_creator.py deleted file mode 100644 index 3f11ce883699a6801510712c8ee2eaa7a63fac1d..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/scripts/latent_creators/sg2_plus_latent_creator.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch -from configs import global_config, paths_config -from scripts.latent_creators.base_latent_creator import BaseLatentCreator -from training.projectors import w_plus_projector - - -class SG2PlusLatentCreator(BaseLatentCreator): - - def __init__(self, use_wandb=False, projection_steps=2000): - super().__init__(paths_config.sg2_plus_results_keyword, use_wandb=use_wandb) - - self.projection_steps = projection_steps - - def run_projection(self, fname, image): - image = torch.squeeze((image.to(global_config.device) + 1) / 2) * 255 - w = w_plus_projector.project(self.old_G, image, device=torch.device(global_config.device), - num_steps=self.projection_steps, w_name=fname, use_wandb=self.use_wandb) - - return w - - -if __name__ == '__main__': - id_change_report = SG2PlusLatentCreator() - id_change_report.create_latents() diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Bioshock Infinite Version 1.1.25.5165 All Dlcs Repack Mr DJ Hack Offline.md b/spaces/usbethFlerru/sovits-modelsV2/example/Bioshock Infinite Version 1.1.25.5165 All Dlcs Repack Mr DJ Hack Offline.md deleted file mode 100644 index 8e6aecf5e480d38703b99d815589b04a9269c3f4..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Bioshock Infinite Version 1.1.25.5165 All Dlcs Repack Mr DJ Hack Offline.md +++ /dev/null @@ -1,9 +0,0 @@ -

    Bioshock Infinite Version 1.1.25.5165 all Dlc's Repack Mr DJ Hack Offline


    DOWNLOAD →→→ https://urlcod.com/2uyUY1



    - -bioshock infinite 1.1.25.5165 all dlc's repack mr dj offline access. -BioShock Infinite is one of the most amazing titles ever created by Irrational Games. -Since its release, BioShock Infinite has been the biggest and most popular first-person shooter game ever made. -Its events unfold in the depths of the majestic Atlantean Directory, and the plot tells about the struggle against the insidious government and the struggle for your happiness, freedom and the freedom of other people. 8a78ff9644

    diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Cdma Workshop Tool 27 Cracked Version Of 59 LINK.md b/spaces/usbethFlerru/sovits-modelsV2/example/Cdma Workshop Tool 27 Cracked Version Of 59 LINK.md deleted file mode 100644 index 91585debf2f7da51b4ef53556abba55ff2c288d4..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Cdma Workshop Tool 27 Cracked Version Of 59 LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Cdma Workshop Tool 27 Cracked Version Of 59


    Download –––––>>> https://urlcod.com/2uyV01



    - - aaccfb2cb3

    diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/model_ensembling.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/model_ensembling.md deleted file mode 100644 index 3e13435048ecf8b133cca0dad677432b8c1a5012..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/model_ensembling.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -comments: true -description: Learn how to ensemble YOLOv5 models for improved mAP and Recall! Clone the repo, install requirements, and start testing and inference. -keywords: YOLOv5, object detection, ensemble learning, mAP, Recall ---- - -📚 This guide explains how to use YOLOv5 🚀 **model ensembling** during testing and inference for improved mAP and Recall. -UPDATED 25 September 2022. - -From [https://en.wikipedia.org/wiki/Ensemble_learning](https://en.wikipedia.org/wiki/Ensemble_learning): -> Ensemble modeling is a process where multiple diverse models are created to predict an outcome, either by using many different modeling algorithms or using different training data sets. The ensemble model then aggregates the prediction of each base model and results in once final prediction for the unseen data. The motivation for using ensemble models is to reduce the generalization error of the prediction. As long as the base models are diverse and independent, the prediction error of the model decreases when the ensemble approach is used. The approach seeks the wisdom of crowds in making a prediction. Even though the ensemble model has multiple base models within the model, it acts and performs as a single model. - -## Before You Start - -Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). - -```bash -git clone https://github.com/ultralytics/yolov5 # clone -cd yolov5 -pip install -r requirements.txt # install -``` - -## Test Normally - -Before ensembling we want to establish the baseline performance of a single model. This command tests YOLOv5x on COCO val2017 at image size 640 pixels. `yolov5x.pt` is the largest and most accurate model available. Other options are `yolov5s.pt`, `yolov5m.pt` and `yolov5l.pt`, or you own checkpoint from training a custom dataset `./weights/best.pt`. For details on all available models please see our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints). - -```bash -python val.py --weights yolov5x.pt --data coco.yaml --img 640 --half -``` - -Output: - -```shell -val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True -YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) - -Fusing layers... 
-Model Summary: 476 layers, 87730285 parameters, 0 gradients - -val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2846.03it/s] -val: New cache created: ../datasets/coco/val2017.cache - Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [02:30<00:00, 1.05it/s] - all 5000 36335 0.746 0.626 0.68 0.49 -Speed: 0.1ms pre-process, 22.4ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640) # <--- baseline speed - -Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json... -... - Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504 # <--- baseline mAP - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688 - Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546 - Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351 - Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551 - Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681 # <--- baseline mAR - Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524 - Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826 -``` - -## Ensemble Test - -Multiple pretrained models may be ensembled together at test and inference time by simply appending extra models to the `--weights` argument in any existing val.py or detect.py command. This example tests an ensemble of 2 models together: - -- YOLOv5x -- YOLOv5l6 - -```bash -python val.py --weights yolov5x.pt yolov5l6.pt --data coco.yaml --img 640 --half -``` - -Output: - -```shell -val: data=./data/coco.yaml, weights=['yolov5x.pt', 'yolov5l6.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True -YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) - -Fusing layers... -Model Summary: 476 layers, 87730285 parameters, 0 gradients # Model 1 -Fusing layers... -Model Summary: 501 layers, 77218620 parameters, 0 gradients # Model 2 -Ensemble created with ['yolov5x.pt', 'yolov5l6.pt'] # Ensemble notice - -val: Scanning '../datasets/coco/val2017.cache' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:00<00:00, 49695545.02it/s] - Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [03:58<00:00, 1.52s/it] - all 5000 36335 0.747 0.637 0.692 0.502 -Speed: 0.1ms pre-process, 39.5ms inference, 2.0ms NMS per image at shape (32, 3, 640, 640) # <--- ensemble speed - -Evaluating pycocotools mAP... saving runs/val/exp3/yolov5x_predictions.json... -... 
- Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.515 # <--- ensemble mAP - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.699 - Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.557 - Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.356 - Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.563 - Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.668 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.387 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.638 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.689 # <--- ensemble mAR - Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.526 - Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.743 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.844 -``` - -## Ensemble Inference - -Append extra models to the `--weights` argument to run ensemble inference: - -```bash -python detect.py --weights yolov5x.pt yolov5l6.pt --img 640 --source data/images -``` - -Output: - -```bash -detect: weights=['yolov5x.pt', 'yolov5l6.pt'], source=data/images, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_width=3, hide_labels=False, hide_conf=False, half=False -YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) - -Fusing layers... -Model Summary: 476 layers, 87730285 parameters, 0 gradients -Fusing layers... -Model Summary: 501 layers, 77218620 parameters, 0 gradients -Ensemble created with ['yolov5x.pt', 'yolov5l6.pt'] - -image 1/2 /content/yolov5/data/images/bus.jpg: 640x512 4 persons, 1 bus, 1 tie, Done. (0.063s) -image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 3 persons, 2 ties, Done. (0.056s) -Results saved to runs/detect/exp2 -Done. (0.223s) -``` - - - -## Environments - -YOLOv5 is designed to be run in the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) -- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls - -## Status - -YOLOv5 CI - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/running_on_jetson_nano.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/running_on_jetson_nano.md deleted file mode 100644 index f41d3e3b93e4620ae64d50b0b5050b49bdaf0f13..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/running_on_jetson_nano.md +++ /dev/null @@ -1,320 +0,0 @@ ---- -comments: true -description: Deploy YOLOv5 on NVIDIA Jetson using TensorRT and DeepStream SDK for high performance inference. Step-by-step guide with code snippets. -keywords: YOLOv5, NVIDIA Jetson, TensorRT, DeepStream SDK, deployment, AI at edge, PyTorch, computer vision, object detection, CUDA ---- - -# Deploy on NVIDIA Jetson using TensorRT and DeepStream SDK - -📚 This guide explains how to deploy a trained model into NVIDIA Jetson Platform and perform inference using TensorRT and DeepStream SDK. Here we use TensorRT to maximize the inference performance on the Jetson platform. -UPDATED 18 November 2022. - -## Hardware Verification - -We have tested and verified this guide on the following Jetson devices - -- [Seeed reComputer J1010 built with Jetson Nano module](https://www.seeedstudio.com/Jetson-10-1-A0-p-5336.html) -- [Seeed reComputer J2021 built with Jetson Xavier NX module](https://www.seeedstudio.com/reComputer-J2021-p-5438.html) - -## Before You Start - -Make sure you have properly installed **JetPack SDK** with all the **SDK Components** and **DeepStream SDK** on the Jetson device as this includes CUDA, TensorRT and DeepStream SDK which are needed for this guide. - -JetPack SDK provides a full development environment for hardware-accelerated AI-at-the-edge development. All Jetson modules and developer kits are supported by JetPack SDK. - -There are two major installation methods including, - -1. SD Card Image Method -2. NVIDIA SDK Manager Method - -You can find a very detailed installation guide from NVIDIA [official website](https://developer.nvidia.com/jetpack-sdk-461). You can also find guides corresponding to the above-mentioned [reComputer J1010](https://wiki.seeedstudio.com/reComputer_J1010_J101_Flash_Jetpack) and [reComputer J2021](https://wiki.seeedstudio.com/reComputer_J2021_J202_Flash_Jetpack). - -## Install Necessary Packages - -- **Step 1.** Access the terminal of Jetson device, install pip and upgrade it - -```sh -sudo apt update -sudo apt install -y python3-pip -pip3 install --upgrade pip -``` - -- **Step 2.** Clone the following repo - -```sh -git clone https://github.com/ultralytics/yolov5 -``` - -- **Step 3.** Open **requirements.txt** - -```sh -cd yolov5 -vi requirements.txt -``` - -- **Step 5.** Edit the following lines. Here you need to press **i** first to enter editing mode. 
Press **ESC**, then type **:wq** to save and quit - -```sh -# torch>=1.7.0 -# torchvision>=0.8.1 -``` - -**Note:** torch and torchvision are excluded for now because they will be installed later. - -- **Step 6.** install the below dependency - -```sh -sudo apt install -y libfreetype6-dev -``` - -- **Step 7.** Install the necessary packages - -```sh -pip3 install -r requirements.txt -``` - -## Install PyTorch and Torchvision - -We cannot install PyTorch and Torchvision from pip because they are not compatible to run on Jetson platform which is based on **ARM aarch64 architecture**. Therefore, we need to manually install pre-built PyTorch pip wheel and compile/ install Torchvision from source. - -Visit [this page](https://forums.developer.nvidia.com/t/pytorch-for-jetson) to access all the PyTorch and Torchvision links. - -Here are some of the versions supported by JetPack 4.6 and above. - -**PyTorch v1.10.0** - -Supported by JetPack 4.4 (L4T R32.4.3) / JetPack 4.4.1 (L4T R32.4.4) / JetPack 4.5 (L4T R32.5.0) / JetPack 4.5.1 (L4T R32.5.1) / JetPack 4.6 (L4T R32.6.1) with Python 3.6 - -**file_name:** torch-1.10.0-cp36-cp36m-linux_aarch64.whl -**URL:** [https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl](https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl) - -**PyTorch v1.12.0** - -Supported by JetPack 5.0 (L4T R34.1.0) / JetPack 5.0.1 (L4T R34.1.1) / JetPack 5.0.2 (L4T R35.1.0) with Python 3.8 - -**file_name:** torch-1.12.0a0+2c916ef.nv22.3-cp38-cp38-linux_aarch64.whl -**URL:** [https://developer.download.nvidia.com/compute/redist/jp/v50/pytorch/torch-1.12.0a0+2c916ef.nv22.3-cp38-cp38-linux_aarch64.whl](https://developer.download.nvidia.com/compute/redist/jp/v50/pytorch/torch-1.12.0a0+2c916ef.nv22.3-cp38-cp38-linux_aarch64.whl) - -- **Step 1.** Install torch according to your JetPack version in the following format - -```sh -wget -O -pip3 install -``` - -For example, here we are running **JP4.6.1**, and therefore we choose **PyTorch v1.10.0** - -```sh -cd ~ -sudo apt-get install -y libopenblas-base libopenmpi-dev -wget https://nvidia.box.com/shared/static/fjtbno0vpo676a25cgvuqc1wty0fkkg6.whl -O torch-1.10.0-cp36-cp36m-linux_aarch64.whl -pip3 install torch-1.10.0-cp36-cp36m-linux_aarch64.whl -``` - -- **Step 2.** Install torchvision depending on the version of PyTorch that you have installed. 
For example, we chose **PyTorch v1.10.0**, which means, we need to choose **Torchvision v0.11.1** - -```sh -sudo apt install -y libjpeg-dev zlib1g-dev -git clone --branch v0.11.1 https://github.com/pytorch/vision torchvision -cd torchvision -sudo python3 setup.py install -``` - -Here a list of the corresponding torchvision version that you need to install according to the PyTorch version: - -- PyTorch v1.10 - torchvision v0.11.1 -- PyTorch v1.12 - torchvision v0.13.0 - -## DeepStream Configuration for YOLOv5 - -- **Step 1.** Clone the following repo - -```sh -cd ~ -git clone https://github.com/marcoslucianops/DeepStream-Yolo -``` - -- **Step 2.** Copy **gen_wts_yoloV5.py** from **DeepStream-Yolo/utils** into **yolov5** directory - -```sh -cp DeepStream-Yolo/utils/gen_wts_yoloV5.py yolov5 -``` - -- **Step 3.** Inside the yolov5 repo, download **pt file** from YOLOv5 releases (example for YOLOv5s 6.1) - -```sh -cd yolov5 -wget https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt -``` - -- **Step 4.** Generate the **cfg** and **wts** files - -```sh -python3 gen_wts_yoloV5.py -w yolov5s.pt -``` - -**Note**: To change the inference size (default: 640) - -```sh --s SIZE ---size SIZE --s HEIGHT WIDTH ---size HEIGHT WIDTH - -Example for 1280: - --s 1280 -or --s 1280 1280 -``` - -- **Step 5.** Copy the generated **cfg** and **wts** files into the **DeepStream-Yolo** folder - -```sh -cp yolov5s.cfg ~/DeepStream-Yolo -cp yolov5s.wts ~/DeepStream-Yolo -``` - -- **Step 6.** Open the **DeepStream-Yolo** folder and compile the library - -```sh -cd ~/DeepStream-Yolo -CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.1 -CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.0.1 / 6.0 -``` - -- **Step 7.** Edit the **config_infer_primary_yoloV5.txt** file according to your model - -```sh -[property] -... -custom-network-config=yolov5s.cfg -model-file=yolov5s.wts -... -``` - -- **Step 8.** Edit the **deepstream_app_config** file - -```sh -... -[primary-gie] -... -config-file=config_infer_primary_yoloV5.txt -``` - -- **Step 9.** Change the video source in **deepstream_app_config** file. Here a default video file is loaded as you can see below - -```sh -... -[source0] -... -uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 -``` - -## Run the Inference - -```sh -deepstream-app -c deepstream_app_config.txt -``` - -
    - -The above result is running on **Jetson Xavier NX** with **FP32** and **YOLOv5s 640x640**. We can see that the **FPS** is around **30**. - -## INT8 Calibration - -If you want to use INT8 precision for inference, you need to follow the steps below - -- **Step 1.** Install OpenCV - -```sh -sudo apt-get install libopencv-dev -``` - -- **Step 2.** Compile/recompile the **nvdsinfer_custom_impl_Yolo** library with OpenCV support - -```sh -cd ~/DeepStream-Yolo -CUDA_VER=11.4 OPENCV=1 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.1 -CUDA_VER=10.2 OPENCV=1 make -C nvdsinfer_custom_impl_Yolo # for DeepStream 6.0.1 / 6.0 -``` - -- **Step 3.** For COCO dataset, download the [val2017](https://drive.google.com/file/d/1gbvfn7mcsGDRZ_luJwtITL-ru2kK99aK/view?usp=sharing), extract, and move to **DeepStream-Yolo** folder - -- **Step 4.** Make a new directory for calibration images - -```sh -mkdir calibration -``` - -- **Step 5.** Run the following to select 1000 random images from COCO dataset to run calibration - -```sh -for jpg in $(ls -1 val2017/*.jpg | sort -R | head -1000); do \ - cp ${jpg} calibration/; \ -done -``` - -**Note:** NVIDIA recommends at least 500 images to get a good accuracy. On this example, 1000 images are chosen to get better accuracy (more images = more accuracy). Higher INT8_CALIB_BATCH_SIZE values will result in more accuracy and faster calibration speed. Set it according to you GPU memory. You can set it from **head -1000**. For example, for 2000 images, **head -2000**. This process can take a long time. - -- **Step 6.** Create the **calibration.txt** file with all selected images - -```sh -realpath calibration/*jpg > calibration.txt -``` - -- **Step 7.** Set environment variables - -```sh -export INT8_CALIB_IMG_PATH=calibration.txt -export INT8_CALIB_BATCH_SIZE=1 -``` - -- **Step 8.** Update the **config_infer_primary_yoloV5.txt** file - -From - -```sh -... -model-engine-file=model_b1_gpu0_fp32.engine -#int8-calib-file=calib.table -... -network-mode=0 -... -``` - -To - -```sh -... -model-engine-file=model_b1_gpu0_int8.engine -int8-calib-file=calib.table -... -network-mode=1 -... -``` - -- **Step 9.** Run the inference - -```sh -deepstream-app -c deepstream_app_config.txt -``` - -
    - -The above result is running on **Jetson Xavier NX** with **INT8** and **YOLOv5s 640x640**. We can see that the **FPS** is around **60**. - -## Benchmark results - -The following table summarizes how different models perform on **Jetson Xavier NX**. - -| Model Name | Precision | Inference Size | Inference Time (ms) | FPS | -|------------|-----------|----------------|---------------------|-----| -| YOLOv5s | FP32 | 320x320 | 16.66 | 60 | -| | FP32 | 640x640 | 33.33 | 30 | -| | INT8 | 640x640 | 16.66 | 60 | -| YOLOv5n | FP32 | 640x640 | 16.66 | 60 | - -### Additional - -This tutorial is written by our friends at seeed @lakshanthad and Elaine \ No newline at end of file diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/scripts/get_coco128.sh b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/scripts/get_coco128.sh deleted file mode 100644 index 73897916a219450cb45eebe80ac8701725771b42..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/data/scripts/get_coco128.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Ultralytics YOLO 🚀, AGPL-3.0 license -# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) -# Example usage: bash data/scripts/get_coco128.sh -# parent -# ├── ultralytics -# └── datasets -# └── coco128 ← downloads here - -# Download/unzip images and labels -d='../datasets' # unzip directory -url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco128.zip' # or 'coco128-segments.zip', 68 MB -echo 'Downloading' $url$f ' ...' -curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & - -wait # finish background tasks diff --git a/spaces/vobecant/DaS/segmenter_model/picie_model.py b/spaces/vobecant/DaS/segmenter_model/picie_model.py deleted file mode 100644 index d5cbdf6b8824cd86a1e303905699bec18179c6fb..0000000000000000000000000000000000000000 --- a/spaces/vobecant/DaS/segmenter_model/picie_model.py +++ /dev/null @@ -1,82 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from . 
import backbone_picie as backbone - - -class PanopticFPN(nn.Module): - def __init__(self, args): - super(PanopticFPN, self).__init__() - self.backbone = backbone.__dict__[args.arch](pretrained=args.pretrain) - if args.arch == 'vit_small': - self.decoder = FPNDecoderViT(args) - else: - self.decoder = FPNDecoder(args) - - def forward(self, x, encoder_features=False, decoder_features=False): - feats = self.backbone(x) - dec_outs = self.decoder(feats) - - if encoder_features: - return feats['res5'], dec_outs - else: - return dec_outs - - -class FPNDecoder(nn.Module): - def __init__(self, args): - super(FPNDecoder, self).__init__() - if args.arch == 'resnet18': - mfactor = 1 - out_dim = 128 - else: - mfactor = 4 - out_dim = 256 - - self.layer4 = nn.Conv2d(512 * mfactor // 8, out_dim, kernel_size=1, stride=1, padding=0) - self.layer3 = nn.Conv2d(512 * mfactor // 4, out_dim, kernel_size=1, stride=1, padding=0) - self.layer2 = nn.Conv2d(512 * mfactor // 2, out_dim, kernel_size=1, stride=1, padding=0) - self.layer1 = nn.Conv2d(512 * mfactor, out_dim, kernel_size=1, stride=1, padding=0) - - def forward(self, x): - o1 = self.layer1(x['res5']) - o2 = self.upsample_add(o1, self.layer2(x['res4'])) - o3 = self.upsample_add(o2, self.layer3(x['res3'])) - o4 = self.upsample_add(o3, self.layer4(x['res2'])) - - return o4 - - def upsample_add(self, x, y): - _, _, H, W = y.size() - - return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=False) + y - - -class FPNDecoderViT(nn.Module): - def __init__(self, args): - super(FPNDecoderViT, self).__init__() - if args.arch == 'resnet18' or args.arch == 'vit_small': - mfactor = 1 - out_dim = 128 - else: - mfactor = 4 - out_dim = 256 - - self.upsample_rate = 4 - - self.layer4 = nn.Conv2d(384, out_dim, kernel_size=1, stride=1, padding=0) - self.layer3 = nn.Conv2d(384, out_dim, kernel_size=1, stride=1, padding=0) - self.layer2 = nn.Conv2d(384, out_dim, kernel_size=1, stride=1, padding=0) - self.layer1 = nn.Conv2d(384, out_dim, kernel_size=1, stride=1, padding=0) - - def forward(self, x): - o1 = self.layer1(x[3]) - o1 = F.interpolate(o1, scale_factor=4, mode='bilinear', align_corners=False) - o2 = self.upsample_add(o1, self.layer2(x[2])) - o3 = self.upsample_add(o2, self.layer3(x[1])) - o4 = self.upsample_add(o3, self.layer4(x[0])) - - return o4 - - def upsample_add(self, x, y): - return F.interpolate(y, scale_factor=self.upsample_rate, mode='bilinear', align_corners=False) + x diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/schedules/schedule_40k.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/schedules/schedule_40k.py deleted file mode 100644 index cdbf841abcb26eed87bf76ab816aff4bae0630ee..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/schedules/schedule_40k.py +++ /dev/null @@ -1,9 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=40000) -checkpoint_config = dict(by_epoch=False, interval=4000) -evaluation = dict(interval=4000, metric='mIoU') diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/upsample.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/upsample.py deleted file mode 100644 index 
a1a353767d0ce8518f0d7289bed10dba0178ed12..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/cnn/bricks/upsample.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F - -from ..utils import xavier_init -from .registry import UPSAMPLE_LAYERS - -UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample) -UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample) - - -@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle') -class PixelShufflePack(nn.Module): - """Pixel Shuffle upsample layer. - - This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to - achieve a simple upsampling with pixel shuffle. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - scale_factor (int): Upsample ratio. - upsample_kernel (int): Kernel size of the conv layer to expand the - channels. - """ - - def __init__(self, in_channels, out_channels, scale_factor, - upsample_kernel): - super(PixelShufflePack, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.scale_factor = scale_factor - self.upsample_kernel = upsample_kernel - self.upsample_conv = nn.Conv2d( - self.in_channels, - self.out_channels * scale_factor * scale_factor, - self.upsample_kernel, - padding=(self.upsample_kernel - 1) // 2) - self.init_weights() - - def init_weights(self): - xavier_init(self.upsample_conv, distribution='uniform') - - def forward(self, x): - x = self.upsample_conv(x) - x = F.pixel_shuffle(x, self.scale_factor) - return x - - -def build_upsample_layer(cfg, *args, **kwargs): - """Build upsample layer. - - Args: - cfg (dict): The upsample layer config, which should contain: - - - type (str): Layer type. - - scale_factor (int): Upsample ratio, which is not applicable to - deconv. - - layer args: Args needed to instantiate a upsample layer. - args (argument list): Arguments passed to the ``__init__`` - method of the corresponding conv layer. - kwargs (keyword arguments): Keyword arguments passed to the - ``__init__`` method of the corresponding conv layer. - - Returns: - nn.Module: Created upsample layer. 
- """ - if not isinstance(cfg, dict): - raise TypeError(f'cfg must be a dict, but got {type(cfg)}') - if 'type' not in cfg: - raise KeyError( - f'the cfg dict must contain the key "type", but got {cfg}') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in UPSAMPLE_LAYERS: - raise KeyError(f'Unrecognized upsample type {layer_type}') - else: - upsample = UPSAMPLE_LAYERS.get(layer_type) - - if upsample is nn.Upsample: - cfg_['mode'] = layer_type - layer = upsample(*args, **kwargs, **cfg_) - return layer diff --git a/spaces/vumichien/canvas_controlnet/ldm/models/diffusion/dpm_solver/sampler.py b/spaces/vumichien/canvas_controlnet/ldm/models/diffusion/dpm_solver/sampler.py deleted file mode 100644 index 7d137b8cf36718c1c58faa09f9dd919e5fb2977b..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/ldm/models/diffusion/dpm_solver/sampler.py +++ /dev/null @@ -1,87 +0,0 @@ -"""SAMPLING ONLY.""" -import torch - -from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver - - -MODEL_TYPES = { - "eps": "noise", - "v": "v" -} - - -class DPMSolverSampler(object): - def __init__(self, model, **kwargs): - super().__init__() - self.model = model - to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) - self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - - print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') - - device = self.model.betas.device - if x_T is None: - img = torch.randn(size, device=device) - else: - img = x_T - - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) - - model_fn = model_wrapper( - lambda x, t, c: self.model.apply_model(x, t, c), - ns, - model_type=MODEL_TYPES[self.model.parameterization], - guidance_type="classifier-free", - condition=conditioning, - unconditional_condition=unconditional_conditioning, - guidance_scale=unconditional_guidance_scale, - ) - - dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) - x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) - - return x.to(device), None \ No newline at end of file diff --git a/spaces/wasimmadha/entity-extraction/app.py b/spaces/wasimmadha/entity-extraction/app.py deleted file mode 100644 index cc24b023bc55774cb8721066d1609f88a6b39c4c..0000000000000000000000000000000000000000 --- a/spaces/wasimmadha/entity-extraction/app.py +++ /dev/null @@ -1,60 +0,0 @@ -if __name__ == '__main__': - inputs = ['gbjjhbdjhbdgjhdbfjhsdkjrkjf', 'fdjhbjhsbd'] - from transformers import AutoTokenizer - from model import CustomModel - import torch - from configuration import CFG - from dataset import SingleInputDataset - from torch.utils.data import DataLoader - from utils import inference_fn, get_char_probs, get_results, get_text - import numpy as np - import gradio as gr - import os - - device = torch.device('cpu') - config_path = os.path.join('models_file', 'config.pth') - model_path = os.path.join('models_file', 'microsoft-deberta-base_0.9449373420387531_8_best.pth') - tokenizer = AutoTokenizer.from_pretrained('models_file/tokenizer') - model = CustomModel(CFG, config_path=config_path, pretrained=False) - state = torch.load(model_path, - map_location=torch.device('cpu')) - model.load_state_dict(state['model']) - - def get_answer(context, feature): - - ## Input to the model using patient-history and feature-text - inputs_single = tokenizer(context, feature, - add_special_tokens=True, - max_length=CFG.max_len, - padding="max_length", - return_offsets_mapping=False) - - for k, v in inputs_single.items(): - inputs_single[k] = torch.tensor(v, dtype=torch.long) - - # Create a new dataset containing only the input sample - single_input_dataset = SingleInputDataset(inputs_single) - # Create a DataLoader for the new dataset - single_input_loader = DataLoader(single_input_dataset, - batch_size=1, - shuffle=False, - num_workers=2) - - # Perform inference on the single input - output = inference_fn(single_input_loader, model, device) - - prediction = output.reshape((1, CFG.max_len)) - char_probs = get_char_probs([context], prediction, tokenizer) - predictions = np.mean([char_probs], axis=0) - results = get_results(predictions, th=0.5) - - print(results) - return get_text(context, results[0]) - - inputs = [gr.inputs.Textbox(label="Context Para", lines=10), gr.inputs.Textbox(label="Question", lines=1)] - output = 
gr.outputs.Textbox(label="Answer") - - app = gr.Interface(fn=get_answer, inputs=inputs, outputs=output, allow_flagging='never') - - app.launch() - print(get_answer(inputs[0], inputs[1])) diff --git a/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/utils.py b/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/utils.py deleted file mode 100644 index 5bd18f70225e12b2e27fdb4eabcde91d959f8e31..0000000000000000000000000000000000000000 --- a/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/utils.py +++ /dev/null @@ -1,268 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import copy -import math - -import torch -import torch.nn.functional as F -from torch import Tensor, nn - - -def _get_clones(module, N, layer_share=False): - # import ipdb; ipdb.set_trace() - if layer_share: - return nn.ModuleList([module for i in range(N)]) - else: - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def get_sine_pos_embed( - pos_tensor: torch.Tensor, - num_pos_feats: int = 128, - temperature: int = 10000, - exchange_xy: bool = True, -): - """generate sine position embedding from a position tensor - Args: - pos_tensor (torch.Tensor): shape: [..., n]. - num_pos_feats (int): projected shape for each float in the tensor. - temperature (int): temperature in the sine/cosine function. - exchange_xy (bool, optional): exchange pos x and pos y. \ - For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True. - Returns: - pos_embed (torch.Tensor): shape: [..., n*num_pos_feats]. 
- """ - scale = 2 * math.pi - dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) - dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) - - def sine_func(x: torch.Tensor): - sin_x = x * scale / dim_t - sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) - return sin_x - - pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)] - if exchange_xy: - pos_res[0], pos_res[1] = pos_res[1], pos_res[0] - pos_res = torch.cat(pos_res, dim=-1) - return pos_res - - -def gen_encoder_output_proposals( - memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None -): - """ - Input: - - memory: bs, \sum{hw}, d_model - - memory_padding_mask: bs, \sum{hw} - - spatial_shapes: nlevel, 2 - - learnedwh: 2 - Output: - - output_memory: bs, \sum{hw}, d_model - - output_proposals: bs, \sum{hw}, 4 - """ - N_, S_, C_ = memory.shape - proposals = [] - _cur = 0 - for lvl, (H_, W_) in enumerate(spatial_shapes): - mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1) - valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) - valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) - - # import ipdb; ipdb.set_trace() - - grid_y, grid_x = torch.meshgrid( - torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device), - torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device), - ) - grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2 - - scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2) - grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - - if learnedwh is not None: - # import ipdb; ipdb.set_trace() - wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl) - else: - wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) - - # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1) - # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - # wh = torch.ones_like(grid) / scale - proposal = torch.cat((grid, wh), -1).view(N_, -1, 4) - proposals.append(proposal) - _cur += H_ * W_ - # import ipdb; ipdb.set_trace() - output_proposals = torch.cat(proposals, 1) - output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all( - -1, keepdim=True - ) - output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid - output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf")) - output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) - - output_memory = memory - output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0)) - output_memory = output_memory.masked_fill(~output_proposals_valid, float(0)) - - # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf')) - # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf')) - - return output_memory, output_proposals - - -class RandomBoxPerturber: - def __init__( - self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2 - ) -> None: - self.noise_scale = torch.Tensor( - [x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale] - ) - - def __call__(self, refanchors: Tensor) -> Tensor: - nq, bs, query_dim = refanchors.shape - device = refanchors.device - - noise_raw = torch.rand_like(refanchors) - noise_scale = 
self.noise_scale.to(device)[:query_dim] - - new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale) - return new_refanchors.clamp_(0, 1) - - -def sigmoid_focal_loss( - inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - if no_reduction: - return loss - - return loss.mean(1).sum() / num_boxes - - -class MLP(nn.Module): - """Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x - - -def _get_activation_fn(activation, d_model=256, batch_dim=0): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - if activation == "prelu": - return nn.PReLU() - if activation == "selu": - return F.selu - - raise RuntimeError(f"activation should be relu/gelu, not {activation}.") - - -def gen_sineembed_for_position(pos_tensor): - # n_query, bs, _ = pos_tensor.size() - # sineembed_tensor = torch.zeros(n_query, bs, 256) - scale = 2 * math.pi - dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device) - dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128) - x_embed = pos_tensor[:, :, 0] * scale - y_embed = pos_tensor[:, :, 1] * scale - pos_x = x_embed[:, :, None] / dim_t - pos_y = y_embed[:, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) - pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) - if pos_tensor.size(-1) == 2: - pos = torch.cat((pos_y, pos_x), dim=2) - elif pos_tensor.size(-1) == 4: - w_embed = pos_tensor[:, :, 2] * scale - pos_w = w_embed[:, :, None] / dim_t - pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2) - - h_embed = pos_tensor[:, :, 3] * scale - pos_h = h_embed[:, :, None] / dim_t - pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2) - - pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) - else: - raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1))) - return pos - - -class ContrastiveEmbed(nn.Module): - def __init__(self, max_text_len=256): - """ - Args: - max_text_len: max length of text. 
- """ - super().__init__() - self.max_text_len = max_text_len - - def forward(self, x, text_dict): - """_summary_ - - Args: - x (_type_): _description_ - text_dict (_type_): _description_ - { - 'encoded_text': encoded_text, # bs, 195, d_model - 'text_token_mask': text_token_mask, # bs, 195 - # True for used tokens. False for padding tokens - } - Returns: - _type_: _description_ - """ - assert isinstance(text_dict, dict) - - y = text_dict["encoded_text"] - text_token_mask = text_dict["text_token_mask"] - - res = x @ y.transpose(-1, -2) - res.masked_fill_(~text_token_mask[:, None, :], float("-inf")) - - # padding to max_text_len - new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device) - new_res[..., : res.shape[-1]] = res - - return new_res diff --git a/spaces/whilefalse/CLIP/app.py b/spaces/whilefalse/CLIP/app.py deleted file mode 100644 index b0bf2a3595002caddb9c1eebd3d2a5de397e0880..0000000000000000000000000000000000000000 --- a/spaces/whilefalse/CLIP/app.py +++ /dev/null @@ -1,44 +0,0 @@ -import gradio as gr -import torch -import clip -from PIL import Image - -print("Getting device...") -device = "cuda" if torch.cuda.is_available() else "cpu" -print("Loading model...") -model, preprocess = clip.load("ViT-B/32", device=device) -print("Loaded model.") - - -def process(image, prompt): - print("Inferring...") - image = preprocess(image).unsqueeze(0).to(device) - print("Image: ", image) - - prompts = prompt.split("\n") - print("Prompts: ", prompts) - text = clip.tokenize(prompts).to(device) - print("Tokens: ", text) - - with torch.no_grad(): - logits_per_image, logits_per_text = model(image, text) - probs = logits_per_image.softmax(dim=-1).cpu() - print("Probs: ", probs) - - return {k: v.item() for (k,v) in zip(prompts, probs[0])} - - -iface = gr.Interface( - fn=process, - inputs=[ - gr.Image(type="pil", label="Image"), - gr.Textbox(lines=5, label="Prompts (newline-separated)"), - ], - outputs="label", - examples=[ - ["dog.jpg", "a photo of a dog\na photo of a cat"], - ["cat.jpg", "a photo of a dog\na photo of a cat"], - ["car.jpg", "a red car on a golf course\na red sports car on a road\na blue sports car\na red family car"] - ] -) -iface.launch() diff --git a/spaces/wliu88/StructDiffusionDemo/src/StructDiffusion/language/sentence_encoder.py b/spaces/wliu88/StructDiffusionDemo/src/StructDiffusion/language/sentence_encoder.py deleted file mode 100644 index 9c73963e394eb03e39b9f213025b6aa2014077b7..0000000000000000000000000000000000000000 --- a/spaces/wliu88/StructDiffusionDemo/src/StructDiffusion/language/sentence_encoder.py +++ /dev/null @@ -1,23 +0,0 @@ -from sentence_transformers import SentenceTransformer - -class SentenceBertEncoder: - - def __init__(self): - self.model = SentenceTransformer('all-MiniLM-L6-v2') - - def encode(self, sentences): - #Our sentences we like to encode - # sentences = ['This framework generates embeddings for each input sentence', - # 'Sentences are passed as a list of string.', - # 'The quick brown fox jumps over the lazy dog.'] - #Sentences are encoded by calling model.encode() - - embeddings = self.model.encode(sentences) - # print(embeddings.shape) - return embeddings - - -if __name__ == "__main__": - sentence_encoder = SentenceBertEncoder() - embedding = sentence_encoder.encode(["this is cool!"]) - print(embedding.shape) \ No newline at end of file diff --git a/spaces/wong26/faster-whisper-webui/src/segments.py b/spaces/wong26/faster-whisper-webui/src/segments.py deleted file mode 100644 index 
ec2650dceade5d0b2022264f6419115eab085aea..0000000000000000000000000000000000000000 --- a/spaces/wong26/faster-whisper-webui/src/segments.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Any, Dict, List - -import copy - -def merge_timestamps(timestamps: List[Dict[str, Any]], merge_window: float = 5, max_merge_size: float = 30, padding_left: float = 1, padding_right: float = 1): - result = [] - - if len(timestamps) == 0: - return result - if max_merge_size is None: - return timestamps - - if padding_left is None: - padding_left = 0 - if padding_right is None: - padding_right = 0 - - processed_time = 0 - current_segment = None - - for i in range(len(timestamps)): - next_segment = timestamps[i] - - delta = next_segment['start'] - processed_time - - # Note that segments can still be longer than the max merge size, they just won't be merged in that case - if current_segment is None or (merge_window is not None and delta > merge_window) \ - or next_segment['end'] - current_segment['start'] > max_merge_size: - # Finish the current segment - if current_segment is not None: - # Add right padding - finish_padding = min(padding_right, delta / 2) if delta < padding_left + padding_right else padding_right - current_segment['end'] += finish_padding - delta -= finish_padding - - result.append(current_segment) - - # Start a new segment - current_segment = copy.deepcopy(next_segment) - - # Pad the segment - current_segment['start'] = current_segment['start'] - min(padding_left, delta) - processed_time = current_segment['end'] - - else: - # Merge the segment - current_segment['end'] = next_segment['end'] - processed_time = current_segment['end'] - - # Add the last segment - if current_segment is not None: - current_segment['end'] += padding_right - result.append(current_segment) - - return result \ No newline at end of file diff --git a/spaces/wpeebles/DiT/README.md b/spaces/wpeebles/DiT/README.md deleted file mode 100644 index d1aae787b021c47ad6914bdc857cf7016244a496..0000000000000000000000000000000000000000 --- a/spaces/wpeebles/DiT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Diffusion Transformers (DiT) -emoji: 🚀 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: cc-by-nc-4.0 ---- - -The code and model weights are licensed under CC-BY-NC. See LICENSE.txt for details. 
diff --git a/spaces/wwwwwwww2/bingo/src/lib/isomorphic/browser.ts b/spaces/wwwwwwww2/bingo/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/wwwwwwww2/bingo/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/wy213/213a/src/lib/hooks/use-at-bottom.tsx b/spaces/wy213/213a/src/lib/hooks/use-at-bottom.tsx deleted file mode 100644 index d37c8cf4162adcb0064e08ecec24eb731416b045..0000000000000000000000000000000000000000 --- a/spaces/wy213/213a/src/lib/hooks/use-at-bottom.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export function useAtBottom(offset = 0) { - const [isAtBottom, setIsAtBottom] = React.useState(false) - - React.useEffect(() => { - const handleScroll = () => { - setIsAtBottom( - window.innerHeight + window.scrollY >= - document.body.offsetHeight - offset - ) - } - - window.addEventListener('scroll', handleScroll, { passive: true }) - handleScroll() - - return () => { - window.removeEventListener('scroll', handleScroll) - } - }, [offset]) - - return isAtBottom -} diff --git a/spaces/xdecoder/Demo/utils/distributed.py b/spaces/xdecoder/Demo/utils/distributed.py deleted file mode 100644 index 521a934de05bca3159bb595cd0ab997ee08dd61a..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Demo/utils/distributed.py +++ /dev/null @@ -1,180 +0,0 @@ -import os -import time -import torch -import pickle -import torch.distributed as dist - - -def init_distributed(opt): - opt['CUDA'] = opt.get('CUDA', True) and torch.cuda.is_available() - if 'OMPI_COMM_WORLD_SIZE' not in os.environ: - # application was started without MPI - # default to single node with single process - opt['env_info'] = 'no MPI' - opt['world_size'] = 1 - opt['local_size'] = 1 - opt['rank'] = 0 - opt['local_rank'] = 0 - opt['master_address'] = '127.0.0.1' - opt['master_port'] = '8673' - else: - # application was started with MPI - # get MPI parameters - opt['world_size'] = int(os.environ['OMPI_COMM_WORLD_SIZE']) - opt['local_size'] = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE']) - opt['rank'] = int(os.environ['OMPI_COMM_WORLD_RANK']) - opt['local_rank'] = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) - - # set up device - if not opt['CUDA']: - assert opt['world_size'] == 1, 'multi-GPU training without CUDA is not supported since we use NCCL as communication backend' - opt['device'] = torch.device("cpu") - else: - torch.cuda.set_device(opt['local_rank']) - opt['device'] = torch.device("cuda", opt['local_rank']) - return opt - -def is_main_process(): - rank = 0 - if 'OMPI_COMM_WORLD_SIZE' in os.environ: - rank = int(os.environ['OMPI_COMM_WORLD_RANK']) - - return rank == 0 - -def get_world_size(): - if not dist.is_available(): - return 1 - if not dist.is_initialized(): - return 1 - return dist.get_world_size() - -def get_rank(): - if not dist.is_available(): - return 0 - if not dist.is_initialized(): - return 0 - return dist.get_rank() - - -def synchronize(): - """ - Helper function to synchronize (barrier) among all processes when - using distributed training - """ - if not dist.is_available(): - return - if not dist.is_initialized(): - return - world_size = dist.get_world_size() - rank = dist.get_rank() - if world_size == 1: - 
return - - def _send_and_wait(r): - if rank == r: - tensor = torch.tensor(0, device="cuda") - else: - tensor = torch.tensor(1, device="cuda") - dist.broadcast(tensor, r) - while tensor.item() == 1: - time.sleep(1) - - _send_and_wait(0) - # now sync on the main process - _send_and_wait(1) - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - - # obtain Tensor size of each rank - local_size = torch.IntTensor([tensor.numel()]).to("cuda") - size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda")) - if local_size != max_size: - padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that process with rank - 0 has the averaged results. Returns a dict with the same fields as - input_dict, after reduction. 
- """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.reduce(values, dst=0) - if dist.get_rank() == 0 and average: - # only main process gets accumulated, so only divide by - # world_size in this case - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -def broadcast_data(data): - if not torch.distributed.is_initialized(): - return data - rank = dist.get_rank() - if rank == 0: - data_tensor = torch.tensor(data + [0], device="cuda") - else: - data_tensor = torch.tensor(data + [1], device="cuda") - torch.distributed.broadcast(data_tensor, 0) - while data_tensor.cpu().numpy()[-1] == 1: - time.sleep(1) - - return data_tensor.cpu().numpy().tolist()[:-1] - - -def reduce_sum(tensor): - if get_world_size() <= 1: - return tensor - - tensor = tensor.clone() - dist.all_reduce(tensor, op=dist.ReduceOp.SUM) - return tensor \ No newline at end of file diff --git a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/util/visualizer.py b/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/util/visualizer.py deleted file mode 100644 index 7a1b7b101e9b73f75f9136bc67f2063c7c1cf1c1..0000000000000000000000000000000000000000 --- a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/util/visualizer.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -""" -@File : visualizer.py -@Time : 2022/04/05 11:39:33 -@Author : Shilong Liu -@Contact : slongliu86@gmail.com -""" - -import datetime -import os - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -from matplotlib import transforms -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon -from pycocotools import mask as maskUtils - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class ColorMap: - def __init__(self, basergb=[255, 255, 0]): - self.basergb = np.array(basergb) - - def __call__(self, attnmap): - # attnmap: h, w. np.uint8. - # return: h, w, 4. np.uint8. - assert attnmap.dtype == np.uint8 - h, w = attnmap.shape - res = self.basergb.copy() - res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3 - attn1 = attnmap.copy()[..., None] # h, w, 1 - res = np.concatenate((res, attn1), axis=-1).astype(np.uint8) - return res - - -def rainbow_text(x, y, ls, lc, **kw): - """ - Take a list of strings ``ls`` and colors ``lc`` and place them next to each - other, with text ls[i] being shown in color lc[i]. 
- - This example shows how to do both vertical and horizontal text, and will - pass all keyword arguments to plt.text, so you can set the font size, - family, etc. - """ - t = plt.gca().transData - fig = plt.gcf() - plt.show() - - # horizontal version - for s, c in zip(ls, lc): - text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw) - text.draw(fig.canvas.get_renderer()) - ex = text.get_window_extent() - t = transforms.offset_copy(text._transform, x=ex.width, units="dots") - - # #vertical version - # for s,c in zip(ls,lc): - # text = plt.text(x,y," "+s+" ",color=c, transform=t, - # rotation=90,va='bottom',ha='center',**kw) - # text.draw(fig.canvas.get_renderer()) - # ex = text.get_window_extent() - # t = transforms.offset_copy(text._transform, y=ex.height, units='dots') - - -class COCOVisualizer: - def __init__(self, coco=None, tokenlizer=None) -> None: - self.coco = coco - - def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"): - """ - img: tensor(3, H, W) - tgt: make sure they are all on cpu. - must have items: 'image_id', 'boxes', 'size' - """ - plt.figure(dpi=dpi) - plt.rcParams["font.size"] = "5" - ax = plt.gca() - img = renorm(img).permute(1, 2, 0) - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - ax.imshow(img) - - self.addtgt(tgt) - - if tgt is None: - image_id = 0 - elif "image_id" not in tgt: - image_id = 0 - else: - image_id = tgt["image_id"] - - if caption is None: - savename = "{}/{}-{}.png".format( - savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - else: - savename = "{}/{}-{}-{}.png".format( - savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - print("savename: {}".format(savename)) - os.makedirs(os.path.dirname(savename), exist_ok=True) - plt.savefig(savename) - plt.close() - - def addtgt(self, tgt): - """ """ - if tgt is None or not "boxes" in tgt: - ax = plt.gca() - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - - ax.set_axis_off() - return - - ax = plt.gca() - H, W = tgt["size"] - numbox = tgt["boxes"].shape[0] - - color = [] - polygons = [] - boxes = [] - for box in tgt["boxes"].cpu(): - unnormbbox = box * torch.Tensor([W, H, W, H]) - unnormbbox[:2] -= unnormbbox[2:] / 2 - [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist() - boxes.append([bbox_x, bbox_y, bbox_w, bbox_h]) - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - color.append(c) - - p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1) - ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - - if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0: - assert ( - len(tgt["strings_positive"]) == numbox - ), f"{len(tgt['strings_positive'])} = {numbox}, " - for idx, strlist in enumerate(tgt["strings_positive"]): - cate_id = int(tgt["labels"][idx]) - _string = str(cate_id) + ":" + " ".join(strlist) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "box_label" in tgt: - assert len(tgt["box_label"]) == numbox, 
f"{len(tgt['box_label'])} = {numbox}, " - for idx, bl in enumerate(tgt["box_label"]): - _string = str(bl) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - # plt.figure() - # rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! !".split(), - # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black']) - - if "attn" in tgt: - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - if isinstance(tgt["attn"], tuple): - tgt["attn"] = [tgt["attn"]] - for item in tgt["attn"]: - attn_map, basergb = item - attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3) - attn_map = (attn_map * 255).astype(np.uint8) - cm = ColorMap(basergb) - heatmap = cm(attn_map) - ax.imshow(heatmap) - ax.set_axis_off() - - def showAnns(self, anns, draw_bbox=False): - """ - Display the specified annotations. - :param anns (array of object): annotations to display - :return: None - """ - if len(anns) == 0: - return 0 - if "segmentation" in anns[0] or "keypoints" in anns[0]: - datasetType = "instances" - elif "caption" in anns[0]: - datasetType = "captions" - else: - raise Exception("datasetType not supported") - if datasetType == "instances": - ax = plt.gca() - ax.set_autoscale_on(False) - polygons = [] - color = [] - for ann in anns: - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - if "segmentation" in ann: - if type(ann["segmentation"]) == list: - # polygon - for seg in ann["segmentation"]: - poly = np.array(seg).reshape((int(len(seg) / 2), 2)) - polygons.append(Polygon(poly)) - color.append(c) - else: - # mask - t = self.imgs[ann["image_id"]] - if type(ann["segmentation"]["counts"]) == list: - rle = maskUtils.frPyObjects( - [ann["segmentation"]], t["height"], t["width"] - ) - else: - rle = [ann["segmentation"]] - m = maskUtils.decode(rle) - img = np.ones((m.shape[0], m.shape[1], 3)) - if ann["iscrowd"] == 1: - color_mask = np.array([2.0, 166.0, 101.0]) / 255 - if ann["iscrowd"] == 0: - color_mask = np.random.random((1, 3)).tolist()[0] - for i in range(3): - img[:, :, i] = color_mask[i] - ax.imshow(np.dstack((img, m * 0.5))) - if "keypoints" in ann and type(ann["keypoints"]) == list: - # turn skeleton into zero-based index - sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1 - kp = np.array(ann["keypoints"]) - x = kp[0::3] - y = kp[1::3] - v = kp[2::3] - for sk in sks: - if np.all(v[sk] > 0): - plt.plot(x[sk], y[sk], linewidth=3, color=c) - plt.plot( - x[v > 0], - y[v > 0], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor="k", - markeredgewidth=2, - ) - plt.plot( - x[v > 1], - y[v > 1], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor=c, - markeredgewidth=2, - ) - - if draw_bbox: - [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"] - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - color.append(c) - - # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) - # ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - elif datasetType == "captions": - for ann in anns: - 
print(ann["caption"]) diff --git a/spaces/xnetba/MMS/uroman/lib/JSON/backportPP/Boolean.pm b/spaces/xnetba/MMS/uroman/lib/JSON/backportPP/Boolean.pm deleted file mode 100644 index 38be6a3817b3b3b5632f4ee6bd3bba7397af567e..0000000000000000000000000000000000000000 --- a/spaces/xnetba/MMS/uroman/lib/JSON/backportPP/Boolean.pm +++ /dev/null @@ -1,27 +0,0 @@ -=head1 NAME - -JSON::PP::Boolean - dummy module providing JSON::PP::Boolean - -=head1 SYNOPSIS - - # do not "use" yourself - -=head1 DESCRIPTION - -This module exists only to provide overload resolution for Storable -and similar modules. See L for more info about this class. - -=cut - -use JSON::backportPP (); -use strict; - -1; - -=head1 AUTHOR - -This idea is from L written by -Marc Lehmann - -=cut - diff --git a/spaces/xxx1/zh-clip/models/zhclip/configuration_zhclip.py b/spaces/xxx1/zh-clip/models/zhclip/configuration_zhclip.py deleted file mode 100644 index ebe6db2ea3f56dc2d5b6bda3af9c868094996582..0000000000000000000000000000000000000000 --- a/spaces/xxx1/zh-clip/models/zhclip/configuration_zhclip.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding=utf-8 -# Copyright The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" ZhClip model configuration""" - -import copy - -from transformers.configuration_utils import PretrainedConfig -from transformers.utils import logging -from transformers.models.auto.configuration_auto import AutoConfig -from transformers.models.clip.configuration_clip import CLIPVisionConfig -from typing import Union, Dict - -logger = logging.get_logger(__name__) - -class ZhCLIPConfig(PretrainedConfig): - - model_type = "zhclip" - is_composition = True - - def __init__( - self, - text_config: Union[PretrainedConfig, Dict], - vision_config: Union[PretrainedConfig, Dict], - num_token_types=2, - hidden_size=768, - num_hidden_layers=6, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - initializer_range=0.02, - layer_norm_eps=1e-12, - classifier_dropout=None, - **kwargs): - super().__init__(**kwargs) - - if not isinstance(text_config, PretrainedConfig): - text_model_type = text_config.pop('model_type') - text_config = AutoConfig.for_model(text_model_type, **text_config) - self.text_config = text_config - - if not isinstance(vision_config, PretrainedConfig): - vision_model_type = vision_config.pop('model_type') - if vision_model_type == "clip": - vision_config = AutoConfig.for_model(vision_model_type, **vision_config).vision_config - elif vision_model_type == "clip_vision_model": - vision_config = CLIPVisionConfig(**vision_config) - else: - vision_config = AutoConfig.for_model(vision_model_type, **vision_config) - self.vision_config = vision_config - else: - vision_model_type = vision_config.model_type - if vision_model_type== "clip": - vision_config = vision_config.vision_config - self.vision_config = vision_config - - - # co-attention - self.num_token_types=num_token_types - self.hidden_size=hidden_size 
- self.num_hidden_layers=num_hidden_layers - self.num_attention_heads=num_attention_heads - self.intermediate_size=intermediate_size - self.hidden_act=hidden_act - self.hidden_dropout_prob=hidden_dropout_prob - self.attention_probs_dropout_prob=attention_probs_dropout_prob - self.initializer_range=initializer_range - self.layer_norm_eps=layer_norm_eps - self.classifier_dropout=classifier_dropout - - - def to_dict(self): - """ - Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. - Returns: - `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, - """ - output = copy.deepcopy(self.__dict__) - output["vision_config"] = self.vision_config.to_dict() - output["text_config"] = self.text_config.to_dict() - output["model_type"] = self.__class__.model_type - return output \ No newline at end of file diff --git a/spaces/yaoshining/text-generation-webui/modules/utils.py b/spaces/yaoshining/text-generation-webui/modules/utils.py deleted file mode 100644 index 1535ecdc065307c2c443f592a9ad23d6777cb1aa..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/modules/utils.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -import re -from datetime import datetime -from pathlib import Path - -from modules import shared -from modules.logging_colors import logger - - -def save_file(fname, contents): - if fname == '': - logger.error('File name is empty!') - return - - root_folder = Path(__file__).resolve().parent.parent - abs_path = Path(fname).resolve() - rel_path = abs_path.relative_to(root_folder) - if rel_path.parts[0] == '..': - logger.error(f'Invalid file path: {fname}') - return - - with open(abs_path, 'w', encoding='utf-8') as f: - f.write(contents) - - logger.info(f'Saved {abs_path}.') - - -def delete_file(fname): - if fname == '': - logger.error('File name is empty!') - return - - root_folder = Path(__file__).resolve().parent.parent - abs_path = Path(fname).resolve() - rel_path = abs_path.relative_to(root_folder) - if rel_path.parts[0] == '..': - logger.error(f'Invalid file path: {fname}') - return - - if abs_path.exists(): - abs_path.unlink() - logger.info(f'Deleted {fname}.') - - -def current_time(): - return f"{datetime.now().strftime('%Y-%m-%d-%H%M%S')}" - - -def atoi(text): - return int(text) if text.isdigit() else text.lower() - - -# Replace multiple string pairs in a string -def replace_all(text, dic): - for i, j in dic.items(): - text = text.replace(i, j) - - return text - - -def natural_keys(text): - return [atoi(c) for c in re.split(r'(\d+)', text)] - - -def get_available_models(): - if shared.args.flexgen: - return sorted([re.sub('-np$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if item.name.endswith('-np')], key=natural_keys) - else: - return sorted([re.sub('.pth$', '', item.name) for item in list(Path(f'{shared.args.model_dir}/').glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json', '.yaml'))], key=natural_keys) - - -def get_available_presets(): - return sorted(set((k.stem for k in Path('presets').glob('*.yaml'))), key=natural_keys) - - -def get_available_prompts(): - prompts = [] - files = set((k.stem for k in Path('prompts').glob('*.txt'))) - prompts += sorted([k for k in files if re.match('^[0-9]', k)], key=natural_keys, reverse=True) - prompts += sorted([k for k in files if re.match('^[^0-9]', k)], key=natural_keys) - prompts += ['Instruct-' + k for k in get_available_instruction_templates() if k != 'None'] - prompts += 
['None'] - return prompts - - -def get_available_characters(): - paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml')) - return ['None'] + sorted(set((k.stem for k in paths if k.stem != "instruction-following")), key=natural_keys) - - -def get_available_instruction_templates(): - path = "characters/instruction-following" - paths = [] - if os.path.exists(path): - paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml')) - - return ['None'] + sorted(set((k.stem for k in paths)), key=natural_keys) - - -def get_available_extensions(): - return sorted(set(map(lambda x: x.parts[1], Path('extensions').glob('*/script.py'))), key=natural_keys) - - -def get_available_loras(): - return sorted([item.name for item in list(Path(shared.args.lora_dir).glob('*')) if not item.name.endswith(('.txt', '-np', '.pt', '.json'))], key=natural_keys) - - -def get_datasets(path: str, ext: str): - return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys) - - -def get_available_chat_styles(): - return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys) diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/Theme/GlobalCSS.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/Theme/GlobalCSS.tsx deleted file mode 100644 index aedab080a7ae8e849f791eb5c2307eb5ba7a06b6..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/Theme/GlobalCSS.tsx +++ /dev/null @@ -1,103 +0,0 @@ -import { css, Global } from "@emotion/react" -import { useTheme } from "../../hooks/useTheme" - -export const GlobalCSS = () => { - const theme = useTheme() - return ( - - ) -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/stores/Router.ts b/spaces/yderre-aubay/midi-player-demo/src/main/stores/Router.ts deleted file mode 100644 index 21a4dd5c813caa7b618a1d18d98a787e54bb7873..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/stores/Router.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { makeObservable, observable } from "mobx" - -export type RoutePath = "/track" | "/arrange" | "/tempo" - -export default class Router { - path: RoutePath = "/track" - - constructor() { - makeObservable(this, { - path: observable, - }) - } - - pushArrange() { - this.path = "/arrange" - } - - pushTrack() { - this.path = `/track` - } -} diff --git a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/utils/utils.py b/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/utils/utils.py deleted file mode 100644 index 69fdc4cdb5d75b907c8b9372a9f2448a9a166730..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/utils/utils.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""Utility functions.""" - -import fnmatch -import logging -import os -import sys -try: - import h5py -except: - pass -import numpy as np - - -def find_files(root_dir, query="*.wav", include_root_dir=True): - """Find files recursively. - - Args: - root_dir (str): Root root_dir to find. - query (str): Query to find. - include_root_dir (bool): If False, root_dir name is not included. - - Returns: - list: List of found filenames. 
- - """ - files = [] - for root, dirnames, filenames in os.walk(root_dir, followlinks=True): - for filename in fnmatch.filter(filenames, query): - files.append(os.path.join(root, filename)) - if not include_root_dir: - files = [file_.replace(root_dir + "/", "") for file_ in files] - - return files - - -def read_hdf5(hdf5_name, hdf5_path): - """Read hdf5 dataset. - - Args: - hdf5_name (str): Filename of hdf5 file. - hdf5_path (str): Dataset name in hdf5 file. - - Return: - any: Dataset values. - - """ - if not os.path.exists(hdf5_name): - logging.error(f"There is no such a hdf5 file ({hdf5_name}).") - sys.exit(1) - - hdf5_file = h5py.File(hdf5_name, "r") - - if hdf5_path not in hdf5_file: - logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})") - sys.exit(1) - - hdf5_data = hdf5_file[hdf5_path][()] - hdf5_file.close() - - return hdf5_data - - -def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True): - """Write dataset to hdf5. - - Args: - hdf5_name (str): Hdf5 dataset filename. - hdf5_path (str): Dataset path in hdf5. - write_data (ndarray): Data to write. - is_overwrite (bool): Whether to overwrite dataset. - - """ - # convert to numpy array - write_data = np.array(write_data) - - # check folder existence - folder_name, _ = os.path.split(hdf5_name) - if not os.path.exists(folder_name) and len(folder_name) != 0: - os.makedirs(folder_name) - - # check hdf5 existence - if os.path.exists(hdf5_name): - # if already exists, open with r+ mode - hdf5_file = h5py.File(hdf5_name, "r+") - # check dataset existence - if hdf5_path in hdf5_file: - if is_overwrite: - logging.warning("Dataset in hdf5 file already exists. " - "recreate dataset in hdf5.") - hdf5_file.__delitem__(hdf5_path) - else: - logging.error("Dataset in hdf5 file already exists. " - "if you want to overwrite, please set is_overwrite = True.") - hdf5_file.close() - sys.exit(1) - else: - # if not exists, open with w mode - hdf5_file = h5py.File(hdf5_name, "w") - - # write data to hdf5 - hdf5_file.create_dataset(hdf5_path, data=write_data) - hdf5_file.flush() - hdf5_file.close() - - -class HDF5ScpLoader(object): - """Loader class for a fests.scp file of hdf5 file. - - Examples: - key1 /some/path/a.h5:feats - key2 /some/path/b.h5:feats - key3 /some/path/c.h5:feats - key4 /some/path/d.h5:feats - ... - >>> loader = HDF5ScpLoader("hdf5.scp") - >>> array = loader["key1"] - - key1 /some/path/a.h5 - key2 /some/path/b.h5 - key3 /some/path/c.h5 - key4 /some/path/d.h5 - ... - >>> loader = HDF5ScpLoader("hdf5.scp", "feats") - >>> array = loader["key1"] - - """ - - def __init__(self, feats_scp, default_hdf5_path="feats"): - """Initialize HDF5 scp loader. - - Args: - feats_scp (str): Kaldi-style feats.scp file with hdf5 format. - default_hdf5_path (str): Path in hdf5 file. If the scp contain the info, not used. 
- - """ - self.default_hdf5_path = default_hdf5_path - with open(feats_scp) as f: - lines = [line.replace("\n", "") for line in f.readlines()] - self.data = {} - for line in lines: - key, value = line.split() - self.data[key] = value - - def get_path(self, key): - """Get hdf5 file path for a given key.""" - return self.data[key] - - def __getitem__(self, key): - """Get ndarray for a given key.""" - p = self.data[key] - if ":" in p: - return read_hdf5(*p.split(":")) - else: - return read_hdf5(p, self.default_hdf5_path) - - def __len__(self): - """Return the length of the scp file.""" - return len(self.data) - - def __iter__(self): - """Return the iterator of the scp file.""" - return iter(self.data) - - def keys(self): - """Return the keys of the scp file.""" - return self.data.keys() diff --git a/spaces/ygangang/VToonify/vtoonify/model/encoder/psp.py b/spaces/ygangang/VToonify/vtoonify/model/encoder/psp.py deleted file mode 100644 index cc08f2b28b3be2985139602e0f0ae56b1303e1a3..0000000000000000000000000000000000000000 --- a/spaces/ygangang/VToonify/vtoonify/model/encoder/psp.py +++ /dev/null @@ -1,125 +0,0 @@ -""" -This file defines the core research contribution -""" -import matplotlib -matplotlib.use('Agg') -import math - -import torch -from torch import nn -from model.encoder.encoders import psp_encoders -from model.stylegan.model import Generator - -def get_keys(d, name): - if 'state_dict' in d: - d = d['state_dict'] - d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name} - return d_filt - - -class pSp(nn.Module): - - def __init__(self, opts): - super(pSp, self).__init__() - self.set_opts(opts) - # compute number of style inputs based on the output resolution - self.opts.n_styles = int(math.log(self.opts.output_size, 2)) * 2 - 2 - # Define architecture - self.encoder = self.set_encoder() - self.decoder = Generator(self.opts.output_size, 512, 8) - self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - # Load weights if needed - self.load_weights() - - def set_encoder(self): - if self.opts.encoder_type == 'GradualStyleEncoder': - encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts) - elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoW': - encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoW(50, 'ir_se', self.opts) - elif self.opts.encoder_type == 'BackboneEncoderUsingLastLayerIntoWPlus': - encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoWPlus(50, 'ir_se', self.opts) - else: - raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type)) - return encoder - - def load_weights(self): - if self.opts.checkpoint_path is not None: - print('Loading pSp from checkpoint: {}'.format(self.opts.checkpoint_path)) - ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu') - self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True) - self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True) - self.__load_latent_avg(ckpt) - else: - pass - '''print('Loading encoders weights from irse50!') - encoder_ckpt = torch.load(model_paths['ir_se50']) - # if input to encoder is not an RGB image, do not load the input layer weights - if self.opts.label_nc != 0: - encoder_ckpt = {k: v for k, v in encoder_ckpt.items() if "input_layer" not in k} - self.encoder.load_state_dict(encoder_ckpt, strict=False) - print('Loading decoder weights from pretrained!') - ckpt = torch.load(self.opts.stylegan_weights) - self.decoder.load_state_dict(ckpt['g_ema'], strict=False) - if self.opts.learn_in_w: - 
self.__load_latent_avg(ckpt, repeat=1) - else: - self.__load_latent_avg(ckpt, repeat=self.opts.n_styles) - ''' - - def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True, - inject_latent=None, return_latents=False, alpha=None, z_plus_latent=False, return_z_plus_latent=True): - if input_code: - codes = x - else: - codes = self.encoder(x) - #print(codes.shape) - # normalize with respect to the center of an average face - if self.opts.start_from_latent_avg: - if self.opts.learn_in_w: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1) - else: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1) - - - if latent_mask is not None: - for i in latent_mask: - if inject_latent is not None: - if alpha is not None: - codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i] - else: - codes[:, i] = inject_latent[:, i] - else: - codes[:, i] = 0 - - input_is_latent = not input_code - if z_plus_latent: - input_is_latent = False - images, result_latent = self.decoder([codes], - input_is_latent=input_is_latent, - randomize_noise=randomize_noise, - return_latents=return_latents, - z_plus_latent=z_plus_latent) - - if resize: - images = self.face_pool(images) - - if return_latents: - if z_plus_latent and return_z_plus_latent: - return images, codes - if z_plus_latent and not return_z_plus_latent: - return images, result_latent - else: - return images, result_latent - else: - return images - - def set_opts(self, opts): - self.opts = opts - - def __load_latent_avg(self, ckpt, repeat=None): - if 'latent_avg' in ckpt: - self.latent_avg = ckpt['latent_avg'].to(self.opts.device) - if repeat is not None: - self.latent_avg = self.latent_avg.repeat(repeat, 1) - else: - self.latent_avg = None diff --git a/spaces/ygangang/VToonify/vtoonify/model/stylegan/dataset.py b/spaces/ygangang/VToonify/vtoonify/model/stylegan/dataset.py deleted file mode 100644 index 7713ea2f8bc94d202d2dfbe830af3cb96b1e803d..0000000000000000000000000000000000000000 --- a/spaces/ygangang/VToonify/vtoonify/model/stylegan/dataset.py +++ /dev/null @@ -1,40 +0,0 @@ -from io import BytesIO - -import lmdb -from PIL import Image -from torch.utils.data import Dataset - - -class MultiResolutionDataset(Dataset): - def __init__(self, path, transform, resolution=256): - self.env = lmdb.open( - path, - max_readers=32, - readonly=True, - lock=False, - readahead=False, - meminit=False, - ) - - if not self.env: - raise IOError('Cannot open lmdb dataset', path) - - with self.env.begin(write=False) as txn: - self.length = int(txn.get('length'.encode('utf-8')).decode('utf-8')) - - self.resolution = resolution - self.transform = transform - - def __len__(self): - return self.length - - def __getitem__(self, index): - with self.env.begin(write=False) as txn: - key = f'{self.resolution}-{str(index).zfill(5)}'.encode('utf-8') - img_bytes = txn.get(key) - - buffer = BytesIO(img_bytes) - img = Image.open(buffer) - img = self.transform(img) - - return img diff --git a/spaces/ygtxr1997/ReliableSwap_Demo/modules/layers/simswap/pg_modules/blocks.py b/spaces/ygtxr1997/ReliableSwap_Demo/modules/layers/simswap/pg_modules/blocks.py deleted file mode 100644 index 78bd113bac1cd6486ede92b1ae8d5adfb678eb81..0000000000000000000000000000000000000000 --- a/spaces/ygtxr1997/ReliableSwap_Demo/modules/layers/simswap/pg_modules/blocks.py +++ /dev/null @@ -1,325 +0,0 @@ -import functools -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.utils import spectral_norm - - -### single layers - - -def 
conv2d(*args, **kwargs): - return spectral_norm(nn.Conv2d(*args, **kwargs)) - - -def convTranspose2d(*args, **kwargs): - return spectral_norm(nn.ConvTranspose2d(*args, **kwargs)) - - -def embedding(*args, **kwargs): - return spectral_norm(nn.Embedding(*args, **kwargs)) - - -def linear(*args, **kwargs): - return spectral_norm(nn.Linear(*args, **kwargs)) - - -def NormLayer(c, mode='batch'): - if mode == 'group': - return nn.GroupNorm(c//2, c) - elif mode == 'batch': - return nn.BatchNorm2d(c) - - -### Activations - - -class GLU(nn.Module): - def forward(self, x): - nc = x.size(1) - assert nc % 2 == 0, 'channels dont divide 2!' - nc = int(nc/2) - return x[:, :nc] * torch.sigmoid(x[:, nc:]) - - -class Swish(nn.Module): - def forward(self, feat): - return feat * torch.sigmoid(feat) - - -### Upblocks - - -class InitLayer(nn.Module): - def __init__(self, nz, channel, sz=4): - super().__init__() - - self.init = nn.Sequential( - convTranspose2d(nz, channel*2, sz, 1, 0, bias=False), - NormLayer(channel*2), - GLU(), - ) - - def forward(self, noise): - noise = noise.view(noise.shape[0], -1, 1, 1) - return self.init(noise) - - -def UpBlockSmall(in_planes, out_planes): - block = nn.Sequential( - nn.Upsample(scale_factor=2, mode='nearest'), - conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False), - NormLayer(out_planes*2), GLU()) - return block - - -class UpBlockSmallCond(nn.Module): - def __init__(self, in_planes, out_planes, z_dim): - super().__init__() - self.in_planes = in_planes - self.out_planes = out_planes - self.up = nn.Upsample(scale_factor=2, mode='nearest') - self.conv = conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False) - - which_bn = functools.partial(CCBN, which_linear=linear, input_size=z_dim) - self.bn = which_bn(2*out_planes) - self.act = GLU() - - def forward(self, x, c): - x = self.up(x) - x = self.conv(x) - x = self.bn(x, c) - x = self.act(x) - return x - - -def UpBlockBig(in_planes, out_planes): - block = nn.Sequential( - nn.Upsample(scale_factor=2, mode='nearest'), - conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False), - NoiseInjection(), - NormLayer(out_planes*2), GLU(), - conv2d(out_planes, out_planes*2, 3, 1, 1, bias=False), - NoiseInjection(), - NormLayer(out_planes*2), GLU() - ) - return block - - -class UpBlockBigCond(nn.Module): - def __init__(self, in_planes, out_planes, z_dim): - super().__init__() - self.in_planes = in_planes - self.out_planes = out_planes - self.up = nn.Upsample(scale_factor=2, mode='nearest') - self.conv1 = conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False) - self.conv2 = conv2d(out_planes, out_planes*2, 3, 1, 1, bias=False) - - which_bn = functools.partial(CCBN, which_linear=linear, input_size=z_dim) - self.bn1 = which_bn(2*out_planes) - self.bn2 = which_bn(2*out_planes) - self.act = GLU() - self.noise = NoiseInjection() - - def forward(self, x, c): - # block 1 - x = self.up(x) - x = self.conv1(x) - x = self.noise(x) - x = self.bn1(x, c) - x = self.act(x) - - # block 2 - x = self.conv2(x) - x = self.noise(x) - x = self.bn2(x, c) - x = self.act(x) - - return x - - -class SEBlock(nn.Module): - def __init__(self, ch_in, ch_out): - super().__init__() - self.main = nn.Sequential( - nn.AdaptiveAvgPool2d(4), - conv2d(ch_in, ch_out, 4, 1, 0, bias=False), - Swish(), - conv2d(ch_out, ch_out, 1, 1, 0, bias=False), - nn.Sigmoid(), - ) - - def forward(self, feat_small, feat_big): - return feat_big * self.main(feat_small) - - -### Downblocks - - -class SeparableConv2d(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, bias=False): - 
super(SeparableConv2d, self).__init__() - self.depthwise = conv2d(in_channels, in_channels, kernel_size=kernel_size, - groups=in_channels, bias=bias, padding=1) - self.pointwise = conv2d(in_channels, out_channels, - kernel_size=1, bias=bias) - - def forward(self, x): - out = self.depthwise(x) - out = self.pointwise(out) - return out - - -class DownBlock(nn.Module): - def __init__(self, in_planes, out_planes, separable=False): - super().__init__() - if not separable: - self.main = nn.Sequential( - conv2d(in_planes, out_planes, 4, 2, 1), - NormLayer(out_planes), - nn.LeakyReLU(0.2, inplace=True), - ) - else: - self.main = nn.Sequential( - SeparableConv2d(in_planes, out_planes, 3), - NormLayer(out_planes), - nn.LeakyReLU(0.2, inplace=True), - nn.AvgPool2d(2, 2), - ) - - def forward(self, feat): - return self.main(feat) - - -class DownBlockPatch(nn.Module): - def __init__(self, in_planes, out_planes, separable=False): - super().__init__() - self.main = nn.Sequential( - DownBlock(in_planes, out_planes, separable), - conv2d(out_planes, out_planes, 1, 1, 0, bias=False), - NormLayer(out_planes), - nn.LeakyReLU(0.2, inplace=True), - ) - - def forward(self, feat): - return self.main(feat) - - -### CSM - - -class ResidualConvUnit(nn.Module): - def __init__(self, cin, activation, bn): - super().__init__() - self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1, bias=True) - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, x): - return self.skip_add.add(self.conv(x), x) - - -class FeatureFusionBlock(nn.Module): - def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, lowest=False): - super().__init__() - - self.deconv = deconv - self.align_corners = align_corners - - self.expand = expand - out_features = features - if self.expand==True: - out_features = features//2 - - self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, *xs): - output = xs[0] - - if len(xs) == 2: - output = self.skip_add.add(output, xs[1]) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=self.align_corners - ) - - output = self.out_conv(output) - - return output - - -### Misc - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - self.weight = nn.Parameter(torch.zeros(1), requires_grad=True) - - def forward(self, feat, noise=None): - if noise is None: - batch, _, height, width = feat.shape - noise = torch.randn(batch, 1, height, width).to(feat.device) - - return feat + self.weight * noise - - -class CCBN(nn.Module): - ''' conditional batchnorm ''' - def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1): - super().__init__() - self.output_size, self.input_size = output_size, input_size - - # Prepare gain and bias layers - self.gain = which_linear(input_size, output_size) - self.bias = which_linear(input_size, output_size) - - # epsilon to avoid dividing by 0 - self.eps = eps - # Momentum - self.momentum = momentum - - self.register_buffer('stored_mean', torch.zeros(output_size)) - self.register_buffer('stored_var', torch.ones(output_size)) - - def forward(self, x, y): - # Calculate class-conditional gains and biases - gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1) - bias = self.bias(y).view(y.size(0), -1, 1, 1) - out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None, - self.training, 0.1, self.eps) - return out * 
gain + bias - - -class Interpolate(nn.Module): - """Interpolation module.""" - - def __init__(self, size, mode='bilinear', align_corners=False): - """Init. - Args: - scale_factor (float): scaling - mode (str): interpolation mode - """ - super(Interpolate, self).__init__() - - self.interp = nn.functional.interpolate - self.size = size - self.mode = mode - self.align_corners = align_corners - - def forward(self, x): - """Forward pass. - Args: - x (tensor): input - Returns: - tensor: interpolated data - """ - - x = self.interp( - x, - size=self.size, - mode=self.mode, - align_corners=self.align_corners, - ) - - return x diff --git a/spaces/yiguid/ChatGPT/assets/Kelpy-Codos.js b/spaces/yiguid/ChatGPT/assets/Kelpy-Codos.js deleted file mode 100644 index cfbaeedb4f371dfb5fe157db545b364046fca3e1..0000000000000000000000000000000000000000 --- a/spaces/yiguid/ChatGPT/assets/Kelpy-Codos.js +++ /dev/null @@ -1,76 +0,0 @@ -// ==UserScript== -// @name Kelpy Codos -// @namespace https://github.com/Keldos-Li/Kelpy-Codos -// @version 1.0.5 -// @author Keldos; https://keldos.me/ -// @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially. -// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22) -// @license GPL-3.0 -// @grant none -// ==/UserScript== - -(function () { - 'use strict'; - - function addCopyButton(pre) { - var code = pre.querySelector('code'); - if (!code) { - return; // 如果没有找到 元素,则不添加按钮 - } - var firstChild = code.firstChild; - if (!firstChild) { - return; // 如果 元素没有子节点,则不添加按钮 - } - var button = document.createElement('button'); - button.textContent = '\uD83D\uDCCE'; // 使用 📎 符号作为“复制”按钮的文本 - button.style.position = 'relative'; - button.style.float = 'right'; - button.style.fontSize = '1em'; // 可选:调整按钮大小 - button.style.background = 'none'; // 可选:去掉背景颜色 - button.style.border = 'none'; // 可选:去掉边框 - button.style.cursor = 'pointer'; // 可选:显示指针样式 - button.addEventListener('click', function () { - var range = document.createRange(); - range.selectNodeContents(code); - range.setStartBefore(firstChild); // 将范围设置为第一个子节点之前 - var selection = window.getSelection(); - selection.removeAllRanges(); - selection.addRange(range); - - try { - var success = document.execCommand('copy'); - if (success) { - button.textContent = '\u2714'; - setTimeout(function () { - button.textContent = '\uD83D\uDCCE'; // 恢复按钮为“复制” - }, 2000); - } else { - button.textContent = '\u2716'; - } - } catch (e) { - console.error(e); - button.textContent = '\u2716'; - } - - selection.removeAllRanges(); - }); - code.insertBefore(button, firstChild); // 将按钮插入到第一个子元素之前 - } - - function handleNewElements(mutationsList, observer) { - for (var mutation of mutationsList) { - if (mutation.type === 'childList') { - for (var node of mutation.addedNodes) { - if (node.nodeName === 'PRE') { - addCopyButton(node); - } - } - } - } - } - - var observer = new MutationObserver(handleNewElements); - observer.observe(document.documentElement, { childList: true, subtree: true }); - - document.querySelectorAll('pre').forEach(addCopyButton); -})(); diff --git a/spaces/yiguid/ChatGPT/chat_func.py b/spaces/yiguid/ChatGPT/chat_func.py deleted file mode 100644 index 374178f3d22c5c23d1dc2952336cdc298a77315d..0000000000000000000000000000000000000000 --- a/spaces/yiguid/ChatGPT/chat_func.py +++ /dev/null @@ -1,456 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import os -import requests -import urllib3 - -from tqdm import tqdm -import colorama 
-from duckduckgo_search import ddg -import asyncio -import aiohttp - -from presets import * -from llama_func import * -from utils import * - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -initial_prompt = "You are a helpful assistant." -API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -def get_response( - openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model -): - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - history = [construct_system(system_prompt), *history] - - payload = { - "model": selected_model, - "messages": history, # [{"role": "user", "content": f"{inputs}"}], - "temperature": temperature, # 1.0, - "top_p": top_p, # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - if stream: - timeout = timeout_streaming - else: - timeout = timeout_all - - # 获取环境变量中的代理设置 - http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy") - https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy") - - # 如果存在代理设置,使用它们 - proxies = {} - if http_proxy: - logging.info(f"Using HTTP proxy: {http_proxy}") - proxies["http"] = http_proxy - if https_proxy: - logging.info(f"Using HTTPS proxy: {https_proxy}") - proxies["https"] = https_proxy - - # 如果有代理,使用代理发送请求,否则使用默认设置发送请求 - if proxies: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - proxies=proxies, - ) - else: - response = requests.post( - API_URL, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - ) - return response - - -def stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - def get_return_value(): - return chatbot, history, status_text, all_token_counts - - logging.info("实时回答模式") - partial_words = "" - counter = 0 - status_text = "开始实时传输回答……" - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - user_token_count = 0 - if len(all_token_counts) == 0: - system_prompt_token_count = count_token(construct_system(system_prompt)) - user_token_count = ( - count_token(construct_user(inputs)) + system_prompt_token_count - ) - else: - user_token_count = count_token(construct_user(inputs)) - all_token_counts.append(user_token_count) - logging.info(f"输入token计数: {user_token_count}") - yield get_return_value() - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - True, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - yield get_return_value() - return - except requests.exceptions.ReadTimeout: - status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt - yield get_return_value() - return - - yield get_return_value() - error_json_str = "" - - for chunk in tqdm(response.iter_lines()): - if counter == 0: - counter += 1 - continue - counter += 1 - # check whether each line is non-empty - if chunk: - chunk = 
chunk.decode() - chunklength = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - logging.info(chunk) - error_json_str += chunk - status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}" - yield get_return_value() - continue - # decode each line as response data is in bytes - if chunklength > 6 and "delta" in chunk["choices"][0]: - finish_reason = chunk["choices"][0]["finish_reason"] - status_text = construct_token_message( - sum(all_token_counts), stream=True - ) - if finish_reason == "stop": - yield get_return_value() - break - try: - partial_words = ( - partial_words + chunk["choices"][0]["delta"]["content"] - ) - except KeyError: - status_text = ( - standard_error_msg - + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " - + str(sum(all_token_counts)) - ) - yield get_return_value() - break - history[-1] = construct_assistant(partial_words) - chatbot[-1] = (chatbot[-1][0], partial_words+display_append) - all_token_counts[-1] += 1 - yield get_return_value() - - -def predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - logging.info("一次性回答模式") - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - all_token_counts.append(count_token(construct_user(inputs))) - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - False, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - return chatbot, history, status_text, all_token_counts - except requests.exceptions.ProxyError: - status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - except requests.exceptions.SSLError: - status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - history[-1] = construct_assistant(content) - chatbot[-1] = (chatbot[-1][0], content+display_append) - total_token_count = response["usage"]["total_tokens"] - all_token_counts[-1] = total_token_count - sum(all_token_counts) - status_text = construct_token_message(total_token_count) - return chatbot, history, status_text, all_token_counts - - -def predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - use_websearch=False, - files = None, - should_check_token_count=True, -): # repetition_penalty, top_k - logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL) - if files: - msg = "构建索引中……(这可能需要比较久的时间)" - logging.info(msg) - yield chatbot, history, msg, all_token_counts - index = construct_index(openai_api_key, file_src=files) - msg = "索引构建完成,获取回答中……" - yield chatbot, history, msg, all_token_counts - history, chatbot, status_text = chat_ai(openai_api_key, index, inputs, history, chatbot) - yield chatbot, history, status_text, all_token_counts - return - - old_inputs = "" - link_references = [] - if use_websearch: - search_results = ddg(inputs, max_results=5) - old_inputs = inputs - web_results = [] - for idx, result in enumerate(search_results): - 
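-            # Each DuckDuckGo hit is logged, its snippet is added to web_results for the rewritten prompt,
-            # and a numbered markdown link to the source domain is collected for display under the answer.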
logging.info(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}') - link_references.append(f"{idx+1}. [{domain_name}]({result['href']})\n") - link_references = "\n\n" + "".join(link_references) - inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", inputs) - .replace("{web_results}", "\n\n".join(web_results)) - ) - else: - link_references = "" - - if len(openai_api_key) != 51: - status_text = standard_error_msg + no_apikey_msg - logging.info(status_text) - chatbot.append((inputs, "")) - if len(history) == 0: - history.append(construct_user(inputs)) - history.append("") - all_token_counts.append(0) - else: - history[-2] = construct_user(inputs) - yield chatbot, history, status_text, all_token_counts - return - - yield chatbot, history, "开始生成回答……", all_token_counts - - if stream: - logging.info("使用流式传输") - iter = stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - for chatbot, history, status_text, all_token_counts in iter: - yield chatbot, history, status_text, all_token_counts - else: - logging.info("不使用流式传输") - chatbot, history, status_text, all_token_counts = predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=link_references - ) - yield chatbot, history, status_text, all_token_counts - - logging.info(f"传输完毕。当前token计数为{all_token_counts}") - if len(history) > 1 and history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if stream: - max_token = max_token_streaming - else: - max_token = max_token_all - - if sum(all_token_counts) > max_token and should_check_token_count: - status_text = f"精简token中{all_token_counts}/{max_token}" - logging.info(status_text) - yield chatbot, history, status_text, all_token_counts - iter = reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - all_token_counts, - top_p, - temperature, - max_token//2, - selected_model=selected_model, - ) - for chatbot, history, status_text, all_token_counts in iter: - status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}" - yield chatbot, history, status_text, all_token_counts - - -def retry( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], -): - logging.info("重试中……") - if len(history) == 0: - yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count - return - history.pop() - inputs = history.pop()["content"] - token_count.pop() - iter = predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - token_count, - top_p, - temperature, - stream=stream, - selected_model=selected_model, - ) - logging.info("重试中……") - for x in iter: - yield x - logging.info("重试完毕") - - -def reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - max_token_count, - selected_model=MODELS[0], -): - logging.info("开始减少token数量……") - iter = predict( - openai_api_key, - system_prompt, - history, - summarize_prompt, - chatbot, - token_count, - top_p, - temperature, - selected_model=selected_model, - should_check_token_count=False, - ) - 
logging.info(f"chatbot: {chatbot}") - flag = False - for chatbot, history, status_text, previous_token_count in iter: - num_chat = find_n(previous_token_count, max_token_count) - if flag: - chatbot = chatbot[:-1] - flag = True - history = history[-2*num_chat:] if num_chat > 0 else [] - token_count = previous_token_count[-num_chat:] if num_chat > 0 else [] - msg = f"保留了最近{num_chat}轮对话" - yield chatbot, history, msg + "," + construct_token_message( - sum(token_count) if len(token_count) > 0 else 0, - ), token_count - logging.info(msg) - logging.info("减少token数量完毕") \ No newline at end of file diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bart/tokenization_bart.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bart/tokenization_bart.py deleted file mode 100644 index 7dd008c4dbbaf2a1034e9e9830340a8e8055d773..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bart/tokenization_bart.py +++ /dev/null @@ -1,421 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os -from functools import lru_cache -from typing import List, Optional, Tuple - -import regex as re - -from ...tokenization_utils import AddedToken, PreTrainedTokenizer -from ...utils import logging - - -logger = logging.get_logger(__name__) - - -VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} - -# See all BART models at https://huggingface.co/models?filter=bart -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", - "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", - "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", - "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", - "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", - "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", - }, - "merges_file": { - "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", - "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", - "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", - "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", - "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", - "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "facebook/bart-base": 1024, - "facebook/bart-large": 1024, - "facebook/bart-large-mnli": 1024, - 
"facebook/bart-large-cnn": 1024, - "facebook/bart-large-xsum": 1024, - "yjernite/bart_eli5": 1024, -} - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control - characters the bpe code barfs on. - - The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab - if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for - decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup - tables between utf-8 bytes and unicode strings. - """ - bs = ( - list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2**8): - if b not in bs: - bs.append(b) - cs.append(2**8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """ - Return set of symbol pairs in a word. - - Word is represented as tuple of symbols (symbols being variable-length strings). - """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -class BartTokenizer(PreTrainedTokenizer): - """ - Constructs a BART tokenizer, which is smilar to the ROBERTa tokenizer, using byte-level Byte-Pair-Encoding. - - This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will - be encoded differently whether it is at the beginning of the sentence (without space) or not: - - ```python - >>> from transformers import BartTokenizer - - >>> tokenizer = BartTokenizer.from_pretrained("facebook/bart-base") - >>> tokenizer("Hello world")["input_ids"] - [0, 31414, 232, 2] - - >>> tokenizer(" Hello world")["input_ids"] - [0, 20920, 232, 2] - ``` - - You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you - call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. - - - - When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). - - - - This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to - this superclass for more information regarding those methods. - - Args: - vocab_file (`str`): - Path to the vocabulary file. - merges_file (`str`): - Path to the merges file. - errors (`str`, *optional*, defaults to `"replace"`): - Paradigm to follow when decoding bytes to UTF-8. See - [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. - bos_token (`str`, *optional*, defaults to `""`): - The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. - - - - When building a sequence using special tokens, this is not the token that is used for the beginning of - sequence. The token used is the `cls_token`. - - - - eos_token (`str`, *optional*, defaults to `""`): - The end of sequence token. - - - - When building a sequence using special tokens, this is not the token that is used for the end of sequence. - The token used is the `sep_token`. - - - - sep_token (`str`, *optional*, defaults to `"
    "`): - The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for - sequence classification or for a text and a question for question answering. It is also used as the last - token of a sequence built with special tokens. - cls_token (`str`, *optional*, defaults to `""`): - The classifier token which is used when doing sequence classification (classification of the whole sequence - instead of per-token classification). It is the first token of the sequence when built with special tokens. - unk_token (`str`, *optional*, defaults to `""`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - pad_token (`str`, *optional*, defaults to `""`): - The token used for padding, for example when batching sequences of different lengths. - mask_token (`str`, *optional*, defaults to `""`): - The token used for masking values. This is the token used when training this model with masked language - modeling. This is the token which the model will try to predict. - add_prefix_space (`bool`, *optional*, defaults to `False`): - Whether or not to add an initial space to the input. This allows to treat the leading word just as any - other word. (BART tokenizer detect beginning of words by the preceding space). - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - - def __init__( - self, - vocab_file, - merges_file, - errors="replace", - bos_token="", - eos_token="", - sep_token="", - cls_token="", - unk_token="", - pad_token="", - mask_token="", - add_prefix_space=False, - **kwargs, - ): - bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token - eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token - sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token - cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token - unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token - pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token - - # Mask token behave like a normal word, i.e. include the space before it - # TODO seems like both slow and fast actually don't strip left and right soooooooo yeah. 
See `test_embeded_special_tokens` - # Also this not only will strip the spaces but any punctuation - mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token - - with open(vocab_file, encoding="utf-8") as vocab_handle: - self.encoder = json.load(vocab_handle) - self.decoder = {v: k for k, v in self.encoder.items()} - self.errors = errors # how to handle errors in decoding - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - with open(merges_file, encoding="utf-8") as merges_handle: - bpe_merges = merges_handle.read().split("\n")[1:-1] - bpe_merges = [tuple(merge.split()) for merge in bpe_merges] - self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) - self.cache = {} - self.add_prefix_space = add_prefix_space - - # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions - self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") - - super().__init__( - errors=errors, - bos_token=bos_token, - eos_token=eos_token, - unk_token=unk_token, - sep_token=sep_token, - cls_token=cls_token, - pad_token=pad_token, - mask_token=mask_token, - add_prefix_space=add_prefix_space, - **kwargs, - ) - - @property - def vocab_size(self): - return len(self.encoder) - - def get_vocab(self): - return dict(self.encoder, **self.added_tokens_encoder) - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token) - pairs = get_pairs(word) - - if not pairs: - return token - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - except ValueError: - new_word.extend(word[i:]) - break - else: - new_word.extend(word[i:j]) - i = j - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def _tokenize(self, text): - """Tokenize a string.""" - bpe_tokens = [] - for token in re.findall(self.pat, text): - token = "".join( - self.byte_encoder[b] for b in token.encode("utf-8") - ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) - bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) - return bpe_tokens - - def _convert_token_to_id(self, token): - """Converts a token (str) in an id using the vocab.""" - return self.encoder.get(token, self.encoder.get(self.unk_token)) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.decoder.get(index) - - def convert_tokens_to_string(self, tokens): - """Converts a sequence of tokens (string) in a single string.""" - text = "".join(tokens) - text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) - return text - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") - return - vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if 
filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) - merge_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] - ) - - with open(vocab_file, "w", encoding="utf-8") as f: - f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") - - index = 0 - with open(merge_file, "w", encoding="utf-8") as writer: - writer.write("#version: 0.2\n") - for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): - if index != token_index: - logger.warning( - f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." - " Please check that the tokenizer is not corrupted!" - ) - index = token_index - writer.write(" ".join(bpe_tokens) + "\n") - index += 1 - - return vocab_file, merge_file - - def build_inputs_with_special_tokens( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A BART sequence has the following format: - - - single sequence: ` X ` - - pair of sequences: ` A B
    ` - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. - """ - if token_ids_1 is None: - return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] - cls = [self.cls_token_id] - sep = [self.sep_token_id] - return cls + token_ids_0 + sep + sep + token_ids_1 + sep - - def get_special_tokens_mask( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False - ) -> List[int]: - """ - Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` method. - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not the token list is already formatted with special tokens for the model. - - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - if already_has_special_tokens: - return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) - - if token_ids_1 is None: - return [1] + ([0] * len(token_ids_0)) + [1] - return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] - - def create_token_type_ids_from_sequences( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: - """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. BART does not - make use of token type ids, therefore a list of zeros is returned. - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of zeros. - """ - sep = [self.sep_token_id] - cls = [self.cls_token_id] - - if token_ids_1 is None: - return len(cls + token_ids_0 + sep) * [0] - return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] - - def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): - add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) - if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): - text = " " + text - return (text, kwargs) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta_v2/tokenization_deberta_v2_fast.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta_v2/tokenization_deberta_v2_fast.py deleted file mode 100644 index dab376ce95be8a27a240549d7cde6219c05acdd7..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta_v2/tokenization_deberta_v2_fast.py +++ /dev/null @@ -1,250 +0,0 @@ -# coding=utf-8 -# Copyright 2020 Microsoft and the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Fast Tokenization class for model DeBERTa.""" - -import os -from shutil import copyfile -from typing import Optional, Tuple - -from ...file_utils import is_sentencepiece_available -from ...tokenization_utils_fast import PreTrainedTokenizerFast -from ...utils import logging - - -if is_sentencepiece_available(): - from .tokenization_deberta_v2 import DebertaV2Tokenizer -else: - DebertaV2Tokenizer = None - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model", - "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model", - "microsoft/deberta-v2-xlarge-mnli": ( - "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model" - ), - "microsoft/deberta-v2-xxlarge-mnli": ( - "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model" - ), - } -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "microsoft/deberta-v2-xlarge": 512, - "microsoft/deberta-v2-xxlarge": 512, - "microsoft/deberta-v2-xlarge-mnli": 512, - "microsoft/deberta-v2-xxlarge-mnli": 512, -} - -PRETRAINED_INIT_CONFIGURATION = { - "microsoft/deberta-v2-xlarge": {"do_lower_case": False}, - "microsoft/deberta-v2-xxlarge": {"do_lower_case": False}, - "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False}, - "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False}, -} - - -class DebertaV2TokenizerFast(PreTrainedTokenizerFast): - r""" - Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). - - Args: - vocab_file (`str`): - [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that - contains the vocabulary necessary to instantiate a tokenizer. - do_lower_case (`bool`, *optional*, defaults to `False`): - Whether or not to lowercase the input when tokenizing. - bos_token (`string`, *optional*, defaults to `"[CLS]"`): - The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. - When building a sequence using special tokens, this is not the token that is used for the beginning of - sequence. The token used is the `cls_token`. - eos_token (`string`, *optional*, defaults to `"[SEP]"`): - The end of sequence token. When building a sequence using special tokens, this is not the token that is - used for the end of sequence. The token used is the `sep_token`. - unk_token (`str`, *optional*, defaults to `"[UNK]"`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - sep_token (`str`, *optional*, defaults to `"[SEP]"`): - The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for - sequence classification or for a text and a question for question answering. It is also used as the last - token of a sequence built with special tokens. 
- pad_token (`str`, *optional*, defaults to `"[PAD]"`): - The token used for padding, for example when batching sequences of different lengths. - cls_token (`str`, *optional*, defaults to `"[CLS]"`): - The classifier token which is used when doing sequence classification (classification of the whole sequence - instead of per-token classification). It is the first token of the sequence when built with special tokens. - mask_token (`str`, *optional*, defaults to `"[MASK]"`): - The token used for masking values. This is the token used when training this model with masked language - modeling. This is the token which the model will try to predict. - sp_model_kwargs (`dict`, *optional*): - Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for - SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, - to set: - - - `enable_sampling`: Enable subword regularization. - - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - - - `nbest_size = {0,1}`: No sampling is performed. - - `nbest_size > 1`: samples from the nbest_size results. - - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) - using forward-filtering-and-backward-sampling algorithm. - - - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for - BPE-dropout. - """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - slow_tokenizer_class = DebertaV2Tokenizer - - def __init__( - self, - vocab_file=None, - tokenizer_file=None, - do_lower_case=False, - split_by_punct=False, - bos_token="[CLS]", - eos_token="[SEP]", - unk_token="[UNK]", - sep_token="[SEP]", - pad_token="[PAD]", - cls_token="[CLS]", - mask_token="[MASK]", - **kwargs, - ) -> None: - super().__init__( - vocab_file, - tokenizer_file=tokenizer_file, - do_lower_case=do_lower_case, - bos_token=bos_token, - eos_token=eos_token, - unk_token=unk_token, - sep_token=sep_token, - pad_token=pad_token, - cls_token=cls_token, - mask_token=mask_token, - split_by_punct=split_by_punct, - **kwargs, - ) - - self.do_lower_case = do_lower_case - self.split_by_punct = split_by_punct - self.vocab_file = vocab_file - - @property - def can_save_slow_tokenizer(self) -> bool: - return os.path.isfile(self.vocab_file) if self.vocab_file else False - - def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): - """ - Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and - adding special tokens. A DeBERTa sequence has the following format: - - - single sequence: [CLS] X [SEP] - - pair of sequences: [CLS] A [SEP] B [SEP] - - Args: - token_ids_0 (`List[int]`): - List of IDs to which the special tokens will be added. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
- """ - - if token_ids_1 is None: - return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] - cls = [self.cls_token_id] - sep = [self.sep_token_id] - return cls + token_ids_0 + sep + token_ids_1 + sep - - def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): - """ - Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding - special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - already_has_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not the token list is already formatted with special tokens for the model. - - Returns: - `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. - """ - - if already_has_special_tokens: - return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) - - if token_ids_1 is not None: - return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] - return [1] + ([0] * len(token_ids_0)) + [1] - - def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): - """ - Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa - sequence pair mask has the following format: - - ``` - 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 - | first sequence | second sequence | - ``` - - If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). - - Args: - token_ids_0 (`List[int]`): - List of IDs. - token_ids_1 (`List[int]`, *optional*): - Optional second list of IDs for sequence pairs. - - Returns: - `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). - """ - sep = [self.sep_token_id] - cls = [self.cls_token_id] - if token_ids_1 is None: - return len(cls + token_ids_0 + sep) * [0] - return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - if not self.can_save_slow_tokenizer: - raise ValueError( - "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " - "tokenizer." - ) - - if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") - return - out_vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) - - if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): - copyfile(self.vocab_file, out_vocab_file) - - return (out_vocab_file,) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/open_llama/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/open_llama/__init__.py deleted file mode 100644 index 446c9f076d31347c496300f432908d56895f7e67..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deprecated/open_llama/__init__.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2023 EleutherAI and The HuggingFace Inc. team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING - -from ....utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_open_llama": ["OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenLlamaConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_open_llama"] = ["LlamaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_open_llama_fast"] = ["LlamaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_open_llama"] = [ - "OpenLlamaForCausalLM", - "OpenLlamaModel", - "OpenLlamaPreTrainedModel", - "OpenLlamaForSequenceClassification", - ] - - -if TYPE_CHECKING: - from .configuration_open_llama import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenLlamaConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from transformers import LlamaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from transformers import LlamaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_open_llama import ( - OpenLlamaForCausalLM, - OpenLlamaForSequenceClassification, - OpenLlamaModel, - OpenLlamaPreTrainedModel, - ) - - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/onnxexport/model_onnx_speaker_mix.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/onnxexport/model_onnx_speaker_mix.py deleted file mode 100644 index 355e590da30a4651925ffb24938b8c2af558c098..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/onnxexport/model_onnx_speaker_mix.py +++ /dev/null @@ -1,350 +0,0 @@ -import torch -from torch import nn -from torch.nn import functional as F -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import utils -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - 
gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - kernel_size, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.gin_channels = gin_channels - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_mask, f0=None, z=None): - x = x + self.f0_emb(f0).transpose(1, 2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + z * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, 
padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class F0Decoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=0): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.spk_channels = spk_channels - - self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1) - self.decoder = attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1) - self.cond = nn.Conv1d(spk_channels, hidden_channels, 1) - - def forward(self, x, norm_f0, x_mask, spk_emb=None): - x = torch.detach(x) - if spk_emb is not None: - x = x + self.cond(spk_emb) - x += self.f0_prenet(norm_f0) - x = self.prenet(x) * x_mask - x = self.decoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - sampling_rate=44100, - **kwargs): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = 
upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2) - - self.enc_p = TextEncoder( - inter_channels, - hidden_channels, - filter_channels=filter_channels, - n_heads=n_heads, - n_layers=n_layers, - kernel_size=kernel_size, - p_dropout=p_dropout - ) - hps = { - "sampling_rate": sampling_rate, - "inter_channels": inter_channels, - "resblock": resblock, - "resblock_kernel_sizes": resblock_kernel_sizes, - "resblock_dilation_sizes": resblock_dilation_sizes, - "upsample_rates": upsample_rates, - "upsample_initial_channel": upsample_initial_channel, - "upsample_kernel_sizes": upsample_kernel_sizes, - "gin_channels": gin_channels, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - self.f0_decoder = F0Decoder( - 1, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=gin_channels - ) - self.emb_uv = nn.Embedding(2, hidden_channels) - self.predict_f0 = False - self.speaker_map = [] - self.export_mix = False - - def export_chara_mix(self, n_speakers_mix): - self.speaker_map = torch.zeros((n_speakers_mix, 1, 1, self.gin_channels)) - for i in range(n_speakers_mix): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - self.export_mix = True - - def forward(self, c, f0, mel2ph, uv, noise=None, g=None, cluster_infer_ratio=0.1): - decoder_inp = F.pad(c, [0, 0, 1, 0]) - mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, c.shape[-1]]) - c = torch.gather(decoder_inp, 1, mel2ph_).transpose(1, 2) # [B, T, H] - - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - - if self.export_mix: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2) - - if self.predict_f0: - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) 
/ 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1) - - z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0) - return o diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/ContentVec256L9.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/ContentVec256L9.py deleted file mode 100644 index b0089c789cd87cfd3b1badb2fc45cb1b88041eab..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/ContentVec256L9.py +++ /dev/null @@ -1,35 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import torch -from fairseq import checkpoint_utils - -class ContentVec256L9(SpeechEncoder): - def __init__(self,vec_path = "pretrain/checkpoint_best_legacy_500.pt",device=None): - print("load model(s) from {}".format(vec_path)) - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [vec_path], - suffix="", - ) - self.hidden_dim = 256 - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.model = models[0].to(self.dev) - self.model.eval() - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.to(wav.device), - "padding_mask": padding_mask.to(wav.device), - "output_layer": 9, # layer 9 - } - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - feats = self.model.final_proj(logits[0]) - return feats.transpose(1, 2) diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/auto_slicer.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/auto_slicer.py deleted file mode 100644 index 090d913455f8153b7f39ee85aba068b3ba28230a..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/auto_slicer.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import numpy as np -import librosa -import soundfile as sf -from modules.slicer2 import Slicer - -class AutoSlicer: - def __init__(self): - self.slicer_params = { - "threshold": -40, - "min_length": 5000, - "min_interval": 300, - "hop_size": 10, - "max_sil_kept": 500, - } - self.original_min_interval = self.slicer_params["min_interval"] - - def auto_slice(self, filename, input_dir, output_dir, max_sec): - audio, sr = librosa.load(os.path.join(input_dir, filename), sr=None, mono=False) - slicer = Slicer(sr=sr, **self.slicer_params) - chunks = slicer.slice(audio) - files_to_delete = [] - for i, chunk in enumerate(chunks): - if len(chunk.shape) > 1: - chunk = chunk.T - output_filename = f"{os.path.splitext(filename)[0]}_{i}" - output_filename = "".join(c for c in output_filename if c.isascii() or c == "_") + ".wav" - output_filepath = os.path.join(output_dir, output_filename) - sf.write(output_filepath, chunk, sr) - #Check and re-slice audio that more than max_sec. 
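-            # Re-load the chunk just written and, while it still exceeds max_sec, halve min_interval and
-            # slice it again; the oversized intermediate file is queued in files_to_delete and removed later.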
- while True: - new_audio, sr = librosa.load(output_filepath, sr=None, mono=False) - if librosa.get_duration(y=new_audio, sr=sr) <= max_sec: - break - self.slicer_params["min_interval"] = self.slicer_params["min_interval"] // 2 - if self.slicer_params["min_interval"] >= self.slicer_params["hop_size"]: - new_chunks = Slicer(sr=sr, **self.slicer_params).slice(new_audio) - for j, new_chunk in enumerate(new_chunks): - if len(new_chunk.shape) > 1: - new_chunk = new_chunk.T - new_output_filename = f"{os.path.splitext(output_filename)[0]}_{j}.wav" - sf.write(os.path.join(output_dir, new_output_filename), new_chunk, sr) - files_to_delete.append(output_filepath) - else: - break - self.slicer_params["min_interval"] = self.original_min_interval - for file_path in files_to_delete: - if os.path.exists(file_path): - os.remove(file_path) - - def merge_short(self, output_dir, max_sec, min_sec): - short_files = [] - for filename in os.listdir(output_dir): - filepath = os.path.join(output_dir, filename) - if filename.endswith(".wav"): - audio, sr = librosa.load(filepath, sr=None, mono=False) - duration = librosa.get_duration(y=audio, sr=sr) - if duration < min_sec: - short_files.append((filepath, audio, duration)) - short_files.sort(key=lambda x: x[2], reverse=True) - merged_audio = [] - current_duration = 0 - for filepath, audio, duration in short_files: - if current_duration + duration <= max_sec: - merged_audio.append(audio) - current_duration += duration - os.remove(filepath) - else: - if merged_audio: - output_audio = np.concatenate(merged_audio, axis=-1) - if len(output_audio.shape) > 1: - output_audio = output_audio.T - output_filename = f"merged_{len(os.listdir(output_dir))}.wav" - sf.write(os.path.join(output_dir, output_filename), output_audio, sr) - merged_audio = [audio] - current_duration = duration - os.remove(filepath) - if merged_audio and current_duration >= min_sec: - output_audio = np.concatenate(merged_audio, axis=-1) - if len(output_audio.shape) > 1: - output_audio = output_audio.T - output_filename = f"merged_{len(os.listdir(output_dir))}.wav" - sf.write(os.path.join(output_dir, output_filename), output_audio, sr) - - def slice_count(self, input_dir, output_dir): - orig_duration = final_duration = 0 - for file in os.listdir(input_dir): - if file.endswith(".wav"): - _audio, _sr = librosa.load(os.path.join(input_dir, file), sr=None, mono=False) - orig_duration += librosa.get_duration(y=_audio, sr=_sr) - wav_files = [file for file in os.listdir(output_dir) if file.endswith(".wav")] - num_files = len(wav_files) - max_duration = -1 - min_duration = float("inf") - for file in wav_files: - file_path = os.path.join(output_dir, file) - audio, sr = librosa.load(file_path, sr=None, mono=False) - duration = librosa.get_duration(y=audio, sr=sr) - final_duration += float(duration) - if duration > max_duration: - max_duration = float(duration) - if duration < min_duration: - min_duration = float(duration) - return num_files, max_duration, min_duration, orig_duration, final_duration - - diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py deleted file mode 100644 index 7c7890f8bec5db44098fe1a38d26eb13231f7063..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/utils/logger.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import atexit -import functools -import logging -import os -import sys -import time -from collections import Counter -import torch -from tabulate import tabulate -from termcolor import colored - -from detectron2.utils.file_io import PathManager - -__all__ = ["setup_logger", "log_first_n", "log_every_n", "log_every_n_seconds"] - - -class _ColorfulFormatter(logging.Formatter): - def __init__(self, *args, **kwargs): - self._root_name = kwargs.pop("root_name") + "." - self._abbrev_name = kwargs.pop("abbrev_name", "") - if len(self._abbrev_name): - self._abbrev_name = self._abbrev_name + "." - super(_ColorfulFormatter, self).__init__(*args, **kwargs) - - def formatMessage(self, record): - record.name = record.name.replace(self._root_name, self._abbrev_name) - log = super(_ColorfulFormatter, self).formatMessage(record) - if record.levelno == logging.WARNING: - prefix = colored("WARNING", "red", attrs=["blink"]) - elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: - prefix = colored("ERROR", "red", attrs=["blink", "underline"]) - else: - return log - return prefix + " " + log - - -@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers -def setup_logger( - output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None -): - """ - Initialize the detectron2 logger and set its verbosity level to "DEBUG". - - Args: - output (str): a file name or a directory to save log. If None, will not save log file. - If ends with ".txt" or ".log", assumed to be a file name. - Otherwise, logs will be saved to `output/log.txt`. - name (str): the root module name of this logger - abbrev_name (str): an abbreviation of the module, to avoid long names in logs. - Set to "" to not log the root module in logs. - By default, will abbreviate "detectron2" to "d2" and leave other - modules unchanged. - - Returns: - logging.Logger: a logger - """ - logger = logging.getLogger(name) - logger.setLevel(logging.DEBUG) - logger.propagate = False - - if abbrev_name is None: - abbrev_name = "d2" if name == "detectron2" else name - - plain_formatter = logging.Formatter( - "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" - ) - # stdout logging: master only - if distributed_rank == 0: - ch = logging.StreamHandler(stream=sys.stdout) - ch.setLevel(logging.DEBUG) - if color: - formatter = _ColorfulFormatter( - colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", - datefmt="%m/%d %H:%M:%S", - root_name=name, - abbrev_name=str(abbrev_name), - ) - else: - formatter = plain_formatter - ch.setFormatter(formatter) - logger.addHandler(ch) - - # file logging: all workers - if output is not None: - if output.endswith(".txt") or output.endswith(".log"): - filename = output - else: - filename = os.path.join(output, "log.txt") - if distributed_rank > 0: - filename = filename + ".rank{}".format(distributed_rank) - PathManager.mkdirs(os.path.dirname(filename)) - - fh = logging.StreamHandler(_cached_log_stream(filename)) - fh.setLevel(logging.DEBUG) - fh.setFormatter(plain_formatter) - logger.addHandler(fh) - - return logger - - -# cache the opened file object, so that different calls to `setup_logger` -# with the same file name can safely write to the same file. 
-@functools.lru_cache(maxsize=None) -def _cached_log_stream(filename): - # use 1K buffer if writing to cloud storage - io = PathManager.open(filename, "a", buffering=1024 if "://" in filename else -1) - atexit.register(io.close) - return io - - -""" -Below are some other convenient logging methods. -They are mainly adopted from -https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py -""" - - -def _find_caller(): - """ - Returns: - str: module name of the caller - tuple: a hashable key to be used to identify different callers - """ - frame = sys._getframe(2) - while frame: - code = frame.f_code - if os.path.join("utils", "logger.") not in code.co_filename: - mod_name = frame.f_globals["__name__"] - if mod_name == "__main__": - mod_name = "detectron2" - return mod_name, (code.co_filename, frame.f_lineno, code.co_name) - frame = frame.f_back - - -_LOG_COUNTER = Counter() -_LOG_TIMER = {} - - -def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): - """ - Log only for the first n times. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - key (str or tuple[str]): the string(s) can be one of "caller" or - "message", which defines how to identify duplicated logs. - For example, if called with `n=1, key="caller"`, this function - will only log the first call from the same caller, regardless of - the message content. - If called with `n=1, key="message"`, this function will log the - same content only once, even if they are called from different places. - If called with `n=1, key=("caller", "message")`, this function - will not log only if the same caller has logged the same message before. - """ - if isinstance(key, str): - key = (key,) - assert len(key) > 0 - - caller_module, caller_key = _find_caller() - hash_key = () - if "caller" in key: - hash_key = hash_key + caller_key - if "message" in key: - hash_key = hash_key + (msg,) - - _LOG_COUNTER[hash_key] += 1 - if _LOG_COUNTER[hash_key] <= n: - logging.getLogger(name or caller_module).log(lvl, msg) - - -def log_every_n(lvl, msg, n=1, *, name=None): - """ - Log once per n times. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - """ - caller_module, key = _find_caller() - _LOG_COUNTER[key] += 1 - if n == 1 or _LOG_COUNTER[key] % n == 1: - logging.getLogger(name or caller_module).log(lvl, msg) - - -def log_every_n_seconds(lvl, msg, n=1, *, name=None): - """ - Log no more than once per n seconds. - - Args: - lvl (int): the logging level - msg (str): - n (int): - name (str): name of the logger to use. Will use the caller's module by default. - """ - caller_module, key = _find_caller() - last_logged = _LOG_TIMER.get(key, None) - current_time = time.time() - if last_logged is None or current_time - last_logged >= n: - logging.getLogger(name or caller_module).log(lvl, msg) - _LOG_TIMER[key] = current_time - - -def create_small_table(small_dict): - """ - Create a small table using the keys of small_dict as headers. This is only - suitable for small dictionaries. - - Args: - small_dict (dict): a result dictionary of only a few items. - - Returns: - str: the table as a string. 
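 -
 - Example (illustrative values only):
 - create_small_table({"AP": 40.2, "AP50": 61.0})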
- """ - keys, values = tuple(zip(*small_dict.items())) - table = tabulate( - [values], - headers=keys, - tablefmt="pipe", - floatfmt=".3f", - stralign="center", - numalign="center", - ) - return table - - -def _log_api_usage(identifier: str): - """ - Internal function used to log the usage of different detectron2 components - inside facebook's infra. - """ - torch._C._log_api_usage_once("detectron2." + identifier) diff --git a/spaces/zxc314/vits-uma-genshin-honkai/transforms.py b/spaces/zxc314/vits-uma-genshin-honkai/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/zxc314/vits-uma-genshin-honkai/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - 
unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * 
theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet
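-
-# Illustrative usage sketch (not part of the original file; shapes and values are
-# assumptions). With tails='linear', unnormalized_derivatives carries num_bins - 1
-# values per element, because unconstrained_rational_quadratic_spline pads it by one
-# on each side; inputs outside [-tail_bound, tail_bound] pass through unchanged.
-#
-# num_bins, tail_bound = 10, 5.0
-# x = (torch.rand(2, 192, 100) * 2 - 1) * tail_bound
-# w = torch.randn(2, 192, 100, num_bins)
-# h = torch.randn(2, 192, 100, num_bins)
-# d = torch.randn(2, 192, 100, num_bins - 1)
-# y, logabsdet = piecewise_rational_quadratic_transform(
-#     x, w, h, d, inverse=False, tails='linear', tail_bound=tail_bound)
-# x_rec, _ = piecewise_rational_quadratic_transform(
-#     y, w, h, d, inverse=True, tails='linear', tail_bound=tail_bound)
-# # x_rec should match x up to numerical error, since the spline is invertible.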