repo_name
stringlengths
7
71
file_path
stringlengths
5
118
context
list
import_statement
stringlengths
45
12.5k
token_num
int64
641
99.4k
cropped_code
stringlengths
44
17k
all_code
stringlengths
43
754k
next_line
stringlengths
2
330
gold_snippet_index
int64
0
68
created_at
stringlengths
25
25
level
stringclasses
9 values
pchunduri6/rag-demystified
complex_qa.py
[ { "identifier": "generate_subquestions", "path": "subquestion_generator.py", "snippet": "def generate_subquestions(\n question,\n file_names: List[str] = None,\n system_prompt=DEFAULT_SUBQUESTION_GENERATOR_PROMPT,\n user_task=DEFAULT_USER_TASK,\n llm_model=\"gpt-4-0613\",\n):\n \"\"\"Generates a list of subquestions from a user question along with the\n file name and the function to use to answer the question using OpenAI LLM.\n \"\"\"\n FilenameEnum = Enum(\"FilenameEnum\", {x.upper(): x for x in file_names})\n FilenameEnum.__doc__ = f\"The names of the file to use to answer the corresponding subquestion - e.g. {file_names[0]}\"\n\n # Create pydantic class dynamically\n QuestionBundle = create_model(\n \"QuestionBundle\",\n question=(\n str,\n Field(\n None, description=\"The subquestion extracted from the user's question\"\n ),\n ),\n function=(FunctionEnum, Field(None)),\n file_name=(FilenameEnum, Field(None)),\n )\n\n SubQuestionBundleList = create_model(\n \"SubQuestionBundleList\",\n subquestion_bundle_list=(\n List[QuestionBundle],\n Field(\n None,\n description=\"A list of subquestions - each item in the list contains a question, a function, and a file name\",\n ),\n ),\n __base__=OpenAISchema,\n )\n\n user_prompt = f\"{user_task}\\n Here is the user question: {question}\"\n\n few_shot_examples = [\n {\n \"role\": \"user\",\n \"content\": \"Compare the population of Atlanta and Toronto?\",\n },\n {\n \"role\": \"function\",\n \"name\": \"SubQuestionBundleList\",\n \"content\": \"\"\"\n {\n \"subquestion_bundle_list\": [\n {\n \"question\": \"What is the population of Atlanta?\",\n \"function\": \"vector_retrieval\",\n \"file_name\": \"Atlanta\"\n },\n {\n \"question\": \"What is the population of Toronto?\"\n \"function\": \"vector_retrieval\",\n \"file_name\": \"Toronto\"\n }\n ]\n }\"\"\",\n },\n {\n \"role\": \"user\",\n \"content\": \"Summarize the history of Chicago and Houston.\",\n },\n {\n \"role\": \"function\",\n \"name\": \"SubQuestionBundleList\",\n \"content\": \"\"\"\n {\n \"subquestion_bundle_list\": [\n {\n \"question\": \"What is the history of Chicago?\",\n \"function\": \"llm_retrieval\",\n \"file_name\": \"Chicago\"\n },\n {\n \"question\": \"What is the history of Houston?\",\n \"function\": \"llm_retrieval\",\n \"file_name\": \"Houston\"\n }\n ]\n }\"\"\",\n },\n ]\n\n response, cost = llm_call(\n model=llm_model,\n function_schema=[SubQuestionBundleList.openai_schema],\n output_schema={\"name\": SubQuestionBundleList.openai_schema[\"name\"]},\n system_prompt=system_prompt,\n user_prompt=user_prompt,\n few_shot_examples=few_shot_examples,\n )\n\n subquestions_list = json.loads(response.choices[0].message.function_call.arguments)\n\n subquestions_pydantic_obj = SubQuestionBundleList(**subquestions_list)\n subquestions_list = subquestions_pydantic_obj.subquestion_bundle_list\n return subquestions_list, cost" }, { "identifier": "llm_call", "path": "openai_utils.py", "snippet": "def llm_call(\n model,\n function_schema=None,\n output_schema=None,\n system_prompt=\"You are an AI assistant that answers user questions using the context provided.\",\n user_prompt=\"Please help me answer the following question:\",\n few_shot_examples=None,\n):\n kwargs = {}\n if function_schema is not None:\n kwargs[\"functions\"] = function_schema\n if output_schema is not None:\n kwargs[\"function_call\"] = output_schema\n\n messages = []\n if system_prompt is not None:\n messages.append({\"role\": \"system\", \"content\": system_prompt})\n if few_shot_examples is not None:\n messages.extend(few_shot_examples)\n if user_prompt is not None:\n messages.append({\"role\": \"user\", \"content\": user_prompt})\n\n response = completion_with_backoff(\n model=model,\n temperature=0,\n messages=messages,\n **kwargs\n )\n\n # print cost of call\n call_cost = llm_call_cost(response)\n print(f\"🤑 LLM call cost: ${call_cost:.4f}\")\n return response, call_cost" } ]
import os import requests import warnings import evadb from dotenv import load_dotenv from pathlib import Path from subquestion_generator import generate_subquestions from openai_utils import llm_call
2,410
""" res_batch = cursor.query( f"""SELECT data FROM {doc_name}_features ORDER BY Similarity(SentenceFeatureExtractor('{question}'),features) LIMIT 3;""" ).df() context_list = [] for i in range(len(res_batch)): context_list.append(res_batch["data"][i]) context = "\n".join(context_list) user_prompt = f"""You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise. Question: {question} Context: {context} Answer:""" response, cost = llm_call(model=llm_model, user_prompt=user_prompt) answer = response.choices[0].message.content return answer, cost def summary_retrieval(llm_model, question, doc): """Returns the answer to a summarization question over the document using summary retrieval. """ # context_length = OPENAI_MODEL_CONTEXT_LENGTH[llm_model] # total_tokens = get_num_tokens_simple(llm_model, wiki_docs[doc]) user_prompt = f"""Here is some context: {doc} Use only the provided context to answer the question. Here is the question: {question}""" response, cost = llm_call(model=llm_model, user_prompt=user_prompt) answer = response.choices[0].message.content return answer, cost # load max of context_length tokens from the document def response_aggregator(llm_model, question, responses): """Aggregates the responses from the subquestions to generate the final response. """ print("-------> ⭐ Aggregating responses...") system_prompt = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.""" context = "" for i, response in enumerate(responses): context += f"\n{response}" user_prompt = f"""Question: {question} Context: {context} Answer:""" response, cost = llm_call(model=llm_model, system_prompt=system_prompt, user_prompt=user_prompt) answer = response.choices[0].message.content return answer, cost def load_wiki_pages(page_titles=["Toronto", "Chicago", "Houston", "Boston", "Atlanta"]): # Download all wiki documents for title in page_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", # 'exintro': True, "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) # Load all wiki documents city_docs = {} for wiki_title in page_titles: input_text = open(f"data/{wiki_title}.txt", "r").read() city_docs[wiki_title] = input_text[:10000] return city_docs if __name__ == "__main__": # establish evadb api cursor print("⏳ Connect to EvaDB...") cursor = evadb.connect().cursor() print("✅ Connected to EvaDB...") doc_names = ["Toronto", "Chicago", "Houston", "Boston", "Atlanta"] wiki_docs = load_wiki_pages(page_titles=doc_names) question = "Which city has the highest population?" user_task = """We have a database of wikipedia articles about several cities. We are building an application to answer questions about the cities.""" vector_stores = generate_vector_stores(cursor, wiki_docs) llm_model = "gpt-35-turbo" total_cost = 0 while True: question_cost = 0 # Get question from user question = str(input("Question (enter 'exit' to exit): ")) if question.lower() == "exit": break print("🧠 Generating subquestions...")
warnings.filterwarnings("ignore") if not load_dotenv(): print( "Could not load .env file or it is empty. Please check if it exists and is readable." ) exit(1) def generate_vector_stores(cursor, docs): """Generate a vector store for the docs using evadb. """ for doc in docs: print(f"Creating vector store for {doc}...") cursor.query(f"DROP TABLE IF EXISTS {doc};").df() cursor.query(f"LOAD DOCUMENT 'data/{doc}.txt' INTO {doc};").df() evadb_path = os.path.dirname(evadb.__file__) cursor.query( f"""CREATE FUNCTION IF NOT EXISTS SentenceFeatureExtractor IMPL '{evadb_path}/functions/sentence_feature_extractor.py'; """).df() cursor.query( f"""CREATE TABLE IF NOT EXISTS {doc}_features AS SELECT SentenceFeatureExtractor(data), data FROM {doc};""" ).df() cursor.query( f"CREATE INDEX IF NOT EXISTS {doc}_index ON {doc}_features (features) USING FAISS;" ).df() print(f"Successfully created vector store for {doc}.") def vector_retrieval(cursor, llm_model, question, doc_name): """Returns the answer to a factoid question using vector retrieval. """ res_batch = cursor.query( f"""SELECT data FROM {doc_name}_features ORDER BY Similarity(SentenceFeatureExtractor('{question}'),features) LIMIT 3;""" ).df() context_list = [] for i in range(len(res_batch)): context_list.append(res_batch["data"][i]) context = "\n".join(context_list) user_prompt = f"""You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise. Question: {question} Context: {context} Answer:""" response, cost = llm_call(model=llm_model, user_prompt=user_prompt) answer = response.choices[0].message.content return answer, cost def summary_retrieval(llm_model, question, doc): """Returns the answer to a summarization question over the document using summary retrieval. """ # context_length = OPENAI_MODEL_CONTEXT_LENGTH[llm_model] # total_tokens = get_num_tokens_simple(llm_model, wiki_docs[doc]) user_prompt = f"""Here is some context: {doc} Use only the provided context to answer the question. Here is the question: {question}""" response, cost = llm_call(model=llm_model, user_prompt=user_prompt) answer = response.choices[0].message.content return answer, cost # load max of context_length tokens from the document def response_aggregator(llm_model, question, responses): """Aggregates the responses from the subquestions to generate the final response. """ print("-------> ⭐ Aggregating responses...") system_prompt = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.""" context = "" for i, response in enumerate(responses): context += f"\n{response}" user_prompt = f"""Question: {question} Context: {context} Answer:""" response, cost = llm_call(model=llm_model, system_prompt=system_prompt, user_prompt=user_prompt) answer = response.choices[0].message.content return answer, cost def load_wiki_pages(page_titles=["Toronto", "Chicago", "Houston", "Boston", "Atlanta"]): # Download all wiki documents for title in page_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", # 'exintro': True, "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) # Load all wiki documents city_docs = {} for wiki_title in page_titles: input_text = open(f"data/{wiki_title}.txt", "r").read() city_docs[wiki_title] = input_text[:10000] return city_docs if __name__ == "__main__": # establish evadb api cursor print("⏳ Connect to EvaDB...") cursor = evadb.connect().cursor() print("✅ Connected to EvaDB...") doc_names = ["Toronto", "Chicago", "Houston", "Boston", "Atlanta"] wiki_docs = load_wiki_pages(page_titles=doc_names) question = "Which city has the highest population?" user_task = """We have a database of wikipedia articles about several cities. We are building an application to answer questions about the cities.""" vector_stores = generate_vector_stores(cursor, wiki_docs) llm_model = "gpt-35-turbo" total_cost = 0 while True: question_cost = 0 # Get question from user question = str(input("Question (enter 'exit' to exit): ")) if question.lower() == "exit": break print("🧠 Generating subquestions...")
subquestions_bundle_list, cost = generate_subquestions(question=question,
0
2023-10-18 16:32:51+00:00
4k
predibase/lorax
server/lorax_server/utils/sources/hub.py
[ { "identifier": "BaseModelSource", "path": "server/lorax_server/utils/sources/source.py", "snippet": "class BaseModelSource:\n def remote_weight_files(self, extension: str = None):\n raise NotImplementedError\n\n def weight_files(self, extension: str = None):\n raise NotImplementedError\n \n def download_weights(self, filenames: List[str]):\n raise NotImplementedError\n \n def download_model_assets(self):\n \"\"\" The reason we need this function is that for s3 \n we need to download all the model files whereas for \n hub we only need to download the weight files. And maybe \n for other future sources we might need something different. \n So this function will take the necessary steps to download\n the needed files for any source \"\"\"\n raise NotImplementedError" }, { "identifier": "try_to_load_from_cache", "path": "server/lorax_server/utils/sources/source.py", "snippet": "def try_to_load_from_cache(\n repo_cache: Path, revision: Optional[str], filename: str\n) -> Optional[Path]:\n \"\"\"Try to load a file from the Hugging Face cache\"\"\"\n if revision is None:\n revision = \"main\"\n\n if not repo_cache.is_dir():\n # No cache for this model\n return None\n\n refs_dir = repo_cache / \"refs\"\n snapshots_dir = repo_cache / \"snapshots\"\n\n # Resolve refs (for instance to convert main to the associated commit sha)\n if refs_dir.is_dir():\n revision_file = refs_dir / revision\n if revision_file.exists():\n with revision_file.open() as f:\n revision = f.read()\n\n # Check if revision folder exists\n if not snapshots_dir.exists():\n return None\n cached_shas = os.listdir(snapshots_dir)\n if revision and revision not in cached_shas:\n # No cache for this revision and we won't try to return a random revision\n return None\n\n # Check if file exists in cache\n cached_file = snapshots_dir / revision / filename\n return cached_file if cached_file.is_file() else None" } ]
import time import os from datetime import timedelta from loguru import logger from pathlib import Path from typing import Optional, List from huggingface_hub import HfApi, hf_hub_download from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE from huggingface_hub.utils import ( LocalEntryNotFoundError, EntryNotFoundError, RevisionNotFoundError, # Import here to ease try/except in other part of the lib ) from .source import BaseModelSource, try_to_load_from_cache
1,689
and "arguments" not in s.rfilename and "args" not in s.rfilename and "training" not in s.rfilename ] if not filenames: raise EntryNotFoundError( f"No {extension} weights found for model {model_id} and revision {revision}.", None, ) return filenames def weight_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[Path]: """Get the local files""" # Local model if Path(model_id).exists() and Path(model_id).is_dir(): local_files = list(Path(model_id).glob(f"*{extension}")) if not local_files: raise FileNotFoundError( f"No local weights found in {model_id} with extension {extension}" ) return local_files try: filenames = weight_hub_files(model_id, revision, extension) except EntryNotFoundError as e: if extension != ".safetensors": raise e # Try to see if there are pytorch weights pt_filenames = weight_hub_files(model_id, revision, extension=".bin") # Change pytorch extension to safetensors extension # It is possible that we have safetensors weights locally even though they are not on the # hub if we converted weights locally without pushing them filenames = [ f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames ] if WEIGHTS_CACHE_OVERRIDE is not None: files = [] for filename in filenames: p = Path(WEIGHTS_CACHE_OVERRIDE) / filename if not p.exists(): raise FileNotFoundError( f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}." ) files.append(p) return files repo_cache = get_hub_model_local_dir(model_id) files = [] for filename in filenames: cache_file = try_to_load_from_cache( repo_cache, revision=revision, filename=filename ) if cache_file is None: raise LocalEntryNotFoundError( f"File {filename} of model {model_id} not found in " f"{os.getenv('HUGGINGFACE_HUB_CACHE', 'the local cache')}. " f"Please run `lorax-server download-weights {model_id}` first." ) files.append(cache_file) return files def download_weights( filenames: List[str], model_id: str, revision: Optional[str] = None ) -> List[Path]: """Download the safetensors files from the hub""" def download_file(filename, tries=5, backoff: int = 5): repo_cache = get_hub_model_local_dir(model_id) local_file = try_to_load_from_cache(repo_cache, revision, filename) if local_file is not None: logger.info(f"File {filename} already present in cache.") return Path(local_file) for i in range(tries): try: logger.info(f"Download file: {filename}") start_time = time.time() local_file = hf_hub_download( filename=filename, repo_id=model_id, revision=revision, local_files_only=False, ) logger.info( f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - start_time))}." ) return Path(local_file) except Exception as e: if i + 1 == tries: raise e logger.error(e) logger.info(f"Retrying in {backoff} seconds") time.sleep(backoff) logger.info(f"Retry {i + 1}/{tries - 1}") # We do this instead of using tqdm because we want to parse the logs with the launcher start_time = time.time() files = [] for i, filename in enumerate(filenames): file = download_file(filename) elapsed = timedelta(seconds=int(time.time() - start_time)) remaining = len(filenames) - (i + 1) eta = (elapsed / (i + 1)) * remaining if remaining > 0 else 0 logger.info(f"Download: [{i + 1}/{len(filenames)}] -- ETA: {eta}") files.append(file) return files
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None) def get_hub_model_local_dir(model_id: str) -> Path: object_id = model_id.replace("/", "--") repo_cache = Path(HUGGINGFACE_HUB_CACHE) / f"models--{object_id}" return repo_cache def weight_hub_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[str]: """Get the weights filenames on the hub""" api = HfApi() info = api.model_info(model_id, revision=revision) filenames = [ s.rfilename for s in info.siblings if s.rfilename.endswith(extension) and len(s.rfilename.split("/")) == 1 and "arguments" not in s.rfilename and "args" not in s.rfilename and "training" not in s.rfilename ] if not filenames: raise EntryNotFoundError( f"No {extension} weights found for model {model_id} and revision {revision}.", None, ) return filenames def weight_files( model_id: str, revision: Optional[str] = None, extension: str = ".safetensors" ) -> List[Path]: """Get the local files""" # Local model if Path(model_id).exists() and Path(model_id).is_dir(): local_files = list(Path(model_id).glob(f"*{extension}")) if not local_files: raise FileNotFoundError( f"No local weights found in {model_id} with extension {extension}" ) return local_files try: filenames = weight_hub_files(model_id, revision, extension) except EntryNotFoundError as e: if extension != ".safetensors": raise e # Try to see if there are pytorch weights pt_filenames = weight_hub_files(model_id, revision, extension=".bin") # Change pytorch extension to safetensors extension # It is possible that we have safetensors weights locally even though they are not on the # hub if we converted weights locally without pushing them filenames = [ f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames ] if WEIGHTS_CACHE_OVERRIDE is not None: files = [] for filename in filenames: p = Path(WEIGHTS_CACHE_OVERRIDE) / filename if not p.exists(): raise FileNotFoundError( f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}." ) files.append(p) return files repo_cache = get_hub_model_local_dir(model_id) files = [] for filename in filenames: cache_file = try_to_load_from_cache( repo_cache, revision=revision, filename=filename ) if cache_file is None: raise LocalEntryNotFoundError( f"File {filename} of model {model_id} not found in " f"{os.getenv('HUGGINGFACE_HUB_CACHE', 'the local cache')}. " f"Please run `lorax-server download-weights {model_id}` first." ) files.append(cache_file) return files def download_weights( filenames: List[str], model_id: str, revision: Optional[str] = None ) -> List[Path]: """Download the safetensors files from the hub""" def download_file(filename, tries=5, backoff: int = 5): repo_cache = get_hub_model_local_dir(model_id) local_file = try_to_load_from_cache(repo_cache, revision, filename) if local_file is not None: logger.info(f"File {filename} already present in cache.") return Path(local_file) for i in range(tries): try: logger.info(f"Download file: {filename}") start_time = time.time() local_file = hf_hub_download( filename=filename, repo_id=model_id, revision=revision, local_files_only=False, ) logger.info( f"Downloaded {local_file} in {timedelta(seconds=int(time.time() - start_time))}." ) return Path(local_file) except Exception as e: if i + 1 == tries: raise e logger.error(e) logger.info(f"Retrying in {backoff} seconds") time.sleep(backoff) logger.info(f"Retry {i + 1}/{tries - 1}") # We do this instead of using tqdm because we want to parse the logs with the launcher start_time = time.time() files = [] for i, filename in enumerate(filenames): file = download_file(filename) elapsed = timedelta(seconds=int(time.time() - start_time)) remaining = len(filenames) - (i + 1) eta = (elapsed / (i + 1)) * remaining if remaining > 0 else 0 logger.info(f"Download: [{i + 1}/{len(filenames)}] -- ETA: {eta}") files.append(file) return files
class HubModelSource(BaseModelSource):
0
2023-10-20 18:19:49+00:00
4k
codefuse-ai/Test-Agent
chat/server/gradio_web_server.py
[ { "identifier": "SeparatorStyle", "path": "chat/conversation.py", "snippet": "class SeparatorStyle(IntEnum):\n \"\"\"Separator styles.\"\"\"\n\n ADD_COLON_SINGLE = auto()\n ADD_COLON_TWO = auto()\n ADD_COLON_SPACE_SINGLE = auto()\n NO_COLON_SINGLE = auto()\n NO_COLON_TWO = auto()\n ADD_NEW_LINE_SINGLE = auto()\n LLAMA2 = auto()\n LLAMA2_TESTGPT = auto()\n CHATGLM = auto()\n CHATML = auto()\n CHATINTERN = auto()\n DOLLY = auto()\n RWKV = auto()\n PHOENIX = auto()\n ROBIN = auto()" }, { "identifier": "LOGDIR", "path": "chat/constants.py", "snippet": "LOGDIR = os.getenv(\"LOGDIR\", \".\")" }, { "identifier": "WORKER_API_TIMEOUT", "path": "chat/constants.py", "snippet": "WORKER_API_TIMEOUT = int(os.getenv(\"FASTCHAT_WORKER_API_TIMEOUT\", 100))" }, { "identifier": "ErrorCode", "path": "chat/constants.py", "snippet": "class ErrorCode(IntEnum):\n \"\"\"\n https://platform.openai.com/docs/guides/error-codes/api-errors\n \"\"\"\n\n VALIDATION_TYPE_ERROR = 40001\n\n INVALID_AUTH_KEY = 40101\n INCORRECT_AUTH_KEY = 40102\n NO_PERMISSION = 40103\n\n INVALID_MODEL = 40301\n PARAM_OUT_OF_RANGE = 40302\n CONTEXT_OVERFLOW = 40303\n\n RATE_LIMIT = 42901\n QUOTA_EXCEEDED = 42902\n ENGINE_OVERLOADED = 42903\n\n INTERNAL_ERROR = 50001\n CUDA_OUT_OF_MEMORY = 50002\n GRADIO_REQUEST_ERROR = 50003\n GRADIO_STREAM_UNKNOWN_ERROR = 50004\n CONTROLLER_NO_WORKER = 50005\n CONTROLLER_WORKER_TIMEOUT = 50006" }, { "identifier": "MODERATION_MSG", "path": "chat/constants.py", "snippet": "MODERATION_MSG = \"YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE FIX YOUR INPUT AND TRY AGAIN.\"" }, { "identifier": "CONVERSATION_LIMIT_MSG", "path": "chat/constants.py", "snippet": "CONVERSATION_LIMIT_MSG = \"YOU HAVE REACHED THE CONVERSATION LENGTH LIMIT. PLEASE CLEAR HISTORY AND START A NEW CONVERSATION.\"" }, { "identifier": "SERVER_ERROR_MSG", "path": "chat/constants.py", "snippet": "SERVER_ERROR_MSG = (\n \"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\"\n)" }, { "identifier": "INACTIVE_MSG", "path": "chat/constants.py", "snippet": "INACTIVE_MSG = \"THIS SESSION HAS BEEN INACTIVE FOR TOO LONG. PLEASE REFRESH THIS PAGE.\"" }, { "identifier": "INPUT_CHAR_LEN_LIMIT", "path": "chat/constants.py", "snippet": "INPUT_CHAR_LEN_LIMIT = int(os.getenv(\"FASTCHAT_INPUT_CHAR_LEN_LIMIT\", 2560))" }, { "identifier": "CONVERSATION_TURN_LIMIT", "path": "chat/constants.py", "snippet": "CONVERSATION_TURN_LIMIT = 50" }, { "identifier": "SESSION_EXPIRATION_TIME", "path": "chat/constants.py", "snippet": "SESSION_EXPIRATION_TIME = 3600" }, { "identifier": "get_conversation_template", "path": "chat/model/model_adapter.py", "snippet": "def get_conversation_template(model_path: str) -> Conversation:\n \"\"\"Get the default conversation template.\"\"\"\n adapter = get_model_adapter(model_path)\n return adapter.get_default_conv_template(model_path)" }, { "identifier": "model_info", "path": "chat/model/model_registry.py", "snippet": "def register_model_info(\n full_names: List[str], simple_name: str, link: str, description: str\n):\ndef get_model_info(name: str) -> ModelInfo:" }, { "identifier": "anthropic_api_stream_iter", "path": "chat/server/api_provider.py", "snippet": "def anthropic_api_stream_iter(model_name, prompt, temperature, top_p, max_new_tokens):\n import anthropic\n\n c = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n # Make requests\n gen_params = {\n \"model\": model_name,\n \"prompt\": prompt,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"max_new_tokens\": max_new_tokens,\n }\n logger.info(f\"==== request ====\\n{gen_params}\")\n\n res = c.completions.create(\n prompt=prompt,\n stop_sequences=[anthropic.HUMAN_PROMPT],\n max_tokens_to_sample=max_new_tokens,\n temperature=temperature,\n top_p=top_p,\n model=model_name,\n stream=True,\n )\n text = \"\"\n for chunk in res:\n text += chunk.completion\n data = {\n \"text\": text,\n \"error_code\": 0,\n }\n yield data" }, { "identifier": "openai_api_stream_iter", "path": "chat/server/api_provider.py", "snippet": "def openai_api_stream_iter(\n model_name,\n messages,\n temperature,\n top_p,\n max_new_tokens,\n api_base=None,\n api_key=None,\n):\n import openai\n\n openai.api_base = api_base or \"https://api.openai.com/v1\"\n openai.api_key = api_key or os.environ[\"OPENAI_API_KEY\"]\n\n # Make requests\n gen_params = {\n \"model\": model_name,\n \"prompt\": messages,\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"max_new_tokens\": max_new_tokens,\n }\n logger.info(f\"==== request ====\\n{gen_params}\")\n\n res = openai.ChatCompletion.create(\n model=model_name,\n messages=messages,\n temperature=temperature,\n max_tokens=max_new_tokens,\n stream=True,\n )\n text = \"\"\n for chunk in res:\n text += chunk[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n data = {\n \"text\": text,\n \"error_code\": 0,\n }\n yield data" }, { "identifier": "palm_api_stream_iter", "path": "chat/server/api_provider.py", "snippet": "def palm_api_stream_iter(chat, message, temperature, top_p, max_new_tokens):\n parameters = {\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"max_output_tokens\": max_new_tokens,\n }\n gen_params = {\n \"model\": \"palm-2\",\n \"prompt\": message,\n }\n gen_params.update(parameters)\n logger.info(f\"==== request ====\\n{gen_params}\")\n\n response = chat.send_message(message, **parameters)\n content = response.text\n\n pos = 0\n while pos < len(content):\n # This is a fancy way to simulate token generation latency combined\n # with a Poisson process.\n pos += random.randint(10, 20)\n time.sleep(random.expovariate(50))\n data = {\n \"text\": content[:pos],\n \"error_code\": 0,\n }\n yield data" }, { "identifier": "init_palm_chat", "path": "chat/server/api_provider.py", "snippet": "def init_palm_chat(model_name):\n import vertexai # pip3 install google-cloud-aiplatform\n from vertexai.preview.language_models import ChatModel\n\n project_id = os.environ[\"GCP_PROJECT_ID\"]\n location = \"us-central1\"\n vertexai.init(project=project_id, location=location)\n\n chat_model = ChatModel.from_pretrained(model_name)\n chat = chat_model.start_chat(examples=[])\n return chat" }, { "identifier": "build_logger", "path": "chat/utils.py", "snippet": "def build_logger(logger_name, logger_filename):\n def __init__(self, logger, log_level=logging.INFO):\n def __getattr__(self, attr):\n def write(self, buf):\n def flush(self):\ndef disable_torch_init():\ndef get_gpu_memory(max_gpus=None):\ndef violates_moderation(text):\ndef clean_flant5_ckpt(ckpt_path):\ndef pretty_print_semaphore(semaphore):\ndef iter_over_async(\n async_gen: AsyncGenerator, event_loop: AbstractEventLoop\n) -> Generator:\n async def get_next():\ndef detect_language(text: str) -> str:\ndef parse_gradio_auth_creds(filename: str):\ndef is_partial_stop(output: str, stop_str: str):\ndef run_cmd(cmd: str):\ndef is_sentence_complete(output: str):\ndef get_context_length(config):\nclass StreamToLogger(object):\nSEQUENCE_LENGTH_KEYS = [\n \"max_sequence_length\",\n \"seq_length\",\n \"max_position_embeddings\",\n \"max_seq_len\",\n \"model_max_length\",\n]" } ]
import argparse import datetime import json import os import random import time import uuid import gradio as gr import requests from collections import defaultdict from chat.conversation import SeparatorStyle from chat.constants import ( LOGDIR, WORKER_API_TIMEOUT, ErrorCode, MODERATION_MSG, CONVERSATION_LIMIT_MSG, SERVER_ERROR_MSG, INACTIVE_MSG, INPUT_CHAR_LEN_LIMIT, CONVERSATION_TURN_LIMIT, SESSION_EXPIRATION_TIME, ) from chat.model.model_adapter import get_conversation_template from chat.model.model_registry import model_info from chat.server.api_provider import ( anthropic_api_stream_iter, openai_api_stream_iter, palm_api_stream_iter, init_palm_chat, ) from chat.utils import ( build_logger, violates_moderation, get_window_url_params_js, parse_gradio_auth_creds, )
3,567
openai_compatible_models_info = json.load( open(register_openai_compatible_models) ) models += list(openai_compatible_models_info.keys()) if add_chatgpt: models += ["gpt-3.5-turbo", "gpt-4"] if add_claude: models += ["claude-2", "claude-instant-1"] if add_palm: models += ["palm-2"] models = list(set(models)) priority = {k: f"___{i:02d}" for i, k in enumerate(model_info)} models.sort(key=lambda x: priority.get(x, x)) logger.info(f"Models: {models}") return models def load_demo_single(models, url_params): selected_model = models[0] if len(models) > 0 else "" if "model" in url_params: model = url_params["model"] if model in models: selected_model = model dropdown_update = gr.Dropdown.update( choices=models, value=selected_model, visible=True ) state = None return ( state, dropdown_update, gr.Chatbot.update(visible=True), gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Row.update(visible=True), gr.Accordion.update(visible=True), ) def load_demo(url_params, request: gr.Request): global models ip = request.client.host logger.info(f"load_demo. ip: {ip}. params: {url_params}") ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME if args.model_list_mode == "reload": models = get_model_list( controller_url, args.register_openai_compatible_models, args.add_chatgpt, args.add_claude, args.add_palm, ) return load_demo_single(models, url_params) def vote_last_response(state, vote_type, model_selector, request: gr.Request): with open(get_conv_log_filename(), "a") as fout: data = { "tstamp": round(time.time(), 4), "type": vote_type, "model": model_selector, "state": state.dict(), "ip": request.client.host, } fout.write(json.dumps(data) + "\n") def upvote_last_response(state, model_selector, request: gr.Request): logger.info(f"upvote. ip: {request.client.host}") vote_last_response(state, "upvote", model_selector, request) return ("",) + (disable_btn,) * 3 def downvote_last_response(state, model_selector, request: gr.Request): logger.info(f"downvote. ip: {request.client.host}") vote_last_response(state, "downvote", model_selector, request) return ("",) + (disable_btn,) * 3 def flag_last_response(state, model_selector, request: gr.Request): logger.info(f"flag. ip: {request.client.host}") vote_last_response(state, "flag", model_selector, request) return ("",) + (disable_btn,) * 3 def regenerate(state, request: gr.Request): logger.info(f"regenerate. ip: {request.client.host}") state.conv.update_last_message(None) return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5 def clear_history(request: gr.Request): logger.info(f"clear_history. ip: {request.client.host}") state = None return (state, [], "") + (disable_btn,) * 5 def add_text(state, model_selector, text, request: gr.Request): ip = request.client.host logger.info(f"add_text. ip: {ip}. len: {len(text)}") if state is None: state = State(model_selector) if len(text) <= 0: state.skip_next = True return (state, state.to_gradio_chatbot(), "") + (no_change_btn,) * 5 if ip_expiration_dict[ip] < time.time(): logger.info(f"inactive. ip: {request.client.host}. text: {text}") state.skip_next = True return (state, state.to_gradio_chatbot(), INACTIVE_MSG) + (no_change_btn,) * 5 if enable_moderation:
""" The gradio demo server for chatting with a single model. """ logger = build_logger("gradio_web_server", "gradio_web_server.log") headers = {"User-Agent": "FastChat Client"} no_change_btn = gr.Button.update() enable_btn = gr.Button.update(interactive=True) disable_btn = gr.Button.update(interactive=False) controller_url = None enable_moderation = False acknowledgment_md = """ **Acknowledgment:** We thank Kaggle, MBZUAI, and AnyScale for their sponsorship. """ ip_expiration_dict = defaultdict(lambda: 0) # Information about custom OpenAI compatible API models. # JSON file format: # { # "vicuna-7b": { # "model_name": "vicuna-7b-v1.5", # "api_base": "http://8.8.8.55:5555/v1", # "api_key": "password" # }, # } openai_compatible_models_info = {} class State: def __init__(self, model_name): self.conv = get_conversation_template(model_name) self.conv_id = uuid.uuid4().hex self.skip_next = False self.model_name = model_name if model_name == "palm-2": # According to release note, "chat-bison@001" is PaLM 2 for chat. # https://cloud.google.com/vertex-ai/docs/release-notes#May_10_2023 self.palm_chat = init_palm_chat("chat-bison@001") def to_gradio_chatbot(self): return self.conv.to_gradio_chatbot() def dict(self): base = self.conv.dict() base.update( { "conv_id": self.conv_id, "model_name": self.model_name, } ) return base def set_global_vars(controller_url_, enable_moderation_): global controller_url, enable_moderation controller_url = controller_url_ enable_moderation = enable_moderation_ def get_conv_log_filename(): t = datetime.datetime.now() name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json") return name def get_model_list( controller_url, register_openai_compatible_models, add_chatgpt, add_claude, add_palm ): if controller_url: ret = requests.post(controller_url + "/refresh_all_workers") assert ret.status_code == 200 ret = requests.post(controller_url + "/list_models") models = ret.json()["models"] else: models = [] # Add API providers if register_openai_compatible_models: global openai_compatible_models_info openai_compatible_models_info = json.load( open(register_openai_compatible_models) ) models += list(openai_compatible_models_info.keys()) if add_chatgpt: models += ["gpt-3.5-turbo", "gpt-4"] if add_claude: models += ["claude-2", "claude-instant-1"] if add_palm: models += ["palm-2"] models = list(set(models)) priority = {k: f"___{i:02d}" for i, k in enumerate(model_info)} models.sort(key=lambda x: priority.get(x, x)) logger.info(f"Models: {models}") return models def load_demo_single(models, url_params): selected_model = models[0] if len(models) > 0 else "" if "model" in url_params: model = url_params["model"] if model in models: selected_model = model dropdown_update = gr.Dropdown.update( choices=models, value=selected_model, visible=True ) state = None return ( state, dropdown_update, gr.Chatbot.update(visible=True), gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Row.update(visible=True), gr.Accordion.update(visible=True), ) def load_demo(url_params, request: gr.Request): global models ip = request.client.host logger.info(f"load_demo. ip: {ip}. params: {url_params}") ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME if args.model_list_mode == "reload": models = get_model_list( controller_url, args.register_openai_compatible_models, args.add_chatgpt, args.add_claude, args.add_palm, ) return load_demo_single(models, url_params) def vote_last_response(state, vote_type, model_selector, request: gr.Request): with open(get_conv_log_filename(), "a") as fout: data = { "tstamp": round(time.time(), 4), "type": vote_type, "model": model_selector, "state": state.dict(), "ip": request.client.host, } fout.write(json.dumps(data) + "\n") def upvote_last_response(state, model_selector, request: gr.Request): logger.info(f"upvote. ip: {request.client.host}") vote_last_response(state, "upvote", model_selector, request) return ("",) + (disable_btn,) * 3 def downvote_last_response(state, model_selector, request: gr.Request): logger.info(f"downvote. ip: {request.client.host}") vote_last_response(state, "downvote", model_selector, request) return ("",) + (disable_btn,) * 3 def flag_last_response(state, model_selector, request: gr.Request): logger.info(f"flag. ip: {request.client.host}") vote_last_response(state, "flag", model_selector, request) return ("",) + (disable_btn,) * 3 def regenerate(state, request: gr.Request): logger.info(f"regenerate. ip: {request.client.host}") state.conv.update_last_message(None) return (state, state.to_gradio_chatbot(), "") + (disable_btn,) * 5 def clear_history(request: gr.Request): logger.info(f"clear_history. ip: {request.client.host}") state = None return (state, [], "") + (disable_btn,) * 5 def add_text(state, model_selector, text, request: gr.Request): ip = request.client.host logger.info(f"add_text. ip: {ip}. len: {len(text)}") if state is None: state = State(model_selector) if len(text) <= 0: state.skip_next = True return (state, state.to_gradio_chatbot(), "") + (no_change_btn,) * 5 if ip_expiration_dict[ip] < time.time(): logger.info(f"inactive. ip: {request.client.host}. text: {text}") state.skip_next = True return (state, state.to_gradio_chatbot(), INACTIVE_MSG) + (no_change_btn,) * 5 if enable_moderation:
flagged = violates_moderation(text)
17
2023-10-20 08:56:20+00:00
4k
thuml/iTransformer
model/iInformer.py
[ { "identifier": "Encoder", "path": "layers/Transformer_EncDec.py", "snippet": "class Encoder(nn.Module):\n def __init__(self, attn_layers, conv_layers=None, norm_layer=None):\n super(Encoder, self).__init__()\n self.attn_layers = nn.ModuleList(attn_layers)\n self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None\n self.norm = norm_layer\n\n def forward(self, x, attn_mask=None, tau=None, delta=None):\n # x [B, L, D]\n attns = []\n if self.conv_layers is not None:\n for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)):\n delta = delta if i == 0 else None\n x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)\n x = conv_layer(x)\n attns.append(attn)\n x, attn = self.attn_layers[-1](x, tau=tau, delta=None)\n attns.append(attn)\n else:\n for attn_layer in self.attn_layers:\n x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)\n attns.append(attn)\n\n if self.norm is not None:\n x = self.norm(x)\n\n return x, attns" }, { "identifier": "EncoderLayer", "path": "layers/Transformer_EncDec.py", "snippet": "class EncoderLayer(nn.Module):\n def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation=\"relu\"):\n super(EncoderLayer, self).__init__()\n d_ff = d_ff or 4 * d_model\n self.attention = attention\n self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)\n self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout = nn.Dropout(dropout)\n self.activation = F.relu if activation == \"relu\" else F.gelu\n\n def forward(self, x, attn_mask=None, tau=None, delta=None):\n new_x, attn = self.attention(\n x, x, x,\n attn_mask=attn_mask,\n tau=tau, delta=delta\n )\n x = x + self.dropout(new_x)\n\n y = x = self.norm1(x)\n y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))\n y = self.dropout(self.conv2(y).transpose(-1, 1))\n\n return self.norm2(x + y), attn" }, { "identifier": "ProbAttention", "path": "layers/SelfAttention_Family.py", "snippet": "class ProbAttention(nn.Module):\n def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n super(ProbAttention, self).__init__()\n self.factor = factor\n self.scale = scale\n self.mask_flag = mask_flag\n self.output_attention = output_attention\n self.dropout = nn.Dropout(attention_dropout)\n\n def _prob_QK(self, Q, K, sample_k, n_top): # n_top: c*ln(L_q)\n # Q [B, H, L, D]\n B, H, L_K, E = K.shape\n _, _, L_Q, _ = Q.shape\n\n # calculate the sampled Q_K\n K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)\n # real U = U_part(factor*ln(L_k))*L_q\n index_sample = torch.randint(L_K, (L_Q, sample_k))\n K_sample = K_expand[:, :, torch.arange(\n L_Q).unsqueeze(1), index_sample, :]\n Q_K_sample = torch.matmul(\n Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()\n\n # find the Top_k query with sparisty measurement\n M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)\n M_top = M.topk(n_top, sorted=False)[1]\n\n # use the reduced Q to calculate Q_K\n Q_reduce = Q[torch.arange(B)[:, None, None],\n torch.arange(H)[None, :, None],\n M_top, :] # factor*ln(L_q)\n Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # factor*ln(L_q)*L_k\n\n return Q_K, M_top\n\n def _get_initial_context(self, V, L_Q):\n B, H, L_V, D = V.shape\n if not self.mask_flag:\n # V_sum = V.sum(dim=-2)\n V_sum = V.mean(dim=-2)\n contex = V_sum.unsqueeze(-2).expand(B, H,\n L_Q, V_sum.shape[-1]).clone()\n else: # use mask\n # requires that L_Q == L_V, i.e. for self-attention only\n assert (L_Q == L_V)\n contex = V.cumsum(dim=-2)\n return contex\n\n def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):\n B, H, L_V, D = V.shape\n\n if self.mask_flag:\n attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)\n scores.masked_fill_(attn_mask.mask, -np.inf)\n\n attn = torch.softmax(scores, dim=-1) # nn.Softmax(dim=-1)(scores)\n\n context_in[torch.arange(B)[:, None, None],\n torch.arange(H)[None, :, None],\n index, :] = torch.matmul(attn, V).type_as(context_in)\n if self.output_attention:\n attns = (torch.ones([B, H, L_V, L_V]) /\n L_V).type_as(attn).to(attn.device)\n attns[torch.arange(B)[:, None, None], torch.arange(H)[\n None, :, None], index, :] = attn\n return (context_in, attns)\n else:\n return (context_in, None)\n\n def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n B, L_Q, H, D = queries.shape\n _, L_K, _, _ = keys.shape\n\n queries = queries.transpose(2, 1)\n keys = keys.transpose(2, 1)\n values = values.transpose(2, 1)\n\n U_part = self.factor * \\\n np.ceil(np.log(L_K)).astype('int').item() # c*ln(L_k)\n u = self.factor * \\\n np.ceil(np.log(L_Q)).astype('int').item() # c*ln(L_q)\n\n U_part = U_part if U_part < L_K else L_K\n u = u if u < L_Q else L_Q\n\n scores_top, index = self._prob_QK(\n queries, keys, sample_k=U_part, n_top=u)\n\n # add scale factor\n scale = self.scale or 1. / sqrt(D)\n if scale is not None:\n scores_top = scores_top * scale\n # get the context\n context = self._get_initial_context(values, L_Q)\n # update the context with selected top_k queries\n context, attn = self._update_context(\n context, values, scores_top, index, L_Q, attn_mask)\n\n return context.contiguous(), attn" }, { "identifier": "AttentionLayer", "path": "layers/SelfAttention_Family.py", "snippet": "class AttentionLayer(nn.Module):\n def __init__(self, attention, d_model, n_heads, d_keys=None,\n d_values=None):\n super(AttentionLayer, self).__init__()\n\n d_keys = d_keys or (d_model // n_heads)\n d_values = d_values or (d_model // n_heads)\n\n self.inner_attention = attention\n self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n self.value_projection = nn.Linear(d_model, d_values * n_heads)\n self.out_projection = nn.Linear(d_values * n_heads, d_model)\n self.n_heads = n_heads\n\n def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n B, L, _ = queries.shape\n _, S, _ = keys.shape\n H = self.n_heads\n\n queries = self.query_projection(queries).view(B, L, H, -1)\n keys = self.key_projection(keys).view(B, S, H, -1)\n values = self.value_projection(values).view(B, S, H, -1)\n\n out, attn = self.inner_attention(\n queries,\n keys,\n values,\n attn_mask,\n tau=tau,\n delta=delta\n )\n out = out.view(B, L, -1)\n\n return self.out_projection(out), attn" }, { "identifier": "DataEmbedding_inverted", "path": "layers/Embed.py", "snippet": "class DataEmbedding_inverted(nn.Module):\n def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):\n super(DataEmbedding_inverted, self).__init__()\n self.value_embedding = nn.Linear(c_in, d_model)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, x, x_mark):\n x = x.permute(0, 2, 1)\n # x: [Batch Variate Time]\n if x_mark is None:\n x = self.value_embedding(x)\n else:\n # the potential to take covariates (e.g. timestamps) as tokens\n x = self.value_embedding(torch.cat([x, x_mark.permute(0, 2, 1)], 1)) \n # x: [Batch Variate d_model]\n return self.dropout(x)" } ]
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from layers.Transformer_EncDec import Encoder, EncoderLayer from layers.SelfAttention_Family import ProbAttention, AttentionLayer from layers.Embed import DataEmbedding_inverted
2,602
class Model(nn.Module): """ Vanilla Transformer with O(L^2) complexity Paper link: https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf """ def __init__(self, configs): super(Model, self).__init__() self.seq_len = configs.seq_len self.pred_len = configs.pred_len self.output_attention = configs.output_attention # Embedding self.enc_embedding = DataEmbedding_inverted(configs.seq_len, configs.d_model, configs.embed, configs.freq, configs.dropout) # Encoder-only architecture self.encoder = Encoder( [
class Model(nn.Module): """ Vanilla Transformer with O(L^2) complexity Paper link: https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf """ def __init__(self, configs): super(Model, self).__init__() self.seq_len = configs.seq_len self.pred_len = configs.pred_len self.output_attention = configs.output_attention # Embedding self.enc_embedding = DataEmbedding_inverted(configs.seq_len, configs.d_model, configs.embed, configs.freq, configs.dropout) # Encoder-only architecture self.encoder = Encoder( [
EncoderLayer(
1
2023-10-19 03:23:15+00:00
4k
kylesargent/ZeroNVS
threestudio/models/prompt_processors/base.py
[ { "identifier": "BaseObject", "path": "threestudio/utils/base.py", "snippet": "class BaseObject(Updateable):\n @dataclass\n class Config:\n pass\n\n cfg: Config # add this to every subclass of BaseObject to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = get_device()\n self.configure(*args, **kwargs)\n\n def configure(self, *args, **kwargs) -> None:\n pass" }, { "identifier": "barrier", "path": "threestudio/utils/misc.py", "snippet": "def barrier():\n if not _distributed_available():\n return\n else:\n torch.distributed.barrier()" }, { "identifier": "cleanup", "path": "threestudio/utils/misc.py", "snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()" }, { "identifier": "get_rank", "path": "threestudio/utils/misc.py", "snippet": "def get_rank():\n # SLURM_PROCID can be set even if SLURM is not managing the multiprocessing,\n # therefore LOCAL_RANK needs to be checked first\n rank_keys = (\"RANK\", \"LOCAL_RANK\", \"SLURM_PROCID\", \"JSM_NAMESPACE_RANK\")\n for key in rank_keys:\n rank = os.environ.get(key)\n if rank is not None:\n return int(rank)\n return 0" }, { "identifier": "shifted_cosine_decay", "path": "threestudio/utils/ops.py", "snippet": "def shifted_cosine_decay(a, b, c, r):\n return a * torch.cos(b * r + c) + a" }, { "identifier": "shifted_expotional_decay", "path": "threestudio/utils/ops.py", "snippet": "def shifted_expotional_decay(a, b, c, r):\n return a * torch.exp(-b * r) + c" } ]
import json import os import torch import torch.multiprocessing as mp import torch.nn as nn import torch.nn.functional as F import threestudio import hashlib from dataclasses import dataclass, field from pytorch_lightning.utilities.rank_zero import rank_zero_only from transformers import AutoTokenizer, BertForMaskedLM from threestudio.utils.base import BaseObject from threestudio.utils.misc import barrier, cleanup, get_rank from threestudio.utils.ops import shifted_cosine_decay, shifted_expotional_decay from threestudio.utils.typing import *
1,611
def hash_prompt(model: str, prompt: str) -> str: identifier = f"{model}-{prompt}" return hashlib.md5(identifier.encode()).hexdigest() @dataclass class DirectionConfig: name: str prompt: Callable[[str], str] negative_prompt: Callable[[str], str] condition: Callable[ [Float[Tensor, "B"], Float[Tensor, "B"], Float[Tensor, "B"]], Float[Tensor, "B"], ] @dataclass class PromptProcessorOutput: text_embeddings: Float[Tensor, "N Nf"] uncond_text_embeddings: Float[Tensor, "N Nf"] text_embeddings_vd: Float[Tensor, "Nv N Nf"] uncond_text_embeddings_vd: Float[Tensor, "Nv N Nf"] directions: List[DirectionConfig] direction2idx: Dict[str, int] use_perp_neg: bool perp_neg_f_sb: Tuple[float, float, float] perp_neg_f_fsb: Tuple[float, float, float] perp_neg_f_fs: Tuple[float, float, float] perp_neg_f_sf: Tuple[float, float, float] def get_text_embeddings( self, elevation: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], camera_distances: Float[Tensor, "B"], view_dependent_prompting: bool = True, ) -> Float[Tensor, "BB N Nf"]: batch_size = elevation.shape[0] if view_dependent_prompting: # Get direction direction_idx = torch.zeros_like(elevation, dtype=torch.long) for d in self.directions: direction_idx[ d.condition(elevation, azimuth, camera_distances) ] = self.direction2idx[d.name] # Get text embeddings text_embeddings = self.text_embeddings_vd[direction_idx] # type: ignore uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx] # type: ignore else: text_embeddings = self.text_embeddings.expand(batch_size, -1, -1) # type: ignore uncond_text_embeddings = self.uncond_text_embeddings.expand( # type: ignore batch_size, -1, -1 ) # IMPORTANT: we return (cond, uncond), which is in different order than other implementations! return torch.cat([text_embeddings, uncond_text_embeddings], dim=0) def get_text_embeddings_perp_neg( self, elevation: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], camera_distances: Float[Tensor, "B"], view_dependent_prompting: bool = True, ) -> Tuple[Float[Tensor, "BBBB N Nf"], Float[Tensor, "B 2"]]: assert ( view_dependent_prompting ), "Perp-Neg only works with view-dependent prompting" batch_size = elevation.shape[0] direction_idx = torch.zeros_like(elevation, dtype=torch.long) for d in self.directions: direction_idx[ d.condition(elevation, azimuth, camera_distances) ] = self.direction2idx[d.name] # 0 - side view # 1 - front view # 2 - back view # 3 - overhead view pos_text_embeddings = [] neg_text_embeddings = [] neg_guidance_weights = [] uncond_text_embeddings = [] side_emb = self.text_embeddings_vd[0] front_emb = self.text_embeddings_vd[1] back_emb = self.text_embeddings_vd[2] overhead_emb = self.text_embeddings_vd[3] for idx, ele, azi, dis in zip( direction_idx, elevation, azimuth, camera_distances ): azi = shift_azimuth_deg(azi) # to (-180, 180) uncond_text_embeddings.append( self.uncond_text_embeddings_vd[idx] ) # should be "" if idx.item() == 3: # overhead view pos_text_embeddings.append(overhead_emb) # side view # dummy neg_text_embeddings += [ self.uncond_text_embeddings_vd[idx], self.uncond_text_embeddings_vd[idx], ] neg_guidance_weights += [0.0, 0.0] else: # interpolating views if torch.abs(azi) < 90: # front-side interpolation # 0 - complete side, 1 - complete front r_inter = 1 - torch.abs(azi) / 90 pos_text_embeddings.append( r_inter * front_emb + (1 - r_inter) * side_emb ) neg_text_embeddings += [front_emb, side_emb] neg_guidance_weights += [
def hash_prompt(model: str, prompt: str) -> str: identifier = f"{model}-{prompt}" return hashlib.md5(identifier.encode()).hexdigest() @dataclass class DirectionConfig: name: str prompt: Callable[[str], str] negative_prompt: Callable[[str], str] condition: Callable[ [Float[Tensor, "B"], Float[Tensor, "B"], Float[Tensor, "B"]], Float[Tensor, "B"], ] @dataclass class PromptProcessorOutput: text_embeddings: Float[Tensor, "N Nf"] uncond_text_embeddings: Float[Tensor, "N Nf"] text_embeddings_vd: Float[Tensor, "Nv N Nf"] uncond_text_embeddings_vd: Float[Tensor, "Nv N Nf"] directions: List[DirectionConfig] direction2idx: Dict[str, int] use_perp_neg: bool perp_neg_f_sb: Tuple[float, float, float] perp_neg_f_fsb: Tuple[float, float, float] perp_neg_f_fs: Tuple[float, float, float] perp_neg_f_sf: Tuple[float, float, float] def get_text_embeddings( self, elevation: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], camera_distances: Float[Tensor, "B"], view_dependent_prompting: bool = True, ) -> Float[Tensor, "BB N Nf"]: batch_size = elevation.shape[0] if view_dependent_prompting: # Get direction direction_idx = torch.zeros_like(elevation, dtype=torch.long) for d in self.directions: direction_idx[ d.condition(elevation, azimuth, camera_distances) ] = self.direction2idx[d.name] # Get text embeddings text_embeddings = self.text_embeddings_vd[direction_idx] # type: ignore uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx] # type: ignore else: text_embeddings = self.text_embeddings.expand(batch_size, -1, -1) # type: ignore uncond_text_embeddings = self.uncond_text_embeddings.expand( # type: ignore batch_size, -1, -1 ) # IMPORTANT: we return (cond, uncond), which is in different order than other implementations! return torch.cat([text_embeddings, uncond_text_embeddings], dim=0) def get_text_embeddings_perp_neg( self, elevation: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], camera_distances: Float[Tensor, "B"], view_dependent_prompting: bool = True, ) -> Tuple[Float[Tensor, "BBBB N Nf"], Float[Tensor, "B 2"]]: assert ( view_dependent_prompting ), "Perp-Neg only works with view-dependent prompting" batch_size = elevation.shape[0] direction_idx = torch.zeros_like(elevation, dtype=torch.long) for d in self.directions: direction_idx[ d.condition(elevation, azimuth, camera_distances) ] = self.direction2idx[d.name] # 0 - side view # 1 - front view # 2 - back view # 3 - overhead view pos_text_embeddings = [] neg_text_embeddings = [] neg_guidance_weights = [] uncond_text_embeddings = [] side_emb = self.text_embeddings_vd[0] front_emb = self.text_embeddings_vd[1] back_emb = self.text_embeddings_vd[2] overhead_emb = self.text_embeddings_vd[3] for idx, ele, azi, dis in zip( direction_idx, elevation, azimuth, camera_distances ): azi = shift_azimuth_deg(azi) # to (-180, 180) uncond_text_embeddings.append( self.uncond_text_embeddings_vd[idx] ) # should be "" if idx.item() == 3: # overhead view pos_text_embeddings.append(overhead_emb) # side view # dummy neg_text_embeddings += [ self.uncond_text_embeddings_vd[idx], self.uncond_text_embeddings_vd[idx], ] neg_guidance_weights += [0.0, 0.0] else: # interpolating views if torch.abs(azi) < 90: # front-side interpolation # 0 - complete side, 1 - complete front r_inter = 1 - torch.abs(azi) / 90 pos_text_embeddings.append( r_inter * front_emb + (1 - r_inter) * side_emb ) neg_text_embeddings += [front_emb, side_emb] neg_guidance_weights += [
-shifted_expotional_decay(*self.perp_neg_f_fs, r_inter),
5
2023-10-24 19:02:44+00:00
4k
princeton-nlp/LLM-Shearing
llmshearing/utils/post_pruning_processing.py
[ { "identifier": "ComposerMosaicLlama", "path": "llmshearing/models/composer_llama.py", "snippet": "class ComposerMosaicLlama(ComposerModel):\n \"\"\" Llama model with the Composer model interface. \"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.model = LlamaModel(cfg)\n self.ref_model = None\n self.num_fwd_flops = self._compute_num_fwd_flops()\n self.train_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n self.eval_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n\n self.set_names = getattr(cfg, \"set_names\", None)\n if self.set_names is not None:\n self.set_name_to_id = {set_name: i for i, set_name in enumerate(self.set_names)}\n self.set_id_to_name = {i: set_name for i, set_name in enumerate(self.set_names)}\n \n for set_name in self.set_names:\n # add train and eval metrics for each set\n self.train_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.eval_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.train_metrics[f'{set_name}_count'] = DomainCount(set_name=set_name, set_index=self.set_name_to_id[set_name]) \n\n def prune_params(self, zs=None):\n self.model.prune_params(zs)\n \n def get_targets(self, batch):\n targets = torch.roll(batch['labels'], shifts=-1)\n targets[:, -1] = -100\n return targets\n \n def forward(self, batch):\n input_ids = batch['input_ids']\n key_padding_mask = batch['attention_mask'].bool(\n ) if 'attention_mask' in batch else None\n pruned_steps = batch.get('pruned_steps', None)\n if pruned_steps is not None:\n pruned_steps = pruned_steps[0].item()\n zs = {key: batch[key] for key in batch if \"_z\" in key}\n model_output = self.model(input_ids=input_ids, key_padding_mask=key_padding_mask, pruned_steps=pruned_steps, **zs)\n return model_output\n\n def eval_forward(self, batch, outputs=None):\n return outputs if outputs is not None else self.forward(batch)\n\n def loss(self, outputs, batch):\n logits = outputs[\"logits\"]\n l0_output = outputs[\"l0_output\"]\n targets = self.get_targets(batch)\n\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)),\n targets.view(-1),\n ignore_index=-100)\n return_loss = {\"ce_loss\": loss}\n if l0_output is not None:\n lag_loss = l0_output[0]\n return_loss[\"lag_loss\"] = lag_loss\n return_loss[\"total\"] = sum(return_loss.values())\n return return_loss\n\n def get_metrics(self, is_train=False):\n return self.train_metrics if is_train else self.eval_metrics\n\n def update_metric(self, batch, outputs, metric) -> None:\n logits = outputs[\"logits\"]\n if isinstance(metric, DomainLanguageCrossEntropy):\n targets = self.get_targets(batch)\n set_id = self.set_name_to_id[metric.set_name]\n targets[batch[\"set\"] != set_id] = -100\n metric.update(logits, targets)\n elif isinstance(metric, DomainCount):\n with torch.inference_mode():\n idx = None\n selected_sets = batch['set']\n metric.update(selected_sets, idx)\n else:\n logits = logits.view(-1, logits.size(-1))\n targets = self.get_targets(batch).view(-1)\n metric.update(logits, targets)\n\n def add_eval_metrics(self, evaluator):\n evaluator_metrics = {\n m: METRIC_DEFAULT_CTORS[m]() for m in evaluator.metric_names\n }\n if self.eval_metrics is not None:\n self.eval_metrics.update(evaluator_metrics)\n else:\n self.eval_metrics = evaluator_metrics\n\n def _compute_num_fwd_flops(self):\n # Might not be correct for LLaMA structures\n n_params = sum(p.numel() for p in self.parameters())\n # the number of paramters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.model.cfg.max_seq_len\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = self.model.cfg.n_layers * 2 * 2 * (\n self.model.cfg.d_model * (self.model.cfg.max_seq_len**2))\n return params_flops_per_seq + attn_flops_per_seq\n\n def flops_per_batch(self, batch):\n # Note: this computation does not take into account padding, and assumes\n # that the dataset has been constructed without padding. Additionally, we\n # assume the backward pass is approximately 2x the forward pass\n return self.num_fwd_flops * 3 * batch['input_ids'].shape[0]\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n if new_num_tokens is not None:\n self.model._resize_token_embeddings(new_num_tokens)" }, { "identifier": "load_weights", "path": "llmshearing/utils/utils.py", "snippet": "def load_weights(path):\n \"\"\" load model weights from a path \"\"\"\n if not isinstance(path, str):\n path = str(path)\n ckpt_paths = [path]\n if not path.endswith(\".pt\"):\n ckpt_paths = [path + \"/latest-rank0.pt\"]\n if not os.path.exists(ckpt_paths[0]):\n ckpt_paths = glob.glob(path + \"/pytorch_model*bin\")\n \n state_dict = {}\n for p in ckpt_paths: \n if torch.cuda.is_available():\n p_weight = torch.load(p)\n else:\n p_weight = torch.load(path, map_location=torch.device('cpu'))\n if \"state\" in p_weight:\n state_dict.update(p_weight[\"state\"][\"model\"])\n else:\n state_dict.update(p_weight)\n print(\"Loaded model from path: \", path)\n return state_dict" } ]
import glob import os import torch import fire from llmshearing.models.composer_llama import ComposerMosaicLlama from llmshearing.utils.utils import load_weights
1,703
def prune_and_save_model(path): """ prune and save the model after pruning """ outpath = os.path.dirname(path) + f"/pruned-{os.path.basename(path)}" config_file = os.path.join(os.path.dirname(path), "config.pt") assert os.path.exists(config_file), f"Config file {config_file} does not exist" cfg = torch.load(config_file).model if cfg.l0_module.target_model is not None: cfg.l0_module.eval_target_model = True # hack
def prune_and_save_model(path): """ prune and save the model after pruning """ outpath = os.path.dirname(path) + f"/pruned-{os.path.basename(path)}" config_file = os.path.join(os.path.dirname(path), "config.pt") assert os.path.exists(config_file), f"Config file {config_file} does not exist" cfg = torch.load(config_file).model if cfg.l0_module.target_model is not None: cfg.l0_module.eval_target_model = True # hack
model = ComposerMosaicLlama(cfg)
0
2023-10-16 12:26:08+00:00
4k
hugoycj/Instant-angelo
models/nerf.py
[ { "identifier": "BaseModel", "path": "models/base.py", "snippet": "class BaseModel(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.rank = get_rank()\n self.setup()\n if self.config.get('weights', None):\n self.load_state_dict(torch.load(self.config.weights))\n \n def setup(self):\n raise NotImplementedError\n \n def update_step(self, epoch, global_step):\n pass\n \n def train(self, mode=True):\n return super().train(mode=mode)\n \n def eval(self):\n return super().eval()\n \n def regularizations(self, out):\n return {}\n \n @torch.no_grad()\n def export(self, export_config):\n return {}" }, { "identifier": "chunk_batch", "path": "models/utils.py", "snippet": "def chunk_batch(func, chunk_size, move_to_cpu, *args, **kwargs):\n B = None\n for arg in args:\n if isinstance(arg, torch.Tensor):\n B = arg.shape[0]\n break\n out = defaultdict(list)\n out_type = None\n for i in range(0, B, chunk_size):\n out_chunk = func(*[arg[i:i+chunk_size] if isinstance(arg, torch.Tensor) else arg for arg in args], **kwargs)\n if out_chunk is None:\n continue\n out_type = type(out_chunk)\n if isinstance(out_chunk, torch.Tensor):\n out_chunk = {0: out_chunk}\n elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):\n chunk_length = len(out_chunk)\n out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}\n elif isinstance(out_chunk, dict):\n pass\n else:\n print(f'Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.')\n exit(1)\n for k, v in out_chunk.items():\n v = v if torch.is_grad_enabled() else v.detach()\n v = v.cpu() if move_to_cpu else v\n out[k].append(v)\n \n if out_type is None:\n return\n\n out = {k: torch.cat(v, dim=0) for k, v in out.items()}\n if out_type is torch.Tensor:\n return out[0]\n elif out_type in [tuple, list]:\n return out_type([out[i] for i in range(chunk_length)])\n elif out_type is dict:\n return out" }, { "identifier": "update_module_step", "path": "systems/utils.py", "snippet": "def update_module_step(m, epoch, global_step):\n if hasattr(m, 'update_step'):\n m.update_step(epoch, global_step)" } ]
import math import torch import torch.nn as nn import torch.nn.functional as F import models from models.base import BaseModel from models.utils import chunk_batch from systems.utils import update_module_step from nerfacc import ContractionType, OccupancyGrid, ray_marching, render_weight_from_density, accumulate_along_rays
1,891
@models.register('nerf') class NeRFModel(BaseModel): def setup(self): self.geometry = models.make(self.config.geometry.name, self.config.geometry) self.texture = models.make(self.config.texture.name, self.config.texture) self.register_buffer('scene_aabb', torch.as_tensor([-self.config.radius, -self.config.radius, -self.config.radius, self.config.radius, self.config.radius, self.config.radius], dtype=torch.float32)) if self.config.learned_background: self.occupancy_grid_res = 256 self.near_plane, self.far_plane = 0.2, 1e4 self.cone_angle = 10**(math.log10(self.far_plane) / self.config.num_samples_per_ray) - 1. # approximate self.render_step_size = 0.01 # render_step_size = max(distance_to_camera * self.cone_angle, self.render_step_size) self.contraction_type = ContractionType.UN_BOUNDED_SPHERE else: self.occupancy_grid_res = 128 self.near_plane, self.far_plane = None, None self.cone_angle = 0.0 self.render_step_size = 1.732 * 2 * self.config.radius / self.config.num_samples_per_ray self.contraction_type = ContractionType.AABB self.geometry.contraction_type = self.contraction_type if self.config.grid_prune: self.occupancy_grid = OccupancyGrid( roi_aabb=self.scene_aabb, resolution=self.occupancy_grid_res, contraction_type=self.contraction_type ) self.randomized = self.config.randomized self.background_color = None def update_step(self, epoch, global_step): update_module_step(self.geometry, epoch, global_step) update_module_step(self.texture, epoch, global_step) def occ_eval_fn(x): density, _ = self.geometry(x) # approximate for 1 - torch.exp(-density[...,None] * self.render_step_size) based on taylor series return density[...,None] * self.render_step_size if self.training and self.config.grid_prune: self.occupancy_grid.every_n_step(step=global_step, occ_eval_fn=occ_eval_fn) def isosurface(self): mesh = self.geometry.isosurface() return mesh def forward_(self, rays): n_rays = rays.shape[0] rays_o, rays_d = rays[:, 0:3], rays[:, 3:6] # both (N_rays, 3) def sigma_fn(t_starts, t_ends, ray_indices): ray_indices = ray_indices.long() t_origins = rays_o[ray_indices] t_dirs = rays_d[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends) / 2. density, _ = self.geometry(positions) return density[...,None] def rgb_sigma_fn(t_starts, t_ends, ray_indices): ray_indices = ray_indices.long() t_origins = rays_o[ray_indices] t_dirs = rays_d[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends) / 2. density, feature = self.geometry(positions) rgb = self.texture(feature, t_dirs) return rgb, density[...,None] with torch.no_grad(): ray_indices, t_starts, t_ends = ray_marching( rays_o, rays_d, scene_aabb=None if self.config.learned_background else self.scene_aabb, grid=self.occupancy_grid if self.config.grid_prune else None, sigma_fn=sigma_fn, near_plane=self.near_plane, far_plane=self.far_plane, render_step_size=self.render_step_size, stratified=self.randomized, cone_angle=self.cone_angle, alpha_thre=0.0 ) ray_indices = ray_indices.long() t_origins = rays_o[ray_indices] t_dirs = rays_d[ray_indices] midpoints = (t_starts + t_ends) / 2. positions = t_origins + t_dirs * midpoints intervals = t_ends - t_starts density, feature = self.geometry(positions) rgb = self.texture(feature, t_dirs) weights = render_weight_from_density(t_starts, t_ends, density[...,None], ray_indices=ray_indices, n_rays=n_rays) opacity = accumulate_along_rays(weights, ray_indices, values=None, n_rays=n_rays) depth = accumulate_along_rays(weights, ray_indices, values=midpoints, n_rays=n_rays) comp_rgb = accumulate_along_rays(weights, ray_indices, values=rgb, n_rays=n_rays) comp_rgb = comp_rgb + self.background_color * (1.0 - opacity) out = { 'comp_rgb': comp_rgb, 'opacity': opacity, 'depth': depth, 'rays_valid': opacity > 0, 'num_samples': torch.as_tensor([len(t_starts)], dtype=torch.int32, device=rays.device) } if self.training: out.update({ 'weights': weights.view(-1), 'points': midpoints.view(-1), 'intervals': intervals.view(-1), 'ray_indices': ray_indices.view(-1) }) return out def forward(self, rays): if self.training: out = self.forward_(rays) else:
@models.register('nerf') class NeRFModel(BaseModel): def setup(self): self.geometry = models.make(self.config.geometry.name, self.config.geometry) self.texture = models.make(self.config.texture.name, self.config.texture) self.register_buffer('scene_aabb', torch.as_tensor([-self.config.radius, -self.config.radius, -self.config.radius, self.config.radius, self.config.radius, self.config.radius], dtype=torch.float32)) if self.config.learned_background: self.occupancy_grid_res = 256 self.near_plane, self.far_plane = 0.2, 1e4 self.cone_angle = 10**(math.log10(self.far_plane) / self.config.num_samples_per_ray) - 1. # approximate self.render_step_size = 0.01 # render_step_size = max(distance_to_camera * self.cone_angle, self.render_step_size) self.contraction_type = ContractionType.UN_BOUNDED_SPHERE else: self.occupancy_grid_res = 128 self.near_plane, self.far_plane = None, None self.cone_angle = 0.0 self.render_step_size = 1.732 * 2 * self.config.radius / self.config.num_samples_per_ray self.contraction_type = ContractionType.AABB self.geometry.contraction_type = self.contraction_type if self.config.grid_prune: self.occupancy_grid = OccupancyGrid( roi_aabb=self.scene_aabb, resolution=self.occupancy_grid_res, contraction_type=self.contraction_type ) self.randomized = self.config.randomized self.background_color = None def update_step(self, epoch, global_step): update_module_step(self.geometry, epoch, global_step) update_module_step(self.texture, epoch, global_step) def occ_eval_fn(x): density, _ = self.geometry(x) # approximate for 1 - torch.exp(-density[...,None] * self.render_step_size) based on taylor series return density[...,None] * self.render_step_size if self.training and self.config.grid_prune: self.occupancy_grid.every_n_step(step=global_step, occ_eval_fn=occ_eval_fn) def isosurface(self): mesh = self.geometry.isosurface() return mesh def forward_(self, rays): n_rays = rays.shape[0] rays_o, rays_d = rays[:, 0:3], rays[:, 3:6] # both (N_rays, 3) def sigma_fn(t_starts, t_ends, ray_indices): ray_indices = ray_indices.long() t_origins = rays_o[ray_indices] t_dirs = rays_d[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends) / 2. density, _ = self.geometry(positions) return density[...,None] def rgb_sigma_fn(t_starts, t_ends, ray_indices): ray_indices = ray_indices.long() t_origins = rays_o[ray_indices] t_dirs = rays_d[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends) / 2. density, feature = self.geometry(positions) rgb = self.texture(feature, t_dirs) return rgb, density[...,None] with torch.no_grad(): ray_indices, t_starts, t_ends = ray_marching( rays_o, rays_d, scene_aabb=None if self.config.learned_background else self.scene_aabb, grid=self.occupancy_grid if self.config.grid_prune else None, sigma_fn=sigma_fn, near_plane=self.near_plane, far_plane=self.far_plane, render_step_size=self.render_step_size, stratified=self.randomized, cone_angle=self.cone_angle, alpha_thre=0.0 ) ray_indices = ray_indices.long() t_origins = rays_o[ray_indices] t_dirs = rays_d[ray_indices] midpoints = (t_starts + t_ends) / 2. positions = t_origins + t_dirs * midpoints intervals = t_ends - t_starts density, feature = self.geometry(positions) rgb = self.texture(feature, t_dirs) weights = render_weight_from_density(t_starts, t_ends, density[...,None], ray_indices=ray_indices, n_rays=n_rays) opacity = accumulate_along_rays(weights, ray_indices, values=None, n_rays=n_rays) depth = accumulate_along_rays(weights, ray_indices, values=midpoints, n_rays=n_rays) comp_rgb = accumulate_along_rays(weights, ray_indices, values=rgb, n_rays=n_rays) comp_rgb = comp_rgb + self.background_color * (1.0 - opacity) out = { 'comp_rgb': comp_rgb, 'opacity': opacity, 'depth': depth, 'rays_valid': opacity > 0, 'num_samples': torch.as_tensor([len(t_starts)], dtype=torch.int32, device=rays.device) } if self.training: out.update({ 'weights': weights.view(-1), 'points': midpoints.view(-1), 'intervals': intervals.view(-1), 'ray_indices': ray_indices.view(-1) }) return out def forward(self, rays): if self.training: out = self.forward_(rays) else:
out = chunk_batch(self.forward_, self.config.ray_chunk, True, rays)
1
2023-10-22 02:53:17+00:00
4k
HKUDS/GraphGPT
graphgpt/serve/model_worker_graph.py
[ { "identifier": "WORKER_HEART_BEAT_INTERVAL", "path": "graphgpt/constants.py", "snippet": "WORKER_HEART_BEAT_INTERVAL = int(os.getenv(\"FASTCHAT_WORKER_HEART_BEAT_INTERVAL\", 30))" }, { "identifier": "build_logger", "path": "graphgpt/utils.py", "snippet": "def build_logger(logger_name, logger_filename):\n def __init__(self, logger, log_level=logging.INFO):\n def __getattr__(self, attr):\n def write(self, buf):\n def flush(self):\ndef disable_torch_init():\ndef get_gpu_memory(max_gpus=None):\ndef violates_moderation(text):\ndef clean_flant5_ckpt(ckpt_path):\ndef pretty_print_semaphore(semaphore):\ndef iter_over_async(\n async_gen: AsyncGenerator, event_loop: AbstractEventLoop\n) -> Generator:\n async def get_next():\ndef detect_language(text: str) -> str:\nclass StreamToLogger(object):" }, { "identifier": "load_pretrained_model", "path": "graphgpt/model/builder.py", "snippet": "def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map=\"auto\"):\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n\n if 'graphchat' in model_name.lower():\n # Load LLaVA model\n if 'lora' in model_name.lower() and model_base is not None:\n lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n print('Loading LLaVA from base model...')\n model = GraphLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features\n if model.lm_head.weight.shape[0] != token_num:\n model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n\n print('Loading additional LLaVA weights...')\n if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):\n non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')\n else:\n # this is probably from HF Hub\n from huggingface_hub import hf_hub_download\n def load_from_hf(repo_id, filename, subfolder=None):\n cache_file = hf_hub_download(\n repo_id=repo_id,\n filename=filename,\n subfolder=subfolder)\n return torch.load(cache_file, map_location='cpu')\n non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')\n non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}\n if any(k.startswith('model.model.') for k in non_lora_trainables):\n non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}\n model.load_state_dict(non_lora_trainables, strict=False)\n\n from peft import PeftModel\n print('Loading LoRA weights...')\n model = PeftModel.from_pretrained(model, model_path)\n print('Merging LoRA weights...')\n model = model.merge_and_unload()\n print('Model is loaded...')\n elif model_base is not None:\n # this may be mm projector only\n print('Loading LLaVA from base model...')\n if 'mpt' in model_name.lower():\n if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):\n shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)\n cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n\n mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')\n mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}\n model.load_state_dict(mm_projector_weights, strict=False)\n else:\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n # Load language model\n if model_base is not None:\n # PEFT model\n from peft import PeftModel\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=\"auto\")\n print(f\"Loading LoRA weights from {model_path}\")\n model = PeftModel.from_pretrained(model, model_path)\n print(f\"Merging weights\")\n model = model.merge_and_unload()\n print('Convert to FP16...')\n model.to(torch.float16)\n else:\n use_fast = False\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n\n image_processor = None\n\n if 'llava' in model_name.lower():\n mm_use_im_start_end = getattr(model.config, \"mm_use_im_start_end\", False)\n mm_use_im_patch_token = getattr(model.config, \"mm_use_im_patch_token\", True)\n if mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n if mm_use_im_start_end:\n tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n model.resize_token_embeddings(len(tokenizer))\n\n vision_tower = model.get_vision_tower()\n if not vision_tower.is_loaded:\n vision_tower.load_model()\n vision_tower.to(device='cuda', dtype=torch.float16)\n image_processor = vision_tower.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "graphgpt/constants.py", "snippet": "SERVER_ERROR_MSG = (\n \"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\"\n)\nMODERATION_MSG = \"YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE FIX YOUR INPUT AND TRY AGAIN.\"\nCONVERSATION_LIMIT_MSG = \"YOU HAVE REACHED THE CONVERSATION LENGTH LIMIT. PLEASE CLEAR HISTORY AND START A NEW CONVERSATION.\"\nINPUT_CHAR_LEN_LIMIT = 2560\nCONVERSATION_LEN_LIMIT = 50\nLOGDIR = \".\"\nCONTROLLER_HEART_BEAT_EXPIRATION = int(\n os.getenv(\"FASTCHAT_CONTROLLER_HEART_BEAT_EXPIRATION\", 90)\n)\nWORKER_HEART_BEAT_INTERVAL = int(os.getenv(\"FASTCHAT_WORKER_HEART_BEAT_INTERVAL\", 30))\nWORKER_API_TIMEOUT = int(os.getenv(\"FASTCHAT_WORKER_API_TIMEOUT\", 100))\nWORKER_API_EMBEDDING_BATCH_SIZE = int(os.getenv(\"WORKER_API_EMBEDDING_BATCH_SIZE\", 4))\n VALIDATION_TYPE_ERROR = 40001\n INVALID_AUTH_KEY = 40101\n INCORRECT_AUTH_KEY = 40102\n NO_PERMISSION = 40103\n INVALID_MODEL = 40301\n PARAM_OUT_OF_RANGE = 40302\n CONTEXT_OVERFLOW = 40303\n RATE_LIMIT = 42901\n QUOTA_EXCEEDED = 42902\n ENGINE_OVERLOADED = 42903\n INTERNAL_ERROR = 50001\n CUDA_OUT_OF_MEMORY = 50002\n GRADIO_REQUEST_ERROR = 50003\n GRADIO_STREAM_UNKNOWN_ERROR = 50004\n CONTROLLER_NO_WORKER = 50005\n CONTROLLER_WORKER_TIMEOUT = 50006\nDEFAULT_GRAPH_TOKEN = \"<graph>\"\nDEFAULT_GRAPH_PATCH_TOKEN = \"<g_patch>\"\nDEFAULT_G_START_TOKEN = \"<g_start>\"\nDEFAULT_G_END_TOKEN = \"<g_end>\"\nclass ErrorCode(IntEnum):" } ]
import argparse import asyncio import json import time import threading import uuid import requests import torch import uvicorn from fastapi import FastAPI, Request, BackgroundTasks from fastapi.responses import StreamingResponse from functools import partial from graphgpt.constants import WORKER_HEART_BEAT_INTERVAL from graphgpt.utils import (build_logger, server_error_msg, pretty_print_semaphore) from graphgpt.model.builder import load_pretrained_model from graphgpt.mm_utils import process_images, load_image_from_base64, tokenizer_image_token, KeywordsStoppingCriteria from graphgpt.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from transformers import TextIteratorStreamer from threading import Thread
3,374
GB = 1 << 30 worker_id = str(uuid.uuid4())[:6] logger = build_logger("model_worker", f"model_worker_{worker_id}.log") global_counter = 0 model_semaphore = None def heart_beat_worker(controller): while True: time.sleep(WORKER_HEART_BEAT_INTERVAL) controller.send_heart_beat() class ModelWorker: def __init__(self, controller_addr, worker_addr, worker_id, no_register, model_path, model_base, model_name, load_8bit, load_4bit, device): self.controller_addr = controller_addr self.worker_addr = worker_addr self.worker_id = worker_id if model_path.endswith("/"): model_path = model_path[:-1] if model_name is None: model_paths = model_path.split("/") if model_paths[-1].startswith('checkpoint-'): self.model_name = model_paths[-2] + "_" + model_paths[-1] else: self.model_name = model_paths[-1] else: self.model_name = model_name self.device = device logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...") self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model( model_path, model_base, self.model_name, load_8bit, load_4bit, device=self.device) self.is_multimodal = 'llava' in self.model_name.lower() if not no_register: self.register_to_controller() self.heart_beat_thread = threading.Thread( target=heart_beat_worker, args=(self,)) self.heart_beat_thread.start() def register_to_controller(self): logger.info("Register to controller") url = self.controller_addr + "/register_worker" data = { "worker_name": self.worker_addr, "check_heart_beat": True, "worker_status": self.get_status() } r = requests.post(url, json=data) assert r.status_code == 200 def send_heart_beat(self): logger.info(f"Send heart beat. Models: {[self.model_name]}. " f"Semaphore: {pretty_print_semaphore(model_semaphore)}. " f"global_counter: {global_counter}") url = self.controller_addr + "/receive_heart_beat" while True: try: ret = requests.post(url, json={ "worker_name": self.worker_addr, "queue_length": self.get_queue_length()}, timeout=5) exist = ret.json()["exist"] break except requests.exceptions.RequestException as e: logger.error(f"heart beat error: {e}") time.sleep(5) if not exist: self.register_to_controller() def get_queue_length(self): if model_semaphore is None: return 0 else: return args.limit_model_concurrency - model_semaphore._value + (len( model_semaphore._waiters) if model_semaphore._waiters is not None else 0) def get_status(self): return { "model_names": [self.model_name], "speed": 1, "queue_length": self.get_queue_length(), } @torch.inference_mode() def generate_stream(self, params): tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor prompt = params["prompt"] ori_prompt = prompt images = params.get("images", None) num_image_tokens = 0 if images is not None and len(images) > 0 and self.is_multimodal: if len(images) > 0: if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN): raise ValueError("Number of images does not match number of <image> tokens in prompt") images = [load_image_from_base64(image) for image in images] images = process_images(images, image_processor, model.config) if type(images) is list: images = [image.to(self.model.device, dtype=torch.float16) for image in images] else: images = images.to(self.model.device, dtype=torch.float16) replace_token = DEFAULT_IMAGE_TOKEN if getattr(self.model.config, 'mm_use_im_start_end', False):
""" A model worker executes the model. """ GB = 1 << 30 worker_id = str(uuid.uuid4())[:6] logger = build_logger("model_worker", f"model_worker_{worker_id}.log") global_counter = 0 model_semaphore = None def heart_beat_worker(controller): while True: time.sleep(WORKER_HEART_BEAT_INTERVAL) controller.send_heart_beat() class ModelWorker: def __init__(self, controller_addr, worker_addr, worker_id, no_register, model_path, model_base, model_name, load_8bit, load_4bit, device): self.controller_addr = controller_addr self.worker_addr = worker_addr self.worker_id = worker_id if model_path.endswith("/"): model_path = model_path[:-1] if model_name is None: model_paths = model_path.split("/") if model_paths[-1].startswith('checkpoint-'): self.model_name = model_paths[-2] + "_" + model_paths[-1] else: self.model_name = model_paths[-1] else: self.model_name = model_name self.device = device logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...") self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model( model_path, model_base, self.model_name, load_8bit, load_4bit, device=self.device) self.is_multimodal = 'llava' in self.model_name.lower() if not no_register: self.register_to_controller() self.heart_beat_thread = threading.Thread( target=heart_beat_worker, args=(self,)) self.heart_beat_thread.start() def register_to_controller(self): logger.info("Register to controller") url = self.controller_addr + "/register_worker" data = { "worker_name": self.worker_addr, "check_heart_beat": True, "worker_status": self.get_status() } r = requests.post(url, json=data) assert r.status_code == 200 def send_heart_beat(self): logger.info(f"Send heart beat. Models: {[self.model_name]}. " f"Semaphore: {pretty_print_semaphore(model_semaphore)}. " f"global_counter: {global_counter}") url = self.controller_addr + "/receive_heart_beat" while True: try: ret = requests.post(url, json={ "worker_name": self.worker_addr, "queue_length": self.get_queue_length()}, timeout=5) exist = ret.json()["exist"] break except requests.exceptions.RequestException as e: logger.error(f"heart beat error: {e}") time.sleep(5) if not exist: self.register_to_controller() def get_queue_length(self): if model_semaphore is None: return 0 else: return args.limit_model_concurrency - model_semaphore._value + (len( model_semaphore._waiters) if model_semaphore._waiters is not None else 0) def get_status(self): return { "model_names": [self.model_name], "speed": 1, "queue_length": self.get_queue_length(), } @torch.inference_mode() def generate_stream(self, params): tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor prompt = params["prompt"] ori_prompt = prompt images = params.get("images", None) num_image_tokens = 0 if images is not None and len(images) > 0 and self.is_multimodal: if len(images) > 0: if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN): raise ValueError("Number of images does not match number of <image> tokens in prompt") images = [load_image_from_base64(image) for image in images] images = process_images(images, image_processor, model.config) if type(images) is list: images = [image.to(self.model.device, dtype=torch.float16) for image in images] else: images = images.to(self.model.device, dtype=torch.float16) replace_token = DEFAULT_IMAGE_TOKEN if getattr(self.model.config, 'mm_use_im_start_end', False):
replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
3
2023-10-15 05:13:24+00:00
4k
hkchengrex/Cutie
cutie/model/cutie.py
[ { "identifier": "AuxComputer", "path": "cutie/model/aux_modules.py", "snippet": "class AuxComputer(nn.Module):\n def __init__(self, cfg: DictConfig):\n super().__init__()\n\n use_sensory_aux = cfg.model.aux_loss.sensory.enabled\n self.use_query_aux = cfg.model.aux_loss.query.enabled\n\n sensory_dim = cfg.model.sensory_dim\n embed_dim = cfg.model.embed_dim\n\n if use_sensory_aux:\n self.sensory_aux = LinearPredictor(sensory_dim, embed_dim)\n else:\n self.sensory_aux = None\n\n def _aggregate_with_selector(self, logits: torch.Tensor, selector: torch.Tensor) -> torch.Tensor:\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n logits = aggregate(prob, dim=1)\n return logits\n\n def forward(self, pix_feat: torch.Tensor, aux_input: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n sensory = aux_input['sensory']\n q_logits = aux_input['q_logits']\n\n aux_output = {}\n aux_output['attn_mask'] = aux_input['attn_mask']\n\n if self.sensory_aux is not None:\n # B*num_objects*H*W\n logits = self.sensory_aux(pix_feat, sensory)\n aux_output['sensory_logits'] = self._aggregate_with_selector(logits, selector)\n if self.use_query_aux:\n # B*num_objects*num_levels*H*W\n aux_output['q_logits'] = self._aggregate_with_selector(\n torch.stack(q_logits, dim=2),\n selector.unsqueeze(2) if selector is not None else None)\n\n return aux_output" }, { "identifier": "QueryTransformer", "path": "cutie/model/transformer/object_transformer.py", "snippet": "class QueryTransformer(nn.Module):\n def __init__(self, model_cfg: DictConfig):\n super().__init__()\n\n this_cfg = model_cfg.object_transformer\n self.value_dim = model_cfg.value_dim\n self.embed_dim = this_cfg.embed_dim\n self.num_heads = this_cfg.num_heads\n self.num_queries = this_cfg.num_queries\n\n # query initialization and embedding\n self.query_init = nn.Embedding(self.num_queries, self.embed_dim)\n self.query_emb = nn.Embedding(self.num_queries, self.embed_dim)\n\n # projection from object summaries to query initialization and embedding\n self.summary_to_query_init = nn.Linear(self.embed_dim, self.embed_dim)\n self.summary_to_query_emb = nn.Linear(self.embed_dim, self.embed_dim)\n\n self.pixel_pe_scale = model_cfg.pixel_pe_scale\n self.pixel_pe_temperature = model_cfg.pixel_pe_temperature\n self.pixel_init_proj = GConv2d(self.embed_dim, self.embed_dim, kernel_size=1)\n self.pixel_emb_proj = GConv2d(self.embed_dim, self.embed_dim, kernel_size=1)\n self.spatial_pe = PositionalEncoding(self.embed_dim,\n scale=self.pixel_pe_scale,\n temperature=self.pixel_pe_temperature,\n channel_last=False,\n transpose_output=True)\n\n # transformer blocks\n self.num_blocks = this_cfg.num_blocks\n self.blocks = nn.ModuleList(\n QueryTransformerBlock(model_cfg) for _ in range(self.num_blocks))\n self.mask_pred = nn.ModuleList(\n nn.Sequential(nn.ReLU(), GConv2d(self.embed_dim, 1, kernel_size=1))\n for _ in range(self.num_blocks + 1))\n\n self.act = nn.ReLU(inplace=True)\n\n def forward(self,\n pixel: torch.Tensor,\n obj_summaries: torch.Tensor,\n selector: Optional[torch.Tensor] = None,\n need_weights: bool = False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n # pixel: B*num_objects*embed_dim*H*W\n # obj_summaries: B*num_objects*T*num_queries*embed_dim\n T = obj_summaries.shape[2]\n bs, num_objects, _, H, W = pixel.shape\n\n # normalize object values\n # the last channel is the cumulative area of the object\n obj_summaries = obj_summaries.view(bs * num_objects, T, self.num_queries,\n self.embed_dim + 1)\n # sum over time\n # during inference, T=1 as we already did streaming average in memory_manager\n obj_sums = obj_summaries[:, :, :, :-1].sum(dim=1)\n obj_area = obj_summaries[:, :, :, -1:].sum(dim=1)\n obj_values = obj_sums / (obj_area + 1e-4)\n obj_init = self.summary_to_query_init(obj_values)\n obj_emb = self.summary_to_query_emb(obj_values)\n\n # positional embeddings for object queries\n query = self.query_init.weight.unsqueeze(0).expand(bs * num_objects, -1, -1) + obj_init\n query_emb = self.query_emb.weight.unsqueeze(0).expand(bs * num_objects, -1, -1) + obj_emb\n\n # positional embeddings for pixel features\n pixel_init = self.pixel_init_proj(pixel)\n pixel_emb = self.pixel_emb_proj(pixel)\n pixel_pe = self.spatial_pe(pixel.flatten(0, 1))\n pixel_emb = pixel_emb.flatten(3, 4).flatten(0, 1).transpose(1, 2).contiguous()\n pixel_pe = pixel_pe.flatten(1, 2) + pixel_emb\n\n pixel = pixel_init\n\n # run the transformer\n aux_features = {'logits': []}\n\n # first aux output\n aux_logits = self.mask_pred[0](pixel).squeeze(2)\n attn_mask = self._get_aux_mask(aux_logits, selector)\n aux_features['logits'].append(aux_logits)\n for i in range(self.num_blocks):\n query, pixel, q_weights, p_weights = self.blocks[i](query,\n pixel,\n query_emb,\n pixel_pe,\n attn_mask,\n need_weights=need_weights)\n\n if self.training or i <= self.num_blocks - 1 or need_weights:\n aux_logits = self.mask_pred[i + 1](pixel).squeeze(2)\n attn_mask = self._get_aux_mask(aux_logits, selector)\n aux_features['logits'].append(aux_logits)\n\n aux_features['q_weights'] = q_weights # last layer only\n aux_features['p_weights'] = p_weights # last layer only\n\n if self.training:\n # no need to save all heads\n aux_features['attn_mask'] = attn_mask.view(bs, num_objects, self.num_heads,\n self.num_queries, H, W)[:, :, 0]\n\n return pixel, aux_features\n\n def _get_aux_mask(self, logits: torch.Tensor, selector: torch.Tensor) -> torch.Tensor:\n # logits: batch_size*num_objects*H*W\n # selector: batch_size*num_objects*1*1\n # returns a mask of shape (batch_size*num_objects*num_heads)*num_queries*(H*W)\n # where True means the attention is blocked\n\n if selector is None:\n prob = logits.sigmoid()\n else:\n prob = logits.sigmoid() * selector\n logits = aggregate(prob, dim=1)\n\n is_foreground = (logits[:, 1:] >= logits.max(dim=1, keepdim=True)[0])\n foreground_mask = is_foreground.bool().flatten(start_dim=2)\n inv_foreground_mask = ~foreground_mask\n inv_background_mask = foreground_mask\n\n aux_foreground_mask = inv_foreground_mask.unsqueeze(2).unsqueeze(2).repeat(\n 1, 1, self.num_heads, self.num_queries // 2, 1).flatten(start_dim=0, end_dim=2)\n aux_background_mask = inv_background_mask.unsqueeze(2).unsqueeze(2).repeat(\n 1, 1, self.num_heads, self.num_queries // 2, 1).flatten(start_dim=0, end_dim=2)\n\n aux_mask = torch.cat([aux_foreground_mask, aux_background_mask], dim=1)\n\n aux_mask[torch.where(aux_mask.sum(-1) == aux_mask.shape[-1])] = False\n\n return aux_mask" }, { "identifier": "ObjectSummarizer", "path": "cutie/model/transformer/object_summarizer.py", "snippet": "class ObjectSummarizer(nn.Module):\n def __init__(self, model_cfg: DictConfig):\n super().__init__()\n\n this_cfg = model_cfg.object_summarizer\n self.value_dim = model_cfg.value_dim\n self.embed_dim = this_cfg.embed_dim\n self.num_summaries = this_cfg.num_summaries\n self.add_pe = this_cfg.add_pe\n self.pixel_pe_scale = model_cfg.pixel_pe_scale\n self.pixel_pe_temperature = model_cfg.pixel_pe_temperature\n\n if self.add_pe:\n self.pos_enc = PositionalEncoding(self.embed_dim,\n scale=self.pixel_pe_scale,\n temperature=self.pixel_pe_temperature)\n\n self.input_proj = nn.Linear(self.value_dim, self.embed_dim)\n self.feature_pred = nn.Sequential(\n nn.Linear(self.embed_dim, self.embed_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.embed_dim, self.embed_dim),\n )\n self.weights_pred = nn.Sequential(\n nn.Linear(self.embed_dim, self.embed_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.embed_dim, self.num_summaries),\n )\n\n def forward(self,\n masks: torch.Tensor,\n value: torch.Tensor,\n need_weights: bool = False) -> (torch.Tensor, Optional[torch.Tensor]):\n # masks: B*num_objects*(H0)*(W0)\n # value: B*num_objects*value_dim*H*W\n # -> B*num_objects*H*W*value_dim\n h, w = value.shape[-2:]\n masks = F.interpolate(masks, size=(h, w), mode='area')\n masks = masks.unsqueeze(-1)\n inv_masks = 1 - masks\n repeated_masks = torch.cat([\n masks.expand(-1, -1, -1, -1, self.num_summaries // 2),\n inv_masks.expand(-1, -1, -1, -1, self.num_summaries // 2),\n ],\n dim=-1)\n\n value = value.permute(0, 1, 3, 4, 2)\n value = self.input_proj(value)\n if self.add_pe:\n pe = self.pos_enc(value)\n value = value + pe\n\n with torch.cuda.amp.autocast(enabled=False):\n value = value.float()\n feature = self.feature_pred(value)\n logits = self.weights_pred(value)\n sums, area = _weighted_pooling(repeated_masks, feature, logits)\n\n summaries = torch.cat([sums, area], dim=-1)\n\n if need_weights:\n return summaries, logits\n else:\n return summaries, None" }, { "identifier": "aggregate", "path": "cutie/utils/tensor_utils.py", "snippet": "def aggregate(prob: torch.Tensor, dim: int) -> torch.Tensor:\n with torch.cuda.amp.autocast(enabled=False):\n prob = prob.float()\n new_prob = torch.cat([torch.prod(1 - prob, dim=dim, keepdim=True), prob],\n dim).clamp(1e-7, 1 - 1e-7)\n logits = torch.log((new_prob / (1 - new_prob)))\n\n return logits" } ]
from typing import List, Dict from omegaconf import DictConfig from cutie.model.modules import * from cutie.model.big_modules import * from cutie.model.aux_modules import AuxComputer from cutie.model.utils.memory_utils import * from cutie.model.transformer.object_transformer import QueryTransformer from cutie.model.transformer.object_summarizer import ObjectSummarizer from cutie.utils.tensor_utils import aggregate import logging import torch import torch.nn as nn
3,036
log = logging.getLogger() class CUTIE(nn.Module): def __init__(self, cfg: DictConfig, *, single_object=False): super().__init__() model_cfg = cfg.model self.ms_dims = model_cfg.pixel_encoder.ms_dims self.key_dim = model_cfg.key_dim self.value_dim = model_cfg.value_dim self.sensory_dim = model_cfg.sensory_dim self.pixel_dim = model_cfg.pixel_dim self.embed_dim = model_cfg.embed_dim self.single_object = single_object log.info(f'Single object: {self.single_object}') self.pixel_encoder = PixelEncoder(model_cfg) self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1) self.key_proj = KeyProjection(model_cfg) self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object) self.mask_decoder = MaskDecoder(model_cfg) self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object) self.object_transformer = QueryTransformer(model_cfg) self.object_summarizer = ObjectSummarizer(model_cfg)
log = logging.getLogger() class CUTIE(nn.Module): def __init__(self, cfg: DictConfig, *, single_object=False): super().__init__() model_cfg = cfg.model self.ms_dims = model_cfg.pixel_encoder.ms_dims self.key_dim = model_cfg.key_dim self.value_dim = model_cfg.value_dim self.sensory_dim = model_cfg.sensory_dim self.pixel_dim = model_cfg.pixel_dim self.embed_dim = model_cfg.embed_dim self.single_object = single_object log.info(f'Single object: {self.single_object}') self.pixel_encoder = PixelEncoder(model_cfg) self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1) self.key_proj = KeyProjection(model_cfg) self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object) self.mask_decoder = MaskDecoder(model_cfg) self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object) self.object_transformer = QueryTransformer(model_cfg) self.object_summarizer = ObjectSummarizer(model_cfg)
self.aux_computer = AuxComputer(cfg)
0
2023-10-19 17:49:24+00:00
4k
DeepGraphLearning/ULTRA
ultra/util.py
[ { "identifier": "models", "path": "ultra/models.py", "snippet": "class Ultra(nn.Module):\nclass RelNBFNet(BaseNBFNet):\nclass EntityNBFNet(BaseNBFNet):\n def __init__(self, rel_model_cfg, entity_model_cfg):\n def forward(self, data, batch):\n def __init__(self, input_dim, hidden_dims, num_relation=4, **kwargs):\n def bellmanford(self, data, h_index, separate_grad=False):\n def forward(self, rel_graph, query):\n def __init__(self, input_dim, hidden_dims, num_relation=1, **kwargs):\n def bellmanford(self, data, h_index, r_index, separate_grad=False):\n def forward(self, data, relation_representations, batch):" }, { "identifier": "datasets", "path": "ultra/datasets.py", "snippet": "class GrailInductiveDataset(InMemoryDataset):\nclass FB15k237Inductive(GrailInductiveDataset):\nclass WN18RRInductive(GrailInductiveDataset):\nclass NELLInductive(GrailInductiveDataset):\nclass TransductiveDataset(InMemoryDataset):\nclass CoDEx(TransductiveDataset):\nclass CoDExSmall(CoDEx):\nclass CoDExMedium(CoDEx):\nclass CoDExLarge(CoDEx):\nclass NELL995(TransductiveDataset):\nclass ConceptNet100k(TransductiveDataset):\nclass DBpedia100k(TransductiveDataset):\nclass YAGO310(TransductiveDataset):\nclass Hetionet(TransductiveDataset):\nclass AristoV4(TransductiveDataset):\nclass SparserKG(TransductiveDataset):\nclass WDsinger(SparserKG): \nclass NELL23k(SparserKG): \nclass FB15k237_10(SparserKG): \nclass FB15k237_20(SparserKG): \nclass FB15k237_50(SparserKG): \nclass InductiveDataset(InMemoryDataset):\nclass IngramInductive(InductiveDataset):\nclass FBIngram(IngramInductive):\nclass WKIngram(IngramInductive):\nclass NLIngram(IngramInductive):\nclass ILPC2022(InductiveDataset):\nclass HM(InductiveDataset):\nclass MTDEAInductive(InductiveDataset):\nclass FBNELL(MTDEAInductive):\nclass Metafam(MTDEAInductive):\nclass WikiTopicsMT1(MTDEAInductive):\nclass WikiTopicsMT2(MTDEAInductive):\nclass WikiTopicsMT3(MTDEAInductive):\nclass WikiTopicsMT4(MTDEAInductive):\nclass JointDataset(InMemoryDataset):\n def __init__(self, root, version, transform=None, pre_transform=build_relation_graph, merge_valid_test=True):\n def num_relations(self):\n def raw_dir(self):\n def processed_dir(self):\n def processed_file_names(self):\n def raw_file_names(self):\n def download(self):\n def process(self):\n def __repr__(self):\n def __init__(self, root, version):\n def __init__(self, root, version):\n def __init__(self, root, version):\ndef FB15k237(root):\ndef WN18RR(root):\n def __init__(self, root, transform=None, pre_transform=build_relation_graph, **kwargs):\n def raw_file_names(self):\n def download(self):\n def load_file(self, triplet_file, inv_entity_vocab={}, inv_rel_vocab={}):\n def process(self):\n def __repr__(self):\n def num_relations(self):\n def raw_dir(self):\n def processed_dir(self):\n def processed_file_names(self):\n def download(self):\n def __init__(self, root):\n def __init__(self, root):\n def __init__(self, root):\n def raw_file_names(self):\n def process(self):\n def download(self):\n def raw_dir(self):\n def processed_dir(self):\n def download(self):\n def load_file(self, triplet_file, inv_entity_vocab={}, inv_rel_vocab={}):\n def __init__(self, root, version, transform=None, pre_transform=build_relation_graph, **kwargs):\n def download(self):\n def load_file(self, triplet_file, inv_entity_vocab={}, inv_rel_vocab={}):\n def process(self):\n def num_relations(self):\n def raw_dir(self):\n def processed_dir(self):\n def raw_file_names(self):\n def processed_file_names(self):\n def __repr__(self):\n def raw_dir(self):\n def processed_dir(self):\n def __init__(self, root, version, **kwargs):\n def process(self):\n def __init__(self, root, version, **kwargs):\n def raw_dir(self):\n def processed_dir(self):\n def raw_file_names(self):\n def download(self):\n def load_file(self, triplet_file, inv_entity_vocab={}, inv_rel_vocab={}, limit_vocab=False):\n def process(self):\n def __init__(self, **kwargs):\n def __init__(self, **kwargs):\n def __init__(self, **kwargs):\n def __init__(self, **kwargs):\n def __init__(self, **kwargs):\n def __init__(self, **kwargs):\n def __init__(self, root, graphs, transform=None, pre_transform=None):\n def raw_dir(self):\n def processed_dir(self):\n def processed_file_names(self):\n def process(self):" } ]
import os import sys import ast import copy import time import logging import argparse import yaml import jinja2 import easydict import torch from jinja2 import meta from torch import distributed as dist from torch_geometric.data import Data from torch_geometric.datasets import RelLinkPredDataset, WordNet18RR from ultra import models, datasets
2,154
env = jinja2.Environment() tree = env.parse(raw) vars = meta.find_undeclared_variables(tree) return vars def load_config(cfg_file, context=None): with open(cfg_file, "r") as fin: raw = fin.read() template = jinja2.Template(raw) instance = template.render(context) cfg = yaml.safe_load(instance) cfg = easydict.EasyDict(cfg) return cfg def literal_eval(string): try: return ast.literal_eval(string) except (ValueError, SyntaxError): return string def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("-c", "--config", help="yaml configuration file", required=True) parser.add_argument("-s", "--seed", help="random seed for PyTorch", type=int, default=1024) args, unparsed = parser.parse_known_args() # get dynamic arguments defined in the config file vars = detect_variables(args.config) parser = argparse.ArgumentParser() for var in vars: parser.add_argument("--%s" % var, required=True) vars = parser.parse_known_args(unparsed)[0] vars = {k: literal_eval(v) for k, v in vars._get_kwargs()} return args, vars def get_root_logger(file=True): format = "%(asctime)-10s %(message)s" datefmt = "%H:%M:%S" logging.basicConfig(format=format, datefmt=datefmt) logger = logging.getLogger("") logger.setLevel(logging.INFO) if file: handler = logging.FileHandler("log.txt") format = logging.Formatter(format, datefmt) handler.setFormatter(format) logger.addHandler(handler) return logger def get_rank(): if dist.is_initialized(): return dist.get_rank() if "RANK" in os.environ: return int(os.environ["RANK"]) return 0 def get_world_size(): if dist.is_initialized(): return dist.get_world_size() if "WORLD_SIZE" in os.environ: return int(os.environ["WORLD_SIZE"]) return 1 def synchronize(): if get_world_size() > 1: dist.barrier() def get_device(cfg): if cfg.train.gpus: device = torch.device(cfg.train.gpus[get_rank()]) else: device = torch.device("cpu") return device def create_working_directory(cfg): file_name = "working_dir.tmp" world_size = get_world_size() if cfg.train.gpus is not None and len(cfg.train.gpus) != world_size: error_msg = "World size is %d but found %d GPUs in the argument" if world_size == 1: error_msg += ". Did you launch with `python -m torch.distributed.launch`?" raise ValueError(error_msg % (world_size, len(cfg.train.gpus))) if world_size > 1 and not dist.is_initialized(): dist.init_process_group("nccl", init_method="env://") working_dir = os.path.join(os.path.expanduser(cfg.output_dir), cfg.model["class"], cfg.dataset["class"], time.strftime("%Y-%m-%d-%H-%M-%S")) # synchronize working directory if get_rank() == 0: with open(file_name, "w") as fout: fout.write(working_dir) os.makedirs(working_dir) synchronize() if get_rank() != 0: with open(file_name, "r") as fin: working_dir = fin.read() synchronize() if get_rank() == 0: os.remove(file_name) os.chdir(working_dir) return working_dir def build_dataset(cfg): data_config = copy.deepcopy(cfg.dataset) cls = data_config.pop("class")
logger = logging.getLogger(__file__) def detect_variables(cfg_file): with open(cfg_file, "r") as fin: raw = fin.read() env = jinja2.Environment() tree = env.parse(raw) vars = meta.find_undeclared_variables(tree) return vars def load_config(cfg_file, context=None): with open(cfg_file, "r") as fin: raw = fin.read() template = jinja2.Template(raw) instance = template.render(context) cfg = yaml.safe_load(instance) cfg = easydict.EasyDict(cfg) return cfg def literal_eval(string): try: return ast.literal_eval(string) except (ValueError, SyntaxError): return string def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("-c", "--config", help="yaml configuration file", required=True) parser.add_argument("-s", "--seed", help="random seed for PyTorch", type=int, default=1024) args, unparsed = parser.parse_known_args() # get dynamic arguments defined in the config file vars = detect_variables(args.config) parser = argparse.ArgumentParser() for var in vars: parser.add_argument("--%s" % var, required=True) vars = parser.parse_known_args(unparsed)[0] vars = {k: literal_eval(v) for k, v in vars._get_kwargs()} return args, vars def get_root_logger(file=True): format = "%(asctime)-10s %(message)s" datefmt = "%H:%M:%S" logging.basicConfig(format=format, datefmt=datefmt) logger = logging.getLogger("") logger.setLevel(logging.INFO) if file: handler = logging.FileHandler("log.txt") format = logging.Formatter(format, datefmt) handler.setFormatter(format) logger.addHandler(handler) return logger def get_rank(): if dist.is_initialized(): return dist.get_rank() if "RANK" in os.environ: return int(os.environ["RANK"]) return 0 def get_world_size(): if dist.is_initialized(): return dist.get_world_size() if "WORLD_SIZE" in os.environ: return int(os.environ["WORLD_SIZE"]) return 1 def synchronize(): if get_world_size() > 1: dist.barrier() def get_device(cfg): if cfg.train.gpus: device = torch.device(cfg.train.gpus[get_rank()]) else: device = torch.device("cpu") return device def create_working_directory(cfg): file_name = "working_dir.tmp" world_size = get_world_size() if cfg.train.gpus is not None and len(cfg.train.gpus) != world_size: error_msg = "World size is %d but found %d GPUs in the argument" if world_size == 1: error_msg += ". Did you launch with `python -m torch.distributed.launch`?" raise ValueError(error_msg % (world_size, len(cfg.train.gpus))) if world_size > 1 and not dist.is_initialized(): dist.init_process_group("nccl", init_method="env://") working_dir = os.path.join(os.path.expanduser(cfg.output_dir), cfg.model["class"], cfg.dataset["class"], time.strftime("%Y-%m-%d-%H-%M-%S")) # synchronize working directory if get_rank() == 0: with open(file_name, "w") as fout: fout.write(working_dir) os.makedirs(working_dir) synchronize() if get_rank() != 0: with open(file_name, "r") as fin: working_dir = fin.read() synchronize() if get_rank() == 0: os.remove(file_name) os.chdir(working_dir) return working_dir def build_dataset(cfg): data_config = copy.deepcopy(cfg.dataset) cls = data_config.pop("class")
ds_cls = getattr(datasets, cls)
1
2023-10-23 17:06:10+00:00
4k
ZhengyiLuo/PerpetualHumanoidControl
phc/learning/im_amp.py
[ { "identifier": "RunningMeanStd", "path": "phc/utils/running_mean_std.py", "snippet": "class RunningMeanStd(nn.Module):\n\n def __init__(self,\n insize,\n epsilon=1e-05,\n per_channel=False,\n norm_only=False):\n super(RunningMeanStd, self).__init__()\n print('RunningMeanStd: ', insize)\n self.insize = insize\n self.mean_size = insize[0]\n self.epsilon = epsilon\n\n self.norm_only = norm_only\n self.per_channel = per_channel\n if per_channel:\n if len(self.insize) == 3:\n self.axis = [0, 2, 3]\n if len(self.insize) == 2:\n self.axis = [0, 2]\n if len(self.insize) == 1:\n self.axis = [0]\n in_size = self.insize[0]\n else:\n self.axis = [0]\n in_size = insize\n\n self.register_buffer(\"running_mean\",\n torch.zeros(in_size, dtype=torch.float64))\n self.register_buffer(\"running_var\",\n torch.ones(in_size, dtype=torch.float64))\n self.register_buffer(\"count\", torch.ones((), dtype=torch.float64))\n\n self.forzen = False\n self.forzen_partial = False\n\n def freeze(self):\n self.forzen = True\n\n def unfreeze(self):\n self.forzen = False\n\n def freeze_partial(self, diff):\n self.forzen_partial = True\n self.diff = diff\n\n\n def _update_mean_var_count_from_moments(self, mean, var, count, batch_mean,\n batch_var, batch_count):\n delta = batch_mean - mean\n tot_count = count + batch_count\n\n new_mean = mean + delta * batch_count / tot_count\n m_a = var * count\n m_b = batch_var * batch_count\n M2 = m_a + m_b + delta**2 * count * batch_count / tot_count\n new_var = M2 / tot_count\n new_count = tot_count\n return new_mean, new_var, new_count\n\n def forward(self, input, unnorm=False):\n # change shape\n if self.per_channel:\n if len(self.insize) == 3:\n current_mean = self.running_mean.view(\n [1, self.insize[0], 1, 1]).expand_as(input)\n current_var = self.running_var.view([1, self.insize[0], 1,1]).expand_as(input)\n if len(self.insize) == 2:\n current_mean = self.running_mean.view([1, self.insize[0],1]).expand_as(input)\n current_var = self.running_var.view([1, self.insize[0],1]).expand_as(input)\n if len(self.insize) == 1:\n current_mean = self.running_mean.view([1, self.insize[0]]).expand_as(input)\n current_var = self.running_var.view([1, self.insize[0]]).expand_as(input)\n else:\n current_mean = self.running_mean\n current_var = self.running_var\n # get output\n\n if unnorm:\n y = torch.clamp(input, min=-5.0, max=5.0)\n y = torch.sqrt(current_var.float() +\n self.epsilon) * y + current_mean.float()\n else:\n if self.norm_only:\n y = input / torch.sqrt(current_var.float() + self.epsilon)\n else:\n y = (input - current_mean.float()) / torch.sqrt(current_var.float() + self.epsilon)\n y = torch.clamp(y, min=-5.0, max=5.0)\n\n # update After normalization, so that the values used for training and testing are the same.\n if self.training and not self.forzen:\n mean = input.mean(self.axis) # along channel axis\n var = input.var(self.axis)\n new_mean, new_var, new_count = self._update_mean_var_count_from_moments(self.running_mean, self.running_var, self.count, mean, var, input.size()[0])\n if self.forzen_partial:\n # Only update the last bit (futures)\n self.running_mean[-self.diff:], self.running_var[-self.diff:], self.count = new_mean[-self.diff:], new_var[-self.diff:], new_count\n else:\n self.running_mean, self.running_var, self.count = new_mean, new_var, new_count\n\n return y" }, { "identifier": "HumanoidAMPTask", "path": "phc/env/tasks/humanoid_amp_task.py", "snippet": "class HumanoidAMPTask(humanoid_amp.HumanoidAMP):\n def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless):\n self._enable_task_obs = cfg[\"env\"][\"enableTaskObs\"]\n\n super().__init__(cfg=cfg,\n sim_params=sim_params,\n physics_engine=physics_engine,\n device_type=device_type,\n device_id=device_id,\n headless=headless)\n self.has_task = True\n return\n\n\n def get_obs_size(self):\n obs_size = super().get_obs_size()\n if (self._enable_task_obs):\n task_obs_size = self.get_task_obs_size()\n obs_size += task_obs_size\n return obs_size\n\n def get_task_obs_size(self):\n return 0\n\n def pre_physics_step(self, actions):\n super().pre_physics_step(actions)\n self._update_task()\n \n return\n\n def render(self, sync_frame_time=False):\n super().render(sync_frame_time)\n\n if self.viewer or flags.server_mode:\n self._draw_task()\n return\n\n def _update_task(self):\n return\n\n def _reset_envs(self, env_ids):\n super()._reset_envs(env_ids)\n self._reset_task(env_ids)\n return\n\n def _reset_task(self, env_ids):\n return\n\n def _compute_observations(self, env_ids=None):\n # env_ids is used for resetting\n if env_ids is None:\n env_ids = torch.arange(self.num_envs).to(self.device)\n humanoid_obs = self._compute_humanoid_obs(env_ids)\n\n if (self._enable_task_obs):\n task_obs = self._compute_task_obs(env_ids)\n obs = torch.cat([humanoid_obs, task_obs], dim=-1)\n else:\n obs = humanoid_obs\n \n \n if self.obs_v == 2:\n # Double sub will return a copy.\n B, N = obs.shape\n sums = self.obs_buf[env_ids, 0:self.past_track_steps].abs().sum(dim=1)\n zeros = sums == 0\n nonzero = ~zeros\n obs_slice = self.obs_buf[env_ids]\n obs_slice[zeros] = torch.tile(obs[zeros], (1, self.past_track_steps))\n obs_slice[nonzero] = torch.cat([obs_slice[nonzero, N:], obs[nonzero]], dim=-1)\n self.obs_buf[env_ids] = obs_slice\n else:\n self.obs_buf[env_ids] = obs\n\n return\n\n def _compute_task_obs(self, env_ids=None):\n return NotImplemented\n\n def _compute_reward(self, actions):\n return NotImplemented\n\n def _draw_task(self):\n return" }, { "identifier": "flags", "path": "phc/utils/flags.py", "snippet": "class Flags(object):\n def __init__(self, items):" }, { "identifier": "compute_metrics_lite", "path": "uhc/smpllib/smpl_eval.py", "snippet": "def compute_metrics_lite(pred_pos_all, gt_pos_all, root_idx = 0, use_tqdm = True, concatenate = True):\n metrics = defaultdict(list)\n if use_tqdm:\n pbar = tqdm(range(len(pred_pos_all)))\n else:\n pbar = range(len(pred_pos_all))\n \n for idx in pbar:\n jpos_pred = pred_pos_all[idx].copy()\n jpos_gt = gt_pos_all[idx].copy()\n mpjpe_g = np.linalg.norm(jpos_gt - jpos_pred, axis=2) * 1000\n \n\n vel_dist = (compute_error_vel(jpos_pred, jpos_gt)) * 1000\n accel_dist = (compute_error_accel(jpos_pred, jpos_gt)) * 1000\n\n jpos_pred = jpos_pred - jpos_pred[:, [root_idx]] # zero out root\n jpos_gt = jpos_gt - jpos_gt[:, [root_idx]]\n\n pa_mpjpe = p_mpjpe(jpos_pred, jpos_gt) * 1000\n mpjpe = np.linalg.norm(jpos_pred - jpos_gt, axis=2)* 1000\n \n metrics[\"mpjpe_g\"].append(mpjpe_g)\n metrics[\"mpjpe_l\"].append(mpjpe)\n metrics[\"mpjpe_pa\"].append(pa_mpjpe)\n metrics[\"accel_dist\"].append(accel_dist)\n metrics[\"vel_dist\"].append(vel_dist)\n \n if concatenate:\n metrics = {k:np.concatenate(v) for k, v in metrics.items()}\n return metrics" } ]
import glob import os import sys import pdb import os.path as osp import time import numpy as np import torch import learning.replay_buffer as replay_buffer import phc.learning.amp_agent as amp_agent import joblib import gc from phc.utils.running_mean_std import RunningMeanStd from rl_games.algos_torch import torch_ext from rl_games.common import a2c_common from rl_games.common import schedulers from rl_games.common import vecenv from isaacgym.torch_utils import * from datetime import datetime from torch import optim from torch import nn from phc.env.tasks.humanoid_amp_task import HumanoidAMPTask from phc.utils.flags import flags from rl_games.common.tr_helpers import unsqueeze_obs from rl_games.algos_torch.players import rescale_actions from tensorboardX import SummaryWriter from uhc.smpllib.smpl_eval import compute_metrics_lite from tqdm import tqdm
3,162
sys.path.append(os.getcwd()) class IMAmpAgent(amp_agent.AMPAgent): def __init__(self, base_name, config): super().__init__(base_name, config) def get_action(self, obs_dict, is_determenistic=False): obs = obs_dict["obs"] if self.has_batch_dimension == False: obs = unsqueeze_obs(obs) obs = self._preproc_obs(obs) input_dict = { "is_train": False, "prev_actions": None, "obs": obs, "rnn_states": self.states, } with torch.no_grad(): res_dict = self.model(input_dict) mu = res_dict["mus"] action = res_dict["actions"] self.states = res_dict["rnn_states"] if is_determenistic: current_action = mu else: current_action = action if self.has_batch_dimension == False: current_action = torch.squeeze(current_action.detach()) if self.clip_actions: return rescale_actions( self.actions_low, self.actions_high, torch.clamp(current_action, -1.0, 1.0), ) else: return current_action def env_eval_step(self, env, actions): if not self.is_tensor_obses: actions = actions.cpu().numpy() obs, rewards, dones, infos = env.step(actions) if hasattr(obs, "dtype") and obs.dtype == np.float64: obs = np.float32(obs) if self.value_size > 1: rewards = rewards[0] if self.is_tensor_obses: return obs, rewards.to(self.device), dones.to(self.device), infos else: if np.isscalar(dones): rewards = np.expand_dims(np.asarray(rewards), 0) dones = np.expand_dims(np.asarray(dones), 0) return ( self.obs_to_torch(obs), torch.from_numpy(rewards), torch.from_numpy(dones), infos, ) def restore(self, fn): super().restore(fn) all_fails = glob.glob(osp.join(self.network_path, f"failed_*")) if len(all_fails) > 0: print("------------------------------------------------------ Restoring Termination History ------------------------------------------------------") failed_pth = sorted(all_fails, key=lambda x: int(x.split("_")[-1].split(".")[0]))[-1] print(f"loading: {failed_pth}") termination_history = joblib.load(failed_pth)['termination_history'] humanoid_env = self.vec_env.env.task res = humanoid_env._motion_lib.update_sampling_prob(termination_history) if res: print("Successfully restored termination history") else: print("Termination history length does not match") return def init_rnn(self): if self.is_rnn: rnn_states = self.model.get_default_rnn_state() self.states = [torch.zeros((s.size()[0], self.vec_env.env.task.num_envs, s.size( )[2]), dtype=torch.float32).to(self.device) for s in rnn_states] def update_training_data(self, failed_keys): humanoid_env = self.vec_env.env.task joblib.dump({"failed_keys": failed_keys, "termination_history": humanoid_env._motion_lib._termination_history}, osp.join(self.network_path, f"failed_{self.epoch_num:010d}.pkl")) def eval(self): print("############################ Evaluation ############################")
sys.path.append(os.getcwd()) class IMAmpAgent(amp_agent.AMPAgent): def __init__(self, base_name, config): super().__init__(base_name, config) def get_action(self, obs_dict, is_determenistic=False): obs = obs_dict["obs"] if self.has_batch_dimension == False: obs = unsqueeze_obs(obs) obs = self._preproc_obs(obs) input_dict = { "is_train": False, "prev_actions": None, "obs": obs, "rnn_states": self.states, } with torch.no_grad(): res_dict = self.model(input_dict) mu = res_dict["mus"] action = res_dict["actions"] self.states = res_dict["rnn_states"] if is_determenistic: current_action = mu else: current_action = action if self.has_batch_dimension == False: current_action = torch.squeeze(current_action.detach()) if self.clip_actions: return rescale_actions( self.actions_low, self.actions_high, torch.clamp(current_action, -1.0, 1.0), ) else: return current_action def env_eval_step(self, env, actions): if not self.is_tensor_obses: actions = actions.cpu().numpy() obs, rewards, dones, infos = env.step(actions) if hasattr(obs, "dtype") and obs.dtype == np.float64: obs = np.float32(obs) if self.value_size > 1: rewards = rewards[0] if self.is_tensor_obses: return obs, rewards.to(self.device), dones.to(self.device), infos else: if np.isscalar(dones): rewards = np.expand_dims(np.asarray(rewards), 0) dones = np.expand_dims(np.asarray(dones), 0) return ( self.obs_to_torch(obs), torch.from_numpy(rewards), torch.from_numpy(dones), infos, ) def restore(self, fn): super().restore(fn) all_fails = glob.glob(osp.join(self.network_path, f"failed_*")) if len(all_fails) > 0: print("------------------------------------------------------ Restoring Termination History ------------------------------------------------------") failed_pth = sorted(all_fails, key=lambda x: int(x.split("_")[-1].split(".")[0]))[-1] print(f"loading: {failed_pth}") termination_history = joblib.load(failed_pth)['termination_history'] humanoid_env = self.vec_env.env.task res = humanoid_env._motion_lib.update_sampling_prob(termination_history) if res: print("Successfully restored termination history") else: print("Termination history length does not match") return def init_rnn(self): if self.is_rnn: rnn_states = self.model.get_default_rnn_state() self.states = [torch.zeros((s.size()[0], self.vec_env.env.task.num_envs, s.size( )[2]), dtype=torch.float32).to(self.device) for s in rnn_states] def update_training_data(self, failed_keys): humanoid_env = self.vec_env.env.task joblib.dump({"failed_keys": failed_keys, "termination_history": humanoid_env._motion_lib._termination_history}, osp.join(self.network_path, f"failed_{self.epoch_num:010d}.pkl")) def eval(self): print("############################ Evaluation ############################")
if not flags.has_eval:
2
2023-10-15 19:05:47+00:00
4k
laike9m/Python-Type-Challenges
tests/test_challenge.py
[ { "identifier": "ChallengeKey", "path": "views/challenge.py", "snippet": "class ChallengeKey:\n level: Level\n name: ChallengeName\n\n @classmethod\n def from_str(cls, key: str):\n \"\"\"Create a key object from a string like \"basic-foo\".\"\"\"\n level, name = key.split(\"-\", maxsplit=1)\n return cls(Level(level), name)" }, { "identifier": "ChallengeManager", "path": "views/challenge.py", "snippet": "class ChallengeManager:\n \"\"\"The manager for challenges.\n\n :param root_dir: The root directory that contains the files of challenges.\n \"\"\"\n\n def __init__(self, root_dir: Optional[Path] = None):\n if not root_dir:\n root_dir = ROOT_DIR / \"challenges\"\n self.challenges: dict[ChallengeKey, Challenge] = self._load_challenges(root_dir)\n self.challenges_groupby_level: dict[Level, list[ChallengeName]]\n self.challenges_groupby_level = self._get_challenges_groupby_level()\n\n def has_challenge(self, key: ChallengeKey) -> bool:\n return key in self.challenges\n\n def get_challenge(self, key: ChallengeKey) -> Challenge:\n return self.challenges[key]\n\n @property\n def challenge_count(self) -> int:\n \"\"\"The count of challenges.\"\"\"\n return len(self.challenges)\n\n def run_challenge(self, key: ChallengeKey, user_code: str) -> TypeCheckResult:\n challenge = self.get_challenge(key)\n # Make sure user code ends with a new line to avoid issue #63.\n return self._type_check_with_pyright(user_code + \"\\n\", challenge.test_code)\n\n def get_random_challenge(self) -> dict[str, str]:\n level = random.choice(list(self.challenges_groupby_level.keys()))\n name = random.choice(self.challenges_groupby_level[level])\n return {\"level\": level, \"name\": name}\n\n @staticmethod\n def _load_challenges(root_dir: Path) -> dict[ChallengeKey, Challenge]:\n challenges = {}\n for challenge_folder in root_dir.iterdir():\n question_source = challenge_folder / \"question.py\"\n if not question_source.exists():\n continue\n\n # Try to read the optional hints file\n hints_file = challenge_folder / \"hints.md\"\n if hints_file.exists():\n hints = hints_file.read_text().strip()\n else:\n hints = None\n\n key = ChallengeKey.from_str(challenge_folder.name)\n challenges[key] = Challenge(\n name=key.name,\n level=key.level,\n code=question_source.read_text(),\n hints=hints,\n )\n return challenges\n\n def _get_challenges_groupby_level(self) -> dict[Level, list[ChallengeName]]:\n groups: defaultdict[str, list[ChallengeName]] = defaultdict(list)\n\n for challenge in self.challenges.values():\n groups[challenge.level].append(challenge.name)\n\n # Sort challenge by name alphabetically.\n for challenge_names in groups.values():\n challenge_names.sort()\n\n # Make sure groups are ordered by level (from easy to hard)\n return {level: groups[level] for level in Level}\n\n EXPECT_ERROR_COMMENT = \"expect-type-error\"\n\n # Pyright error messages look like:\n # `<filename>:<line_no>:<col_no> - <error|warning|information>: <message>`\n # Here we only capture the error messages and line numbers\n PYRIGHT_MESSAGE_REGEX = r\"^(?:.+?):(\\d+):[\\s\\-\\d]+(error:.+)$\"\n\n @classmethod\n def _type_check_with_pyright(\n cls, user_code: str, test_code: str\n ) -> TypeCheckResult:\n code = f\"{user_code}{test_code}\"\n buffer = io.StringIO(code)\n\n # This produces a stream of TokenInfos, example:\n # TokenInfo(type=4 (NEWLINE), string='\\n', start=(4, 3), end=(4, 4), line='\"\"\"\\n'),\n # TokenInfo(type=62 (NL), string='\\n', start=(5, 0), end=(5, 1), line='\\n')\n # See https://docs.python.org/3/library/tokenize.html#tokenize.tokenize for more details\n tokens = list(tokenize.generate_tokens(buffer.readline))\n\n # Find all lines that are followed by a comment # expect-type-error\n expect_error_line_numbers = [\n token.start[0]\n for token in tokens\n if token.type == tokenize.COMMENT\n and token.string[1:].strip() == cls.EXPECT_ERROR_COMMENT\n ]\n # Tracks whether an expected error has been reported by type checker.\n error_line_seen_in_err_msg: dict[int, bool] = {\n lineno: False for lineno in expect_error_line_numbers\n }\n\n with tempfile.NamedTemporaryFile(suffix=\".py\") as temp:\n temp.write(code.encode())\n temp.flush()\n # TODO: switch to json output to simplify output parsing.\n # https://microsoft.github.io/pyright/#/command-line?id=json-output\n raw_result = subprocess.run(\n [\"pyright\", \"--pythonversion\", \"3.12\", temp.name],\n capture_output=True,\n text=True,\n )\n stdout, stderr = raw_result.stdout, raw_result.stderr\n if stderr:\n return TypeCheckResult(message=stderr, passed=False)\n error_lines: list[str] = []\n\n # Substract lineno in merged code by lineno_delta, so that the lineno in\n # error message matches those in the test code editor. Fixed #20.\n lineno_delta = len(user_code.splitlines())\n for line in stdout.splitlines():\n m = re.match(cls.PYRIGHT_MESSAGE_REGEX, line)\n if m is None:\n continue\n line_number, message = int(m.group(1)), m.group(2)\n if line_number in error_line_seen_in_err_msg:\n # Each reported error should be attached to a specific line,\n # If it is commented with # expect-type-error, let it pass.\n error_line_seen_in_err_msg[line_number] = True\n continue\n # Error could be thrown from user code too, in which case delta shouldn't be applied.\n error_lines.append(\n f\"{line_number if line_number <= lineno_delta else line_number - lineno_delta}:{message}\"\n )\n\n # If there are any lines that are expected to fail but not reported by pyright,\n # they should be considered as errors.\n for line_number, seen in error_line_seen_in_err_msg.items():\n if not seen:\n error_lines.append(\n f\"{line_number - lineno_delta}: error: Expected type error but instead passed\"\n )\n\n passed = len(error_lines) == 0\n if passed:\n error_lines.append(\"\\nAll tests passed\")\n else:\n error_lines.append(f\"\\nFound {len(error_lines)} errors\")\n\n return TypeCheckResult(message=\"\\n\".join(error_lines), passed=passed)" } ]
from pathlib import Path from views.challenge import ChallengeKey, ChallengeManager import pytest
1,788
class TestLoadChallenges: def test_load_empty_dir(self, tmpdir): assert ChallengeManager(Path(tmpdir)).challenge_count == 0 def test_defaults(self): assert ChallengeManager().challenge_count > 0 def test_load_tests_assets(self, assets_dir): mgr = ChallengeManager(assets_dir / "challenges") assert mgr.challenge_count > 0 class TestChallengeWithHints: @pytest.fixture() def challenge_mgr(self, assets_dir): return ChallengeManager(assets_dir / "challenges") def test_misc(self, challenge_mgr):
class TestLoadChallenges: def test_load_empty_dir(self, tmpdir): assert ChallengeManager(Path(tmpdir)).challenge_count == 0 def test_defaults(self): assert ChallengeManager().challenge_count > 0 def test_load_tests_assets(self, assets_dir): mgr = ChallengeManager(assets_dir / "challenges") assert mgr.challenge_count > 0 class TestChallengeWithHints: @pytest.fixture() def challenge_mgr(self, assets_dir): return ChallengeManager(assets_dir / "challenges") def test_misc(self, challenge_mgr):
c_foo = challenge_mgr.get_challenge(ChallengeKey.from_str("basic-foo"))
0
2023-10-23 05:11:41+00:00
4k
uni-medical/SAM-Med3D
train.py
[ { "identifier": "sam_model_registry3D", "path": "segment_anything/build_sam3D.py", "snippet": "def build_sam3D_vit_h(checkpoint=None):\ndef build_sam3D_vit_l(checkpoint=None):\ndef build_sam3D_vit_b(checkpoint=None):\ndef build_sam3D_vit_b_ori(checkpoint=None):\ndef _build_sam3D(\n encoder_embed_dim,\n encoder_depth,\n encoder_num_heads,\n encoder_global_attn_indexes,\n checkpoint=None,\n):\ndef _build_sam3D_ori(\n encoder_embed_dim,\n encoder_depth,\n encoder_num_heads,\n encoder_global_attn_indexes,\n checkpoint=None,\n):" }, { "identifier": "get_next_click3D_torch_2", "path": "utils/click_method.py", "snippet": "def get_next_click3D_torch_2(prev_seg, gt_semantic_seg):\n\n mask_threshold = 0.5\n\n batch_points = []\n batch_labels = []\n # dice_list = []\n\n pred_masks = (prev_seg > mask_threshold)\n true_masks = (gt_semantic_seg > 0)\n fn_masks = torch.logical_and(true_masks, torch.logical_not(pred_masks))\n fp_masks = torch.logical_and(torch.logical_not(true_masks), pred_masks)\n\n to_point_mask = torch.logical_or(fn_masks, fp_masks)\n\n for i in range(gt_semantic_seg.shape[0]):\n\n points = torch.argwhere(to_point_mask[i])\n point = points[np.random.randint(len(points))]\n # import pdb; pdb.set_trace()\n if fn_masks[i, 0, point[1], point[2], point[3]]:\n is_positive = True\n else:\n is_positive = False\n\n bp = point[1:].clone().detach().reshape(1,1,3) \n bl = torch.tensor([int(is_positive),]).reshape(1,1)\n batch_points.append(bp)\n batch_labels.append(bl)\n\n return batch_points, batch_labels # , (sum(dice_list)/len(dice_list)).item() " }, { "identifier": "Dataset_Union_ALL", "path": "utils/data_loader.py", "snippet": "class Dataset_Union_ALL(Dataset): \n def __init__(self, paths, mode='train', data_type='Tr', image_size=128, \n transform=None, threshold=500,\n split_num=1, split_idx=0, pcc=False):\n self.paths = paths\n self.data_type = data_type\n self.split_num=split_num\n self.split_idx=split_idx\n\n self._set_file_paths(self.paths)\n self.image_size = image_size\n self.transform = transform\n self.threshold = threshold\n self.mode = mode\n self.pcc = pcc\n \n def __len__(self):\n return len(self.label_paths)\n\n def __getitem__(self, index):\n\n sitk_image = sitk.ReadImage(self.image_paths[index])\n sitk_label = sitk.ReadImage(self.label_paths[index])\n\n if sitk_image.GetOrigin() != sitk_label.GetOrigin():\n sitk_image.SetOrigin(sitk_label.GetOrigin())\n if sitk_image.GetDirection() != sitk_label.GetDirection():\n sitk_image.SetDirection(sitk_label.GetDirection())\n\n subject = tio.Subject(\n image = tio.ScalarImage.from_sitk(sitk_image),\n label = tio.LabelMap.from_sitk(sitk_label),\n )\n\n if '/ct_' in self.image_paths[index]:\n subject = tio.Clamp(-1000,1000)(subject)\n\n if self.transform:\n try:\n subject = self.transform(subject)\n except:\n print(self.image_paths[index])\n\n if(self.pcc):\n print(\"using pcc setting\")\n # crop from random click point\n random_index = torch.argwhere(subject.label.data == 1)\n if(len(random_index)>=1):\n random_index = random_index[np.random.randint(0, len(random_index))]\n # print(random_index)\n crop_mask = torch.zeros_like(subject.label.data)\n # print(crop_mask.shape)\n crop_mask[random_index[0]][random_index[1]][random_index[2]][random_index[3]] = 1\n subject.add_image(tio.LabelMap(tensor=crop_mask,\n affine=subject.label.affine),\n image_name=\"crop_mask\")\n subject = tio.CropOrPad(mask_name='crop_mask', \n target_shape=(self.image_size,self.image_size,self.image_size))(subject)\n\n if subject.label.data.sum() <= self.threshold:\n return self.__getitem__(np.random.randint(self.__len__()))\n \n if self.mode == \"train\" and self.data_type == 'Tr':\n return subject.image.data.clone().detach(), subject.label.data.clone().detach()\n else:\n return subject.image.data.clone().detach(), subject.label.data.clone().detach(), self.image_paths[index] \n \n def _set_file_paths(self, paths):\n self.image_paths = []\n self.label_paths = []\n\n # if ${path}/labelsTr exists, search all .nii.gz\n for path in paths:\n d = os.path.join(path, f'labels{self.data_type}')\n if os.path.exists(d):\n for name in os.listdir(d):\n base = os.path.basename(name).split('.nii.gz')[0]\n label_path = os.path.join(path, f'labels{self.data_type}', f'{base}.nii.gz')\n self.image_paths.append(label_path.replace('labels', 'images'))\n self.label_paths.append(label_path)" }, { "identifier": "Union_Dataloader", "path": "utils/data_loader.py", "snippet": "class Union_Dataloader(DataLoader):\n def __iter__(self):\n return BackgroundGenerator(super().__iter__())" }, { "identifier": "img_datas", "path": "utils/data_paths.py", "snippet": "" } ]
import numpy as np import random import datetime import logging import matplotlib.pyplot as plt import os import torch import torch.distributed as dist import torch.nn.functional as F import torchio as tio import argparse import torch.multiprocessing as mp from tqdm import tqdm from torch.backends import cudnn from torch.utils.data.distributed import DistributedSampler from segment_anything.build_sam3D import sam_model_registry3D from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from monai.losses import DiceCELoss from contextlib import nullcontext from utils.click_method import get_next_click3D_torch_2 from utils.data_loader import Dataset_Union_ALL, Union_Dataloader from utils.data_paths import img_datas
1,960
# set up environment join = os.path.join # %% set up parser parser = argparse.ArgumentParser() parser.add_argument('--task_name', type=str, default='union_train') parser.add_argument('--click_type', type=str, default='random') parser.add_argument('--multi_click', action='store_true', default=False) parser.add_argument('--model_type', type=str, default='vit_b_ori') parser.add_argument('--checkpoint', type=str, default='./work_dir/SAM/sam_vit_b.pth') parser.add_argument('--device', type=str, default='cuda') parser.add_argument('--work_dir', type=str, default='./work_dir') # train parser.add_argument('--num_workers', type=int, default=24) parser.add_argument('--gpu_ids', type=int, nargs='+', default=[0,1]) parser.add_argument('--multi_gpu', action='store_true', default=False) parser.add_argument('--resume', action='store_true', default=False) # lr_scheduler parser.add_argument('--lr_scheduler', type=str, default='multisteplr') parser.add_argument('--step_size', type=list, default=[120, 180]) parser.add_argument('--gamma', type=float, default=0.1) parser.add_argument('--num_epochs', type=int, default=200) parser.add_argument('--img_size', type=int, default=128) parser.add_argument('--batch_size', type=int, default=10) parser.add_argument('--accumulation_steps', type=int, default=20) parser.add_argument('--lr', type=float, default=8e-4) parser.add_argument('--weight_decay', type=float, default=0.1) parser.add_argument('--port', type=int, default=12361) args = parser.parse_args() device = args.device os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(i) for i in args.gpu_ids]) logger = logging.getLogger(__name__) LOG_OUT_DIR = join(args.work_dir, args.task_name) click_methods = { 'random': get_next_click3D_torch_2, } MODEL_SAVE_PATH = join(args.work_dir, args.task_name) os.makedirs(MODEL_SAVE_PATH, exist_ok=True) def build_model(args):
# set up environment join = os.path.join # %% set up parser parser = argparse.ArgumentParser() parser.add_argument('--task_name', type=str, default='union_train') parser.add_argument('--click_type', type=str, default='random') parser.add_argument('--multi_click', action='store_true', default=False) parser.add_argument('--model_type', type=str, default='vit_b_ori') parser.add_argument('--checkpoint', type=str, default='./work_dir/SAM/sam_vit_b.pth') parser.add_argument('--device', type=str, default='cuda') parser.add_argument('--work_dir', type=str, default='./work_dir') # train parser.add_argument('--num_workers', type=int, default=24) parser.add_argument('--gpu_ids', type=int, nargs='+', default=[0,1]) parser.add_argument('--multi_gpu', action='store_true', default=False) parser.add_argument('--resume', action='store_true', default=False) # lr_scheduler parser.add_argument('--lr_scheduler', type=str, default='multisteplr') parser.add_argument('--step_size', type=list, default=[120, 180]) parser.add_argument('--gamma', type=float, default=0.1) parser.add_argument('--num_epochs', type=int, default=200) parser.add_argument('--img_size', type=int, default=128) parser.add_argument('--batch_size', type=int, default=10) parser.add_argument('--accumulation_steps', type=int, default=20) parser.add_argument('--lr', type=float, default=8e-4) parser.add_argument('--weight_decay', type=float, default=0.1) parser.add_argument('--port', type=int, default=12361) args = parser.parse_args() device = args.device os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(i) for i in args.gpu_ids]) logger = logging.getLogger(__name__) LOG_OUT_DIR = join(args.work_dir, args.task_name) click_methods = { 'random': get_next_click3D_torch_2, } MODEL_SAVE_PATH = join(args.work_dir, args.task_name) os.makedirs(MODEL_SAVE_PATH, exist_ok=True) def build_model(args):
sam_model = sam_model_registry3D[args.model_type](checkpoint=None).to(device)
0
2023-10-23 15:41:07+00:00
4k
VikParuchuri/libgen_to_txt
download_and_clean.py
[ { "identifier": "get_file_path", "path": "libgen_to_txt/files.py", "snippet": "def get_file_path(num, client, parent_id):\n files = client.File.list(parent_id=parent_id)\n try:\n sel_file = [f for f in files if get_leading_digits(f.name) == num][0]\n except IndexError:\n return\n return sel_file" }, { "identifier": "download_folder", "path": "libgen_to_txt/files.py", "snippet": "def download_folder(url, num, client, parent_folder_id, no_download=False):\n transfer = None\n if not no_download:\n transfer = client.Transfer.add_url(url, parent_id=parent_folder_id)\n\n iterations = 0\n sel_file = None\n sleep_interval = 60\n max_iterations = settings.MAX_TIME_TO_WAIT // sleep_interval\n\n # If we're not downloading, don't need to wait\n if no_download:\n sel_file = get_file_path(num, client, parent_folder_id)\n else:\n # Wait for the file to be downloaded\n while not sel_file and iterations < max_iterations:\n time.sleep(sleep_interval)\n sel_file = get_file_path(num, client, parent_folder_id)\n iterations += 1\n\n if transfer:\n # Cancel the transfer\n transfer.cancel()\n\n return sel_file" }, { "identifier": "download_folder_locally", "path": "libgen_to_txt/files.py", "snippet": "def download_folder_locally(fpath):\n putio_path = fpath\n if settings.PUTIO_FOLDER:\n putio_path = f\"{settings.PUTIO_FOLDER}/{fpath}\"\n\n local_path = f\"{settings.BASE_STORAGE_FOLDER}/{fpath}\"\n # Need to configure rclone first\n command = [\"rclone\", \"copy\", f\"{settings.RCLONE_ADAPTER_NAME}:{putio_path}\", local_path]\n subprocess.run(command)\n return local_path" }, { "identifier": "delete_file_locally", "path": "libgen_to_txt/files.py", "snippet": "def delete_file_locally(fpath):\n local_path = f\"{settings.BASE_STORAGE_FOLDER}/{fpath}\"\n shutil.rmtree(local_path)" }, { "identifier": "get_parent_id", "path": "libgen_to_txt/files.py", "snippet": "def get_parent_id(client):\n folder_name = settings.PUTIO_FOLDER\n if not folder_name:\n return 0\n folders = [n for n in client.File.list() if n.name == \"libgen\"]\n if len(folders) == 0:\n return 0\n\n folder = folders[0]\n return folder.id" }, { "identifier": "process_folder_marker", "path": "libgen_to_txt/marker/convert.py", "snippet": "def process_folder_marker(stored_path, out_path, num, max_workers):\n metadata = filter_invalid(stored_path)\n metadata_file = os.path.join(settings.BASE_METADATA_FOLDER, f\"{num}_meta.json\")\n\n with open(metadata_file, \"w+\") as f:\n json.dump(metadata, f)\n\n if settings.GPU_COUNT == 0:\n marker_cpu(stored_path, out_path, metadata_file, max_workers)\n else:\n marker_gpu(stored_path, out_path, metadata_file, max_workers)" }, { "identifier": "batch_write_metadata", "path": "libgen_to_txt/metadata.py", "snippet": "def batch_write_metadata(files, out_folder_path, max_workers):\n with ProcessPoolExecutor(max_workers=max_workers) as executor:\n executor.map(try_write_metadata, files, repeat(out_folder_path), chunksize=10)" }, { "identifier": "process_batch_files_naive", "path": "libgen_to_txt/naive/convert.py", "snippet": "def process_batch_files_naive(files, stored_path, out_path, max_workers):\n with ProcessPoolExecutor(max_workers=max_workers) as executor:\n executor.map(try_process_single_file, files, repeat(stored_path), repeat(out_path), chunksize=10)" }, { "identifier": "settings", "path": "libgen_to_txt/settings.py", "snippet": "class Settings(BaseSettings):\n class Config:\n BASE_STORAGE_FOLDER: str = \"libgen\" # temp storage for downloaded chunks\n BASE_PROCESSED_FOLDER: str = \"processed\" # After a chunk is processed, an empty file is created here\n BASE_TXT_FOLDER: str = \"txt\" # Where the final text is stored\n BASE_METADATA_FOLDER: str = \"metadata\" # Where to store metadata for processing\n LIBGEN_DB_NAME: str = \"libgen\"\n LIBGEN_DB_USER: str = \"libgen\"\n LIBGEN_DB_PASS: str = \"password\"\n CONVERSION_WORKERS: int = 18 # Number of workers to use to convert pdfs for each libgen chunk\n DOWNLOAD_WORKERS: int = 8 # Number of download workers (bandwidth-bound)\n MAX_TIME_TO_WAIT: int = 60 * 60 * 6 # 6 hours to wait for a download to finish\n RCLONE_ADAPTER_NAME: str = \"putio\"\n TEXT_FLAGS: int = pymupdf.TEXTFLAGS_TEXT & ~pymupdf.TEXT_PRESERVE_LIGATURES\n CONVERSION_METHOD: str = \"naive\" # Either naive or marker. Naive is faster, but marker is more accurate.\n GPU_COUNT: int = 0 # Number of GPUs to use for marker. 0 means to use CPU only\n MARKER_FOLDER: str = \"../marker\"\n MARKER_GPU_TIMEOUT: int = 60 * 60 * 8 # Time to wait for marker gpu to finish\n MARKER_CPU_TIMEOUT: int = 60 * 60 * 24 # Time to wait for marker to finish\n MARKER_SUPPORTED_LANGUAGES: List = [\"English\", \"Spanish\", \"Portuguese\", \"French\", \"German\", \"Russian\"]\n MARKER_SUPPORTED_EXTENSIONS: List = [\"pdf\", \"epub\", \"mobi\", \"xps\", \"fb2\"]\n MARKER_MIN_LENGTH: int = 10000 # Min amount of text to extract from file naively before using marker\n MARKER_DEBUG_DATA_FOLDER: Optional[str] = None # Folder to store debug data in\n POETRY_DIR: str = \"~/.local/bin\" # Poetry directory, used to activate marker venv\n PUTIO_TOKEN: str = \"\"\n PUTIO_FOLDER: str = \"libgen\"" } ]
import argparse import multiprocessing import putiopy import os from concurrent.futures import ProcessPoolExecutor from itertools import repeat from tqdm import tqdm from libgen_to_txt.files import get_file_path, download_folder, download_folder_locally, delete_file_locally, \ get_parent_id from libgen_to_txt.marker.convert import process_folder_marker from libgen_to_txt.metadata import batch_write_metadata from libgen_to_txt.naive.convert import process_batch_files_naive from libgen_to_txt.settings import settings
1,814
def process_single_libgen_chunk(torrent_info, conversion_lock, no_download, no_delete, max_workers=settings.CONVERSION_WORKERS): num, url = torrent_info client = putiopy.Client(settings.PUTIO_TOKEN, timeout=15, use_retry=True) parent_folder_id = get_parent_id(client) sel_file = get_file_path(num, client, parent_folder_id) if not sel_file: sel_file = download_folder(url, num, client, parent_folder_id, no_download) if not sel_file: return stored_path = download_folder_locally(sel_file.name) files = os.listdir(stored_path) out_path = os.path.join(settings.BASE_TXT_FOLDER, num) os.makedirs(out_path, exist_ok=True) # Only one chunk can be converted at once with conversion_lock: match settings.CONVERSION_METHOD: case "naive": # PDF -> markdown process_batch_files_naive(files, stored_path, out_path, max_workers) # Write metadata batch_write_metadata(files, out_path, max_workers) case "marker": # PDF -> markdown process_folder_marker(stored_path, out_path, num, max_workers) case _: print(f"Unknown conversion method {settings.CONVERSION_METHOD}") return # Mark that we have processed this segment of libgen with open(os.path.join(settings.BASE_PROCESSED_FOLDER, num), "w+") as f: f.write(sel_file.name) # Delete files from remote and local if not no_download: sel_file.delete() if not no_delete:
def process_single_libgen_chunk(torrent_info, conversion_lock, no_download, no_delete, max_workers=settings.CONVERSION_WORKERS): num, url = torrent_info client = putiopy.Client(settings.PUTIO_TOKEN, timeout=15, use_retry=True) parent_folder_id = get_parent_id(client) sel_file = get_file_path(num, client, parent_folder_id) if not sel_file: sel_file = download_folder(url, num, client, parent_folder_id, no_download) if not sel_file: return stored_path = download_folder_locally(sel_file.name) files = os.listdir(stored_path) out_path = os.path.join(settings.BASE_TXT_FOLDER, num) os.makedirs(out_path, exist_ok=True) # Only one chunk can be converted at once with conversion_lock: match settings.CONVERSION_METHOD: case "naive": # PDF -> markdown process_batch_files_naive(files, stored_path, out_path, max_workers) # Write metadata batch_write_metadata(files, out_path, max_workers) case "marker": # PDF -> markdown process_folder_marker(stored_path, out_path, num, max_workers) case _: print(f"Unknown conversion method {settings.CONVERSION_METHOD}") return # Mark that we have processed this segment of libgen with open(os.path.join(settings.BASE_PROCESSED_FOLDER, num), "w+") as f: f.write(sel_file.name) # Delete files from remote and local if not no_download: sel_file.delete() if not no_delete:
delete_file_locally(sel_file.name)
3
2023-10-16 17:56:36+00:00
4k
NVIDIA/GenerativeAIExamples
RetrievalAugmentedGeneration/examples/developer_rag/chains.py
[ { "identifier": "LimitRetrievedNodesLength", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "class LimitRetrievedNodesLength(BaseNodePostprocessor):\n \"\"\"Llama Index chain filter to limit token lengths.\"\"\"\n\n def _postprocess_nodes(\n self, nodes: List[\"NodeWithScore\"] = [], query_bundle: Optional[\"QueryBundle\"] = None\n ) -> List[\"NodeWithScore\"]:\n \"\"\"Filter function.\"\"\"\n included_nodes = []\n current_length = 0\n limit = DEFAULT_MAX_CONTEXT\n\n for node in nodes:\n current_length += len(\n globals_helper.tokenizer(\n node.node.get_content(metadata_mode=MetadataMode.LLM)\n )\n )\n if current_length > limit:\n break\n included_nodes.append(node)\n\n return included_nodes" }, { "identifier": "get_config", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "@lru_cache\ndef get_config() -> \"ConfigWizard\":\n \"\"\"Parse the application configuration.\"\"\"\n config_file = os.environ.get(\"APP_CONFIG_FILE\", \"/dev/null\")\n config = configuration.AppConfig.from_file(config_file)\n if config:\n return config\n raise RuntimeError(\"Unable to find configuration.\")" }, { "identifier": "get_doc_retriever", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "@lru_cache\ndef get_doc_retriever(num_nodes: int = 4) -> \"BaseRetriever\":\n \"\"\"Create the document retriever.\"\"\"\n index = get_vector_index()\n return index.as_retriever(similarity_top_k=num_nodes)" }, { "identifier": "get_llm", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "@lru_cache\ndef get_llm() -> LangChainLLM:\n \"\"\"Create the LLM connection.\"\"\"\n settings = get_config()\n\n logger.info(f\"Using {settings.llm.model_engine} as model engine for llm\")\n if settings.llm.model_engine == \"triton-trt-llm\":\n trtllm = TensorRTLLM( # type: ignore\n server_url=settings.llm.server_url,\n model_name=settings.llm.model_name,\n tokens=DEFAULT_NUM_TOKENS,\n )\n return LangChainLLM(llm=trtllm)\n elif settings.llm.model_engine == \"ai-playground\":\n if os.getenv('NVAPI_KEY') is None:\n raise RuntimeError(\"AI PLayground key is not set\")\n aipl_llm = GeneralLLM(\n model=settings.llm.model_name,\n max_tokens=DEFAULT_NUM_TOKENS,\n streaming=True\n )\n return LangChainLLM(llm=aipl_llm)\n else:\n raise RuntimeError(\"Unable to find any supported Large Language Model server. Supported engines are triton-trt-llm and ai-playground.\")" }, { "identifier": "get_text_splitter", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "def get_text_splitter() -> SentenceTransformersTokenTextSplitter:\n \"\"\"Return the token text splitter instance from langchain.\"\"\"\n return SentenceTransformersTokenTextSplitter(\n model_name=TEXT_SPLITTER_EMBEDDING_MODEL,\n chunk_size=get_config().text_splitter.chunk_size,\n chunk_overlap=get_config().text_splitter.chunk_overlap,\n )" }, { "identifier": "get_vector_index", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "@lru_cache\ndef get_vector_index() -> VectorStoreIndex:\n \"\"\"Create the vector db index.\"\"\"\n config = get_config()\n vector_store = MilvusVectorStore(uri=config.milvus.url, dim=config.embeddings.dimensions, overwrite=False)\n return VectorStoreIndex.from_vector_store(vector_store)" }, { "identifier": "is_base64_encoded", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "@lru_cache\ndef is_base64_encoded(s: str) -> bool:\n \"\"\"Check if a string is base64 encoded.\"\"\"\n try:\n # Attempt to decode the string as base64\n decoded_bytes = base64.b64decode(s)\n # Encode the decoded bytes back to a string to check if it's valid\n decoded_str = decoded_bytes.decode(\"utf-8\")\n # If the original string and the decoded string match, it's base64 encoded\n return s == base64.b64encode(decoded_str.encode(\"utf-8\")).decode(\"utf-8\")\n except Exception: # pylint:disable = broad-exception-caught\n # An exception occurred during decoding, so it's not base64 encoded\n return False" }, { "identifier": "set_service_context", "path": "RetrievalAugmentedGeneration/common/utils.py", "snippet": "@lru_cache\ndef set_service_context() -> None:\n \"\"\"Set the global service context.\"\"\"\n service_context = ServiceContext.from_defaults(\n llm=get_llm(), embed_model=get_embedding_model()\n )\n set_global_service_context(service_context)" } ]
import base64 import os import logging from pathlib import Path from typing import Generator from llama_index import Prompt, download_loader from llama_index.query_engine import RetrieverQueryEngine from llama_index.response.schema import StreamingResponse from llama_index.node_parser import LangchainNodeParser from RetrievalAugmentedGeneration.common.utils import ( LimitRetrievedNodesLength, get_config, get_doc_retriever, get_llm, get_text_splitter, get_vector_index, is_base64_encoded, set_service_context, )
1,951
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LLM Chains for executing Retrival Augmented Generation.""" logger = logging.getLogger(__name__) def llm_chain( context: str, question: str, num_tokens: int ) -> Generator[str, None, None]: """Execute a simple LLM chain using the components defined above.""" logger.info("Using llm to generate response directly without knowledge base.") set_service_context() prompt = get_config().prompts.chat_template.format( context_str=context, query_str=question ) logger.info(f"Prompt used for response generation: {prompt}") response = get_llm().stream_complete(prompt, tokens=num_tokens) gen_response = (resp.delta for resp in response) return gen_response def rag_chain(prompt: str, num_tokens: int) -> Generator[str, None, None]: """Execute a Retrieval Augmented Generation chain using the components defined above.""" logger.info("Using rag to generate response from document") set_service_context() if get_config().llm.model_engine == "triton-trt-llm": get_llm().llm.tokens = num_tokens # type: ignore else: get_llm().llm.max_tokens = num_tokens retriever = get_doc_retriever(num_nodes=4) qa_template = Prompt(get_config().prompts.rag_template) logger.info(f"Prompt used for response generation: {qa_template}") query_engine = RetrieverQueryEngine.from_args( retriever, text_qa_template=qa_template, node_postprocessors=[LimitRetrievedNodesLength()], streaming=True, ) response = query_engine.query(prompt) # Properly handle an empty response if isinstance(response, StreamingResponse): return response.response_gen logger.warning("No response generated from LLM, make sure you've ingested document.") return StreamingResponse(iter(["No response generated from LLM, make sure you have ingested document from the Knowledge Base Tab."])).response_gen # type: ignore def ingest_docs(data_dir: str, filename: str) -> None: """Ingest documents to the VectorDB.""" logger.info(f"Ingesting {filename} in vectorDB") _, ext = os.path.splitext(filename) if ext.lower() == ".pdf": PDFReader = download_loader("PDFReader") loader = PDFReader() documents = loader.load_data(file=Path(data_dir)) else: unstruct_reader = download_loader("UnstructuredReader") loader = unstruct_reader() documents = loader.load_data(file=Path(data_dir), split_documents=False) encoded_filename = filename[:-4] if not is_base64_encoded(encoded_filename): encoded_filename = base64.b64encode(encoded_filename.encode("utf-8")).decode( "utf-8" ) for document in documents: document.metadata = {"filename": encoded_filename}
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LLM Chains for executing Retrival Augmented Generation.""" logger = logging.getLogger(__name__) def llm_chain( context: str, question: str, num_tokens: int ) -> Generator[str, None, None]: """Execute a simple LLM chain using the components defined above.""" logger.info("Using llm to generate response directly without knowledge base.") set_service_context() prompt = get_config().prompts.chat_template.format( context_str=context, query_str=question ) logger.info(f"Prompt used for response generation: {prompt}") response = get_llm().stream_complete(prompt, tokens=num_tokens) gen_response = (resp.delta for resp in response) return gen_response def rag_chain(prompt: str, num_tokens: int) -> Generator[str, None, None]: """Execute a Retrieval Augmented Generation chain using the components defined above.""" logger.info("Using rag to generate response from document") set_service_context() if get_config().llm.model_engine == "triton-trt-llm": get_llm().llm.tokens = num_tokens # type: ignore else: get_llm().llm.max_tokens = num_tokens retriever = get_doc_retriever(num_nodes=4) qa_template = Prompt(get_config().prompts.rag_template) logger.info(f"Prompt used for response generation: {qa_template}") query_engine = RetrieverQueryEngine.from_args( retriever, text_qa_template=qa_template, node_postprocessors=[LimitRetrievedNodesLength()], streaming=True, ) response = query_engine.query(prompt) # Properly handle an empty response if isinstance(response, StreamingResponse): return response.response_gen logger.warning("No response generated from LLM, make sure you've ingested document.") return StreamingResponse(iter(["No response generated from LLM, make sure you have ingested document from the Knowledge Base Tab."])).response_gen # type: ignore def ingest_docs(data_dir: str, filename: str) -> None: """Ingest documents to the VectorDB.""" logger.info(f"Ingesting {filename} in vectorDB") _, ext = os.path.splitext(filename) if ext.lower() == ".pdf": PDFReader = download_loader("PDFReader") loader = PDFReader() documents = loader.load_data(file=Path(data_dir)) else: unstruct_reader = download_loader("UnstructuredReader") loader = unstruct_reader() documents = loader.load_data(file=Path(data_dir), split_documents=False) encoded_filename = filename[:-4] if not is_base64_encoded(encoded_filename): encoded_filename = base64.b64encode(encoded_filename.encode("utf-8")).decode( "utf-8" ) for document in documents: document.metadata = {"filename": encoded_filename}
index = get_vector_index()
5
2023-10-19 13:46:31+00:00
4k
MolecularAI/REINVENT4
reinvent/runmodes/TL/linkinvent.py
[ { "identifier": "Learning", "path": "reinvent/runmodes/TL/learning.py", "snippet": "class Learning(ABC):\n \"\"\"Trains a given model with new data from SMILES.\"\"\"\n\n def __init__(\n self,\n model: ModelAdapter,\n tb_logdir: str,\n configuration: Configuration,\n logger_parameters,\n ):\n \"\"\"Setup\n\n :param model: model adapter\n :param tb_logdir: name of the TensorBoard log directory\n :param configuration: configuration\n :param logger_parameters: parameters for remote logging\n \"\"\"\n\n self.model = model\n self._config = configuration\n self.device = model.device\n\n # Setup sampler\n\n try:\n _ = getattr(self.model, \"sample_smiles\")\n self.can_do_similarity = True\n except AttributeError:\n self.can_do_similarity = False\n\n # FIXME: SMILES standardization in preprocessing\n\n self._optimizer = configuration.optimizer\n self._lr_scheduler = configuration.learning_rate_scheduler\n\n self.ref_fps = None\n\n self.smilies = self._config.smilies\n self.validation_smilies = self._config.validation_smilies\n\n model_type = self.model.__class__.__name__.split(\"Adapter\")[0]\n chemistry = ChemistryHelpers(\n Conversions(), # Lib/LinkInvent, Mol2Mol\n BondMaker(), # LibInvent\n AttachmentPoints(), # Lib/LinkInvent\n )\n sampling_parameters = {\"num_smiles\": 10, \"batch_size\": 1}\n sampler, _ = setup_sampler(model_type, sampling_parameters, self.model, chemistry)\n sampler.unique_sequences = False\n self.sampler = sampler\n self.sampling_smilies = self.smilies[: sampling_parameters[\"num_smiles\"]]\n if len(self.sampling_smilies) < sampling_parameters[\"num_smiles\"]:\n self.sampling_smilies = self.sampling_smilies + [self.sampling_smilies[-1]] * (\n sampling_parameters[\"num_smiles\"] - len(self.sampling_smilies)\n )\n if not isinstance(self.sampling_smilies[0], str):\n self.sampling_smilies = [s[0] for s in self.sampling_smilies]\n\n # FIXME: think what to do for Lib and LinkInvent\n if self.can_do_similarity:\n nmols = min(len(self.smilies), self._config.num_refs)\n ref_smilies = random.sample(self.smilies, nmols)\n mols = filter(lambda m: m, [Chem.MolFromSmiles(smiles) for smiles in ref_smilies])\n self.ref_fps = [Chem.RDKFingerprint(mol) for mol in mols]\n\n self.sample_batch_size = max(self._config.sample_batch_size, 128)\n self.batch_size = configuration.batch_size\n self.save_freq = max(self._config.save_every_n_epochs, 1)\n\n self.reporter = get_reporter()\n self.tb_reporter = None\n if tb_logdir:\n self.tb_reporter = SummaryWriter(log_dir=tb_logdir)\n if self.can_do_similarity:\n mols = filter(\n lambda m: m,\n [Chem.MolFromSmiles(smiles) for smiles in self.smilies],\n )\n fps = [Chem.RDKFingerprint(mol) for mol in mols]\n\n sim = []\n\n for n in range(len(fps) - 1):\n s = DataStructs.BulkTanimotoSimilarity(fps[n], fps[n + 1 :])\n sim.extend(s)\n\n self.tb_reporter.add_histogram(\"Tanimoto input SMILES\", np.array(sim), 0)\n\n # FIXME: this is only available for Mol2mol\n if self._config.max_sequence_length:\n self.model.set_max_sequence_length(self._config.max_sequence_length)\n\n @abstractmethod\n def prepare_dataloader(self):\n \"\"\"Prepare a pytorch Dataloader\"\"\"\n ...\n\n def _common_dataloader(self):\n \"\"\"For Reinvent, LibInvent, LinkInvent\"\"\"\n\n # FIXME: had to set the generator explicitly to cuda to make this work\n # shuffle=False would work do but probably not a good idea because\n # it would affect training\n self.dataloader = DataLoader(\n self.dataset,\n batch_size=self.batch_size,\n generator=torch.Generator(device=self.device),\n shuffle=self._config.shuffle_each_epoch,\n collate_fn=self.collate_fn,\n drop_last=True,\n )\n\n self.validation_dataloader = None\n\n if self.validation_dataset:\n self.validation_dataloader = DataLoader(\n self.validation_dataset,\n batch_size=self.batch_size,\n generator=torch.Generator(device=self.device),\n shuffle=False,\n collate_fn=self.collate_fn,\n drop_last=False,\n )\n\n def optimize(self):\n start_epoch = self._config.starting_epoch - 1 # user input is 1 based\n end_epoch = start_epoch + self._config.num_epochs\n pbar = tqdm.tqdm(\n range(start_epoch, end_epoch),\n bar_format=\"{desc}|{bar}|{elapsed}\",\n ascii=True,\n )\n\n self.prepare_dataloader()\n\n with tqdm_logging_redirect(loggers=[logger]):\n validation_losses = {}\n for epoch in pbar:\n self.model.set_mode(\"training\")\n\n epoch_no = epoch + 1\n pbar.set_description(f\"Epoch {epoch_no}\")\n\n mean_nll = self.train_epoch()\n\n if epoch_no % self.save_freq == 0 or epoch_no == end_epoch:\n mean_nll_valid = None\n if self.validation_dataloader:\n self.model.set_mode(\"inference\")\n stats = self.compute_stats(self.validation_dataloader)\n mean_nll_valid = stats[\"nll\"]\n validation_losses[epoch_no] = mean_nll_valid\n\n saved_model_path = self._save_model(epoch_no)\n # sampled_smiles, sampled_nll = self.model.sample_smiles(\n # self.dataloader, num=10\n # )\n samples = self.sampler.sample(self.sampling_smilies)\n sampled_smilies = [\n s\n for s, state in zip(samples.smilies, samples.states)\n if state == SmilesState.DUPLICATE or state == SmilesState.VALID\n ]\n self.report(\n mean_nll,\n mean_nll_valid,\n epoch_no,\n saved_model_path,\n sampled_smilies,\n )\n\n if self._terminate():\n break\n\n self._save_model()\n\n if self.validation_dataloader:\n best_epoch_no = min(validation_losses, key=validation_losses.get)\n best_validation_loss = validation_losses[best_epoch_no]\n logger.info(\n f\"Best validation loss ({best_validation_loss:.3f}) was at epoch {best_epoch_no:d}\"\n )\n if best_epoch_no == max(validation_losses):\n logger.warning(\n f\"Best validation loss occured at the last epoch. Consider to train your model for more epochs\"\n )\n\n @abstractmethod\n def train_epoch(self):\n ...\n\n @abstractmethod\n def compute_nll(self, batch):\n ...\n\n def _train_epoch_common(self) -> float:\n \"\"\"Run one epoch of training\n\n :returns: mean negative log likelihood over all SMILES\n \"\"\"\n\n mean_epoch_nlls = np.zeros(self.batch_size)\n\n for step, batch in enumerate(self.dataloader):\n nll = self.compute_nll(batch)\n loss = nll.mean()\n mean_epoch_nlls[step] = loss.item()\n\n self._optimizer.zero_grad()\n loss.backward()\n\n if self._config.clip_gradient_norm > 0:\n clip_grad_norm_(self.model.network.parameters(), self._config.clip_gradient_norm)\n self._optimizer.step()\n\n self._lr_scheduler.step() # Mol2Mol does this once per batch\n\n return mean_epoch_nlls.mean()\n\n def _terminate(self):\n terminate = False\n\n # FIXME: why are two steps made?\n self._lr_scheduler.step()\n\n new_lr = self._lr_scheduler.optimizer.param_groups[0][\"lr\"]\n\n if new_lr < self._config.learning_rate_config.min:\n terminate = True\n\n return terminate\n\n def _save_model(self, epoch: int = None) -> str:\n \"\"\"Save the model to a file\n\n :param epoch: number when give to use for filename\n \"\"\"\n\n suffix = f\".{epoch}.chkpt\" if epoch else \"\"\n path = f\"{self._config.output_model_file}{suffix}\"\n\n self.model.save_to_file(path)\n return path\n\n def compute_stats(self, dataloader: DataLoader) -> dict:\n \"\"\"Compute several evaluation stats\n (only NLL is supported at the moment)\n\n :param dataloader: pytorch DataLoader object containing all the smilies\n to use for evaluation\n \"\"\"\n total_nll = 0.0\n n_examples = 0\n for step, batch in enumerate(dataloader):\n nll = self.compute_nll(batch).mean() * len(batch)\n total_nll = total_nll + nll.item()\n n_examples = n_examples + len(batch)\n return {\"nll\": total_nll / n_examples}\n\n def report(\n self,\n mean_nll: float,\n mean_nll_valid: float,\n epoch_no: int,\n model_path: str,\n sampled_smiles: list[str],\n ):\n \"\"\"Log the report to various sources\"\"\"\n\n if self.tb_reporter:\n tb_data = TBData(\n epoch=epoch_no,\n mean_nll=mean_nll,\n mean_nll_valid=mean_nll_valid,\n sample_batch_size=self.sample_batch_size,\n ref_fps=self.ref_fps,\n )\n\n write_report(\n self.tb_reporter,\n tb_data,\n self.model,\n self.can_do_similarity,\n self.dataloader,\n )\n\n remote_data = RemoteData(\n epoch=epoch_no,\n model_path=model_path,\n sampled_smiles=sampled_smiles,\n mean_nll=mean_nll,\n mean_nll_valid=mean_nll_valid,\n )\n\n send_report(remote_data, self.reporter)" }, { "identifier": "PairedDataset", "path": "reinvent/models/linkinvent/dataset/paired_dataset.py", "snippet": "class PairedDataset(tud.Dataset):\n \"\"\"Dataset that takes a list of (input, output) pairs.\"\"\"\n\n def __init__(\n self, input_target_smi_list: List[List[str]], vocabulary: PairedModelVocabulary\n ):\n self.vocabulary = vocabulary\n self._encoded_list = []\n\n for input_smi, target_smi in input_target_smi_list:\n en_input = self.vocabulary.input.encode(input_smi)\n en_output = self.vocabulary.target.encode(target_smi)\n\n if en_input is not None and en_output is not None:\n self._encoded_list.append((en_input, en_output))\n else:\n pass\n # TODO log theses cases\n\n def __getitem__(self, i):\n en_input, en_output = self._encoded_list[i]\n return (\n torch.tensor(en_input, dtype=torch.long),\n torch.tensor(en_output, dtype=torch.long),\n ) # pylint: disable=E1102\n\n def __len__(self):\n return len(self._encoded_list)\n\n @staticmethod\n def collate_fn(encoded_pairs):\n \"\"\"Turns a list of encoded pairs (input, target) of sequences and turns them into two batches.\n\n :param: A list of pairs of encoded sequences.\n :return: A tuple with two tensors, one for the input and one for the targets in the same order as given.\n \"\"\"\n\n encoded_inputs, encoded_targets = list(zip(*encoded_pairs))\n\n return _pad_batch(encoded_inputs), _pad_batch(encoded_targets)" } ]
import logging from .learning import Learning from reinvent.models.linkinvent.dataset.paired_dataset import PairedDataset
3,182
"""LinkInvent transfer learning Train a given model with new data. The data comes from a file with SMILES strings. The file is assumed to be in multi-column format separated by commas (CSV) or spaces. The SMILES strings are taken from the first two columns. The two SMILES in each row correspond to two pipe-symbol (|) separated SMILES fragments (the warheads, 'input') and a single SMILES (linker. 'target', 'output') e.g. *C#CCO|*CCC#CCCCCCCC(C)C [*]C#CC(O)CCCCCCC[*] The asterisks (*) are the attachment points to form a complete molecule. The order of the columns must follow the order in the model (file). Currently, this means that the warheads/input are in column 1 and the linker/target in column 2. See (the model is read from the torch pickle file) >>> import torch >>> model = torch.load('link_invent_prior.model') >>> model['vocabulary'].input.vocabulary.tokens() >>> model['vocabulary'].target.vocabulary.tokens() """ from __future__ import annotations __all__ = ["Linkinvent"] logger = logging.getLogger(__name__)
"""LinkInvent transfer learning Train a given model with new data. The data comes from a file with SMILES strings. The file is assumed to be in multi-column format separated by commas (CSV) or spaces. The SMILES strings are taken from the first two columns. The two SMILES in each row correspond to two pipe-symbol (|) separated SMILES fragments (the warheads, 'input') and a single SMILES (linker. 'target', 'output') e.g. *C#CCO|*CCC#CCCCCCCC(C)C [*]C#CC(O)CCCCCCC[*] The asterisks (*) are the attachment points to form a complete molecule. The order of the columns must follow the order in the model (file). Currently, this means that the warheads/input are in column 1 and the linker/target in column 2. See (the model is read from the torch pickle file) >>> import torch >>> model = torch.load('link_invent_prior.model') >>> model['vocabulary'].input.vocabulary.tokens() >>> model['vocabulary'].target.vocabulary.tokens() """ from __future__ import annotations __all__ = ["Linkinvent"] logger = logging.getLogger(__name__)
class Linkinvent(Learning):
0
2023-10-20 06:43:16+00:00
4k
lion-agi/lionagi
lionagi/loaders/chunker.py
[ { "identifier": "lcall", "path": "lionagi/utils/call_util.py", "snippet": "def lcall(\n input_: Any, func_: Callable, flatten: bool = False, \n dropna: bool = False, **kwargs\n ) -> List[Any]:\n \"\"\"\n Applies a function to each element of `input`, after converting it to a list.\n\n This function converts the `input` to a list, with options to flatten structures \n and lists, and then applies a given `func` to each element of the list.\n\n Parameters:\n input (Any): The input to be converted to a list and processed.\n\n func (Callable): The function to apply to each element of the list.\n\n flatten_dict (bool, optional): If True, flattens dictionaries in the input. Defaults to False.\n\n flat (bool, optional): If True, flattens nested lists in the input. Defaults to False.\n\n dropna (bool, optional): If True, drops None values during flattening. Defaults to True.\n\n Returns:\n List[Any]: A list containing the results of applying the `func` to each element.\n\n Raises:\n ValueError: If the `func` cannot be applied to the `input`.\n\n Example:\n >>> def square(x):\n ... return x * x\n >>> l_call([1, 2, 3], square)\n [1, 4, 9]\n \"\"\"\n try:\n lst = to_list(input_=input_, flatten=flatten, dropna=dropna)\n return [func_(i, **kwargs) for i in lst]\n except Exception as e:\n raise ValueError(f\"Given function cannot be applied to the input. Error: {e}\")" }, { "identifier": "DataNode", "path": "lionagi/schema/data_node.py", "snippet": "class DataNode(BaseNode):\n\n def to_llama_index(self, **kwargs):\n # to llama_index textnode\n from lionagi.bridge.llama_index import to_llama_index_textnode\n return to_llama_index_textnode(self, **kwargs)\n\n def to_langchain(self, **kwargs):\n # to langchain document\n from lionagi.bridge.langchain import to_langchain_document\n return to_langchain_document(self, **kwargs)\n\n @classmethod\n def from_llama_index(cls, llama_node: Any, **kwargs):\n llama_dict = llama_node.to_dict(**kwargs)\n return cls.from_dict(llama_dict)\n\n @classmethod\n def from_langchain(cls, lc_doc: Any):\n info_json = lc_doc.to_json()\n info_node = {'lc_id': info_json['id']}\n info_node = {**info_node, **info_json['kwargs']}\n return cls(**info_node)" }, { "identifier": "from_langchain", "path": "lionagi/bridge/langchain.py", "snippet": "def from_langchain(lc_doc: Any) -> T:\n \"\"\"\n Converts a langchain document into a DataNode object.\n\n Parameters:\n lc_doc (Any): The langchain document to be converted.\n\n Returns:\n DataNode: A DataNode object created from the langchain document.\n \"\"\"\n info_json = lc_doc.to_json()\n info_node = {'lc_id': info_json['id']}\n info_node = {**info_node, **info_json['kwargs']}\n return DataNode(**info_node)" }, { "identifier": "langchain_text_splitter", "path": "lionagi/bridge/langchain.py", "snippet": "def langchain_text_splitter(data: Union[str, List],\n splitter: Union[str, Callable], \n splitter_args: List[Any] = [], \n splitter_kwargs: Dict[str, Any] = {}) -> List[str]:\n\n \"\"\"\n Splits text or a list of documents using a specified langchain text splitter.\n\n Parameters:\n data (Union[str, List]): The input text or list of documents to be split.\n\n splitter (Union[str, Callable]): The name of the text splitter function or the function itself.\n\n splitter_args (List[Any]): Positional arguments to pass to the splitter function.\n\n splitter_kwargs (Dict[str, Any]): Keyword arguments to pass to the splitter function.\n\n Returns:\n List[str]: A list of chunks obtained by splitting the input.\n\n Raises:\n ValueError: If the specified text splitter is invalid or if the splitting fails.\n \"\"\"\n\n import langchain.text_splitter as text_splitter\n\n try:\n if isinstance(splitter, str):\n splitter = getattr(text_splitter, splitter)\n else:\n splitter = splitter\n except Exception as e:\n raise ValueError(f'Invalid text splitter: {splitter}. Error: {e}')\n\n try:\n splitter_obj = splitter(*splitter_args, **splitter_kwargs)\n if isinstance(data, str):\n chunk = splitter_obj.split_text(data)\n else:\n chunk = splitter_obj.split_documents(data)\n return chunk\n except Exception as e:\n raise ValueError(f'Failed to split. Error: {e}')" }, { "identifier": "from_llama_index", "path": "lionagi/bridge/llama_index.py", "snippet": "def from_llama_index(llama_node: Any, **kwargs: Any) -> T:\n \"\"\"\n Converts a Llama Index node into a DataNode object.\n\n Parameters:\n llama_node (Any): The Llama Index node to be converted.\n \n **kwargs: Additional keyword arguments for JSON serialization.\n\n Returns:\n DataNode: A DataNode object created from the Llama Index node.\n \"\"\"\n llama_dict = llama_node.to_dict(**kwargs)\n return DataNode.from_dict(llama_dict)" }, { "identifier": "llama_index_node_parser", "path": "lionagi/bridge/llama_index.py", "snippet": "def llama_index_node_parser(documents: List[Any], \n parser: Union[str, Callable], \n parser_args: List[Any] = [], \n parser_kwargs: Dict[str, Any] = {},\n parsing_kwargs: Dict[str, Any] = {}) -> List[Any]:\n \"\"\"\n Parses documents into nodes using a specified Llama Index node parser.\n\n Parameters:\n documents (List[Any]): The documents to parse.\n \n parser (Union[str, Callable]): The name of the parser function or the parser function itself.\n \n parser_args (List[Any]): Positional arguments to pass to the parser function.\n \n parser_kwargs (Dict[str, Any]): Keyword arguments to pass to the parser function.\n\n Returns:\n List[Any]: A list of nodes parsed from the documents.\n\n Raises:\n ValueError: If the specified parser is invalid or if the parser fails to parse the documents.\n \"\"\"\n parser = get_llama_parser(parser)\n\n try:\n parser_obj = parser(*parser_args, **parser_kwargs)\n nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)\n return nodes\n\n except Exception as e1:\n try:\n parser_obj = parser.from_defaults(*parser_args, **parser_kwargs)\n nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)\n return nodes\n except Exception as e2:\n raise ValueError(f'Failed to parse. Error: {e1}, {e2}')" }, { "identifier": "ChunkerType", "path": "lionagi/loaders/load_util.py", "snippet": "class ChunkerType(str, Enum):\n PLAIN = 'plain' # default\n LANGCHAIN = 'langchain' # using langchain functions\n LLAMAINDEX = 'llama_index' # using llamaindex functions\n SELFDEFINED = 'self_defined' # create custom functions" }, { "identifier": "file_to_chunks", "path": "lionagi/loaders/load_util.py", "snippet": "def file_to_chunks(input,\n # project='project',\n # output_dir='data/logs/sources/',\n chunk_func = _file_to_chunks, **kwargs):\n # out_to_csv=False,\n # filename=None,\n # verbose=True,\n # timestamp=True,\n # logger=None,\n logs = to_list(lcall(input, chunk_func, **kwargs), flatten=True)\n return logs" } ]
from typing import Union, Callable from lionagi.utils import lcall from lionagi.schema import DataNode from lionagi.bridge import langchain_text_splitter, from_langchain, llama_index_node_parser, from_llama_index from .load_util import ChunkerType, file_to_chunks
2,973
Returns: List[DataNode]: The list of converted DataNode instances. """ for i in range(len(documents)): if type(documents[i]) == DataNode: if chunker_type == ChunkerType.LLAMAINDEX: documents[i] = documents[i].to_llama_index() elif chunker_type == ChunkerType.LANGCHAIN: documents[i] = documents[i].to_langchain() return documents # Function to chunk text documents def text_chunker(documents, args, kwargs): """ Chunks text documents into smaller pieces. Parameters: documents (List[DataNode]): A list of DataNode instances to be chunked. args (List[Any]): Positional arguments to be passed to the chunking function. kwargs (dict): Keyword arguments to be passed to the chunking function. Returns: List[DataNode]: A list of chunked DataNode instances. """ def chunk_node(node): chunks = file_to_chunks(node.to_dict(), *args, **kwargs) lcall(chunks, lambda chunk: chunk.pop('node_id')) chunk_nodes = lcall(chunks, lambda x: DataNode(**x)) return chunk_nodes nodes = [] for doc in documents: nodes += chunk_node(doc) return nodes def _datanode_parser(nodes, parser): """ Parses raw data into DataNode instances using the provided parser function. Parameters: nodes (List[Any]): A list of raw data to be parsed. parser (Callable): A function that parses raw data into DataNode instances. Returns: List[DataNode]: A list of parsed DataNode instances. Raises: ValueError: If the parser function fails. """ try: nodes = parser(nodes) except Exception as e: raise ValueError(f'DataNode parser {parser} failed. Error:{e}') return nodes def chunk(documents, chunker, chunker_type=ChunkerType.PLAIN, chunker_args=[], chunker_kwargs={}, chunking_kwargs={}, documents_convert_func=None, to_datanode: Union[bool, Callable] = True): """ Chunks documents using the specified chunker and chunker type. Parameters: documents (List[Any]): A list of documents to be chunked. chunker (Callable): The chunking function to be used. chunker_type (ChunkerType): The type of the chunker. Defaults to ChunkerType.PLAIN. chunker_args (List[Any]): Positional arguments for the chunker function. Defaults to an empty list. chunker_kwargs (dict): Keyword arguments for the chunker function. Defaults to an empty dict. chunking_kwargs (dict): Additional keyword arguments for the chunking process. Defaults to an empty dict. documents_convert_func (Callable): A function to convert documents to a specific format. Defaults to None. to_datanode (Union[bool, Callable]): Determines whether to convert the result into DataNode instances, or a callable to convert the result. Defaults to True. Returns: List[DataNode]: A list of chunked DataNode instances after applying the chunker. Raises: ValueError: If the chunker fails or an unsupported chunker type is provided. """ if chunker_type == ChunkerType.PLAIN: try: if chunker == 'text_chunker': chunker = text_chunker nodes = chunker(documents, chunker_args, chunker_kwargs) return nodes except Exception as e: raise ValueError(f'Reader {chunker} is currently not supported. Error: {e}') if chunker_type == ChunkerType.LANGCHAIN: if documents_convert_func: documents = documents_convert_func(documents, 'langchain') nodes = langchain_text_splitter(documents, chunker, chunker_args, chunker_kwargs) if isinstance(to_datanode, bool) and to_datanode is True: if isinstance(documents, str): nodes = lcall(nodes, lambda x: DataNode(content=x)) else: nodes = lcall(nodes, from_langchain) elif isinstance(to_datanode, Callable): nodes = _datanode_parser(nodes, to_datanode) return nodes elif chunker_type == ChunkerType.LLAMAINDEX: if documents_convert_func: documents = documents_convert_func(documents, 'llama_index')
# use utils, schema and bridge # Function to convert documents to a specific format based on the chunker type def datanodes_convert(documents, chunker_type): """ Converts a lionagi DataNode documents to a specific format based on the chunker type. Parameters: documents (List[DataNode]): A list of DataNode instances to be converted. chunker_type (ChunkerType): The chunker type to determine the conversion format. Returns: List[DataNode]: The list of converted DataNode instances. """ for i in range(len(documents)): if type(documents[i]) == DataNode: if chunker_type == ChunkerType.LLAMAINDEX: documents[i] = documents[i].to_llama_index() elif chunker_type == ChunkerType.LANGCHAIN: documents[i] = documents[i].to_langchain() return documents # Function to chunk text documents def text_chunker(documents, args, kwargs): """ Chunks text documents into smaller pieces. Parameters: documents (List[DataNode]): A list of DataNode instances to be chunked. args (List[Any]): Positional arguments to be passed to the chunking function. kwargs (dict): Keyword arguments to be passed to the chunking function. Returns: List[DataNode]: A list of chunked DataNode instances. """ def chunk_node(node): chunks = file_to_chunks(node.to_dict(), *args, **kwargs) lcall(chunks, lambda chunk: chunk.pop('node_id')) chunk_nodes = lcall(chunks, lambda x: DataNode(**x)) return chunk_nodes nodes = [] for doc in documents: nodes += chunk_node(doc) return nodes def _datanode_parser(nodes, parser): """ Parses raw data into DataNode instances using the provided parser function. Parameters: nodes (List[Any]): A list of raw data to be parsed. parser (Callable): A function that parses raw data into DataNode instances. Returns: List[DataNode]: A list of parsed DataNode instances. Raises: ValueError: If the parser function fails. """ try: nodes = parser(nodes) except Exception as e: raise ValueError(f'DataNode parser {parser} failed. Error:{e}') return nodes def chunk(documents, chunker, chunker_type=ChunkerType.PLAIN, chunker_args=[], chunker_kwargs={}, chunking_kwargs={}, documents_convert_func=None, to_datanode: Union[bool, Callable] = True): """ Chunks documents using the specified chunker and chunker type. Parameters: documents (List[Any]): A list of documents to be chunked. chunker (Callable): The chunking function to be used. chunker_type (ChunkerType): The type of the chunker. Defaults to ChunkerType.PLAIN. chunker_args (List[Any]): Positional arguments for the chunker function. Defaults to an empty list. chunker_kwargs (dict): Keyword arguments for the chunker function. Defaults to an empty dict. chunking_kwargs (dict): Additional keyword arguments for the chunking process. Defaults to an empty dict. documents_convert_func (Callable): A function to convert documents to a specific format. Defaults to None. to_datanode (Union[bool, Callable]): Determines whether to convert the result into DataNode instances, or a callable to convert the result. Defaults to True. Returns: List[DataNode]: A list of chunked DataNode instances after applying the chunker. Raises: ValueError: If the chunker fails or an unsupported chunker type is provided. """ if chunker_type == ChunkerType.PLAIN: try: if chunker == 'text_chunker': chunker = text_chunker nodes = chunker(documents, chunker_args, chunker_kwargs) return nodes except Exception as e: raise ValueError(f'Reader {chunker} is currently not supported. Error: {e}') if chunker_type == ChunkerType.LANGCHAIN: if documents_convert_func: documents = documents_convert_func(documents, 'langchain') nodes = langchain_text_splitter(documents, chunker, chunker_args, chunker_kwargs) if isinstance(to_datanode, bool) and to_datanode is True: if isinstance(documents, str): nodes = lcall(nodes, lambda x: DataNode(content=x)) else: nodes = lcall(nodes, from_langchain) elif isinstance(to_datanode, Callable): nodes = _datanode_parser(nodes, to_datanode) return nodes elif chunker_type == ChunkerType.LLAMAINDEX: if documents_convert_func: documents = documents_convert_func(documents, 'llama_index')
nodes = llama_index_node_parser(documents, chunker, chunker_args, chunker_kwargs, chunking_kwargs)
5
2023-10-17 03:10:02+00:00
4k
ziqipang/LM4VisualEncoding
pointcloud_classification/models/Point_BERT.py
[ { "identifier": "Group", "path": "pointcloud_classification/models/dvae.py", "snippet": "class Group(nn.Module):\n def __init__(self, num_group, group_size):\n super().__init__()\n self.num_group = num_group\n self.group_size = group_size\n # self.knn = KNN(k=self.group_size, transpose_mode=True)\n\n def forward(self, xyz):\n '''\n input: B N 3\n ---------------------------\n output: B G M 3\n center : B G 3\n '''\n batch_size, num_points, _ = xyz.shape\n # fps the centers out\n center = misc.fps(xyz, self.num_group) # B G 3\n # knn to get the neighborhood\n # _, idx = self.knn(xyz, center) # B G M\n idx = knn_point(self.group_size, xyz, center) # B G M\n assert idx.size(1) == self.num_group\n assert idx.size(2) == self.group_size\n idx_base = torch.arange(0, batch_size, device=xyz.device).view(-1, 1, 1) * num_points\n idx = idx + idx_base\n idx = idx.view(-1)\n neighborhood = xyz.view(batch_size * num_points, -1)[idx, :]\n neighborhood = neighborhood.view(batch_size, self.num_group, self.group_size, 3).contiguous()\n # normalize\n neighborhood = neighborhood - center.unsqueeze(2)\n return neighborhood, center" }, { "identifier": "DiscreteVAE", "path": "pointcloud_classification/models/dvae.py", "snippet": "class DiscreteVAE(nn.Module):\n def __init__(self, config, **kwargs):\n super().__init__()\n self.group_size = config.group_size\n self.num_group = config.num_group\n self.encoder_dims = config.encoder_dims\n self.tokens_dims = config.tokens_dims\n\n self.decoder_dims = config.decoder_dims\n self.num_tokens = config.num_tokens\n\n \n self.group_divider = Group(num_group = self.num_group, group_size = self.group_size)\n self.encoder = Encoder(encoder_channel = self.encoder_dims)\n self.dgcnn_1 = DGCNN(encoder_channel = self.encoder_dims, output_channel = self.num_tokens)\n self.codebook = nn.Parameter(torch.randn(self.num_tokens, self.tokens_dims))\n\n self.dgcnn_2 = DGCNN(encoder_channel = self.tokens_dims, output_channel = self.decoder_dims)\n self.decoder = Decoder(encoder_channel = self.decoder_dims, num_fine = self.group_size)\n self.build_loss_func()\n\n \n \n def build_loss_func(self):\n self.loss_func_cdl1 = ChamferDistanceL1().cuda()\n self.loss_func_cdl2 = ChamferDistanceL2().cuda()\n self.loss_func_emd = emd().cuda()\n\n def recon_loss(self, ret, gt):\n whole_coarse, whole_fine, coarse, fine, group_gt, _ = ret\n\n bs, g, _, _ = coarse.shape\n\n coarse = coarse.reshape(bs*g, -1, 3).contiguous()\n fine = fine.reshape(bs*g, -1, 3).contiguous()\n group_gt = group_gt.reshape(bs*g, -1, 3).contiguous()\n\n loss_coarse_block = self.loss_func_cdl1(coarse, group_gt)\n loss_fine_block = self.loss_func_cdl1(fine, group_gt)\n\n loss_recon = loss_coarse_block + loss_fine_block\n\n return loss_recon\n\n def get_loss(self, ret, gt):\n\n # reconstruction loss\n loss_recon = self.recon_loss(ret, gt)\n # kl divergence\n logits = ret[-1] # B G N\n softmax = F.softmax(logits, dim=-1)\n mean_softmax = softmax.mean(dim=1)\n log_qy = torch.log(mean_softmax)\n log_uniform = torch.log(torch.tensor([1. / self.num_tokens], device = gt.device))\n loss_klv = F.kl_div(log_qy, log_uniform.expand(log_qy.size(0), log_qy.size(1)), None, None, 'batchmean', log_target = True)\n\n return loss_recon, loss_klv\n\n\n def forward(self, inp, temperature = 1., hard = False, **kwargs):\n neighborhood, center = self.group_divider(inp)\n logits = self.encoder(neighborhood) # B G C\n logits = self.dgcnn_1(logits, center) # B G N\n soft_one_hot = F.gumbel_softmax(logits, tau = temperature, dim = 2, hard = hard) # B G N\n sampled = torch.einsum('b g n, n c -> b g c', soft_one_hot, self.codebook) # B G C\n feature = self.dgcnn_2(sampled, center)\n coarse, fine = self.decoder(feature)\n\n\n with torch.no_grad():\n whole_fine = (fine + center.unsqueeze(2)).reshape(inp.size(0), -1, 3)\n whole_coarse = (coarse + center.unsqueeze(2)).reshape(inp.size(0), -1, 3)\n\n assert fine.size(2) == self.group_size\n ret = (whole_coarse, whole_fine, coarse, fine, neighborhood, logits)\n return ret" }, { "identifier": "Encoder", "path": "pointcloud_classification/models/dvae.py", "snippet": "class Encoder(nn.Module):\n def __init__(self, encoder_channel):\n super().__init__()\n self.encoder_channel = encoder_channel\n self.first_conv = nn.Sequential(\n nn.Conv1d(3, 128, 1),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 256, 1)\n )\n self.second_conv = nn.Sequential(\n nn.Conv1d(512, 512, 1),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Conv1d(512, self.encoder_channel, 1)\n )\n def forward(self, point_groups):\n '''\n point_groups : B G N 3\n -----------------\n feature_global : B G C\n '''\n bs, g, n , _ = point_groups.shape\n point_groups = point_groups.reshape(bs * g, n, 3)\n # encoder\n feature = self.first_conv(point_groups.transpose(2,1)) # BG 256 n\n feature_global = torch.max(feature,dim=2,keepdim=True)[0] # BG 256 1\n feature = torch.cat([feature_global.expand(-1,-1,n), feature], dim=1)# BG 512 n\n feature = self.second_conv(feature) # BG 1024 n\n feature_global = torch.max(feature, dim=2, keepdim=False)[0] # BG 1024\n return feature_global.reshape(bs, g, self.encoder_channel)" }, { "identifier": "LLaMATransformer", "path": "pointcloud_classification/models/llama.py", "snippet": "class LLaMATransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.n_layers = config['n_layers']\n self.first_layer = config['first_layer']\n self.layers = torch.nn.ModuleList()\n print(f'LLaMA Transformer with {self.n_layers} layers, first layer {self.first_layer}')\n for layer_id in range(self.first_layer, self.n_layers):\n self.layers.append(TransformerBlock(layer_id, config))\n\n self.norm = RMSNorm(config['dim'], eps=config['norm_eps'])\n self.prepare_inputs_for_generation = None\n \n # @torch.inference_mode()\n def forward(self, h):\n for layer in self.layers:\n h = layer(h)\n h = self.norm(h)\n return h.float()\n\n def custom_load_state_dict(self, checkpoint, tail=False, strict=False):\n # self.load_state_dict(checkpoint, strict=strict)\n # load the final layers\n if tail:\n for i in range(self.first_layer, self.n_layers):\n layer_checkpoint_keys = [k for k in checkpoint.keys() if f'layers.{i}.' in k]\n layer_checkpoint_keys = [k.replace(f'layers.{i}.', '') for k in layer_checkpoint_keys]\n layer_checkpoint = {k: checkpoint[f'layers.{i}.{k}'] for k in layer_checkpoint_keys}\n layer_checkpoint.pop('attention.inner_attention.rope.freqs')\n self.layers[i - self.first_layer].load_state_dict(layer_checkpoint, strict=True)\n return\n\n @torch.inference_mode()\n def forward_llama(self, tokens: torch.Tensor, start_pos: int):\n _bsz, seqlen = tokens.shape\n h = self.tok_embeddings(tokens)\n self.freqs_cis = self.freqs_cis.to(h.device)\n freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]\n\n mask = None\n if seqlen > 1:\n mask = torch.full((1, 1, seqlen, seqlen), float(\"-inf\"), device=tokens.device)\n mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h)\n\n if self.adapter:\n adapter_index = 0\n adapter = self.adapter_query.weight.reshape(-1, self.adapter_len, 4096).unsqueeze(1)\n for layer in self.layers:\n if not self.use_adapter:\n h = layer(h, start_pos, freqs_cis, mask)\n else:\n h = layer(h, start_pos, freqs_cis, mask, adapter[adapter_index])\n adapter_index += 1\n h = self.norm(h)\n output = self.output(h[:, -1, :]) # only compute last logits\n return output.float()" }, { "identifier": "MODELS", "path": "pointcloud_classification/models/build.py", "snippet": "MODELS = registry.Registry('models')" } ]
import torch import torch.nn as nn import torch.nn.functional as F import timm import numpy as np import random from pathlib import Path from timm.models.layers import DropPath, trunc_normal_ from .dvae import Group from .dvae import DiscreteVAE, Encoder from .llama import LLaMATransformer from .build import MODELS from utils import misc from utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message from utils.logger import *
3,370
class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class TransformerEncoder(nn.Module): """ Transformer Encoder without hierarchical structure """ def __init__(self, embed_dim=768, depth=4, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.): super().__init__() self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path = drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate ) for i in range(depth)]) def forward(self, x, pos): for _, block in enumerate(self.blocks): x = block(x + pos) return x
class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class TransformerEncoder(nn.Module): """ Transformer Encoder without hierarchical structure """ def __init__(self, embed_dim=768, depth=4, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.): super().__init__() self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path = drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate ) for i in range(depth)]) def forward(self, x, pos): for _, block in enumerate(self.blocks): x = block(x + pos) return x
@MODELS.register_module()
4
2023-10-19 15:40:57+00:00
4k
stanford-oval/WikiChat
benchmark/scripts/automatic_eval.py
[ { "identifier": "DialogueTurn", "path": "pipelines/dialog_turn.py", "snippet": "class DialogueTurn:\n def __init__(\n self,\n agent_utterance: str = None,\n user_utterance: str = None,\n pipeline: str = None,\n engine: str = None,\n generate_engine: str = None,\n draft_engine: str = None,\n ):\n self.engine = engine\n self.generate_engine = generate_engine\n self.draft_engine = draft_engine\n self.pipeline = pipeline\n self.wall_time_seconds = (\n 0 # how much time it took to generate this turn, in seconds\n )\n self.agent_utterance = agent_utterance\n self.user_utterance = user_utterance\n\n # retrieve_and_generate pipeline\n self.initial_search_query = None\n self.initial_search_query_time = None\n self.initial_search_results = []\n self.initial_search_result_titles = []\n self.initial_search_bullets = []\n\n # generate_and_correct pipeline\n self.llm_utterance = None\n self.claims = []\n self.verification_retrieval_results = {}\n self.verification_result = {}\n\n # early_combine pipeline\n self.combined_evidences = []\n self.combined_utterance = None\n self.feedback = []\n self.feedback_scores = []\n self.refined_utterance = None\n\n def _summarize_vc_log(self):\n verification_summary = {}\n assert len(self.verification_result) == len(\n self.verification_retrieval_results\n ), \"We need to have retrieved evidence for all claims\"\n for key, value in self.verification_retrieval_results.items():\n claim_idx = int(key)\n v_ret_results = []\n for v in value:\n title, paragraph, score = tuple(v)\n v_ret_results.append(\n {\"title\": title, \"paragraph\": paragraph, \"score\": round(score, 1)}\n )\n verification_summary[self.claims[claim_idx][0]] = OrderedDict(\n {\n \"label\": self.verification_result[claim_idx][\"label\"],\n \"fixed_claim\": self.verification_result[claim_idx][\"fixed_claim\"],\n \"retrieval_results\": v_ret_results,\n }\n )\n return verification_summary\n\n def _summarize_rg_log(self):\n rg_summary = {\n \"initial_search_query\": self.initial_search_query,\n \"initial_search_query_time\": self.initial_search_query_time,\n \"initial_search_bullets\": self.initial_search_bullets,\n \"initial_search_results\": [],\n }\n\n for i in range(len(self.initial_search_results)):\n rg_summary[\"initial_search_results\"].append(\n {\n \"title\": self.initial_search_result_titles[i],\n \"paragraph\": self.initial_search_results[i],\n # 'bullets': self.initial_search_bullets,\n }\n )\n\n return rg_summary\n\n def log(self):\n \"\"\"\n Returns a json object that contains all information inside `self`\n \"\"\"\n # combine fields into a more human-readable field\n verification_summary = self._summarize_vc_log()\n rg_summary = self._summarize_rg_log()\n\n return OrderedDict(\n {\n # retrieve_and_generate pipeline\n \"retrieve_and_generate\": rg_summary,\n # generate_and_correct pipeline\n \"llm_utterance\": self.llm_utterance,\n \"generate_and_correct\": verification_summary,\n # early_combine pipeline\n \"combined_evidences\": self.combined_evidences,\n \"combined_utterance\": self.combined_utterance,\n \"feedback\": self.feedback,\n \"feedback_scores\": self.feedback_scores,\n \"refined_utterance\": self.refined_utterance,\n \"user_utterance\": self.user_utterance,\n \"agent_utterance\": self.agent_utterance,\n \"engine\": self.engine,\n \"generate_engine\": self.generate_engine,\n \"draft_engine\": self.draft_engine,\n \"pipeline\": self.pipeline,\n \"wall_time_seconds\": round(self.wall_time_seconds, 1),\n }\n )\n\n @staticmethod\n def utterance_list_to_dialog_history(utterance_list: List[str]):\n \"\"\"\n The resulting dialog history will not have all the fields correctly initialized, since no information about e.g. search queries is available\n \"\"\"\n dialog_history = []\n assert (\n len(utterance_list) % 2 == 1\n ), \"The first turn is always the user, and the turn to be generated is always the agent, so the number of turns should be odd\"\n for i in range(0, len(utterance_list) - 2, 2):\n dialog_history.append(\n DialogueTurn(\n user_utterance=utterance_list[i],\n agent_utterance=utterance_list[i + 1],\n )\n )\n user_utterance = utterance_list[-1]\n\n return dialog_history, user_utterance\n\n @staticmethod\n def dialog_history_to_utterance_list(dialog_history) -> List[str]:\n \"\"\"\n Convert a list of DialogueTurns to a list of strings\n \"\"\"\n utterance_list = []\n for turn in dialog_history:\n utterance_list.append(turn.user_utterance)\n utterance_list.append(turn.agent_utterance)\n return utterance_list" }, { "identifier": "llm_generate", "path": "llm/llm_generate.py", "snippet": "def llm_generate(\n template_file: str,\n prompt_parameter_values: Union[dict, List[dict]],\n engine: str,\n max_tokens: int,\n temperature: float,\n stop_tokens,\n top_p: float = 0.9,\n frequency_penalty: float = 0,\n presence_penalty: float = 0,\n postprocess: bool = True,\n filled_prompt=None,\n):\n \"\"\"\n Generates continuations for one or more prompts in parallel\n Inputs:\n prompt_parameter_values: dict or list of dict. If the input is a list, the output will be a list as well\n filled_prompt: gives direct access to the underlying model, without having to load a prompt template from a .prompt file. Used for testing.\n \"\"\"\n if not (\n filled_prompt is None\n and prompt_parameter_values is not None\n and template_file is not None\n ) and not (\n filled_prompt is not None\n and prompt_parameter_values is None\n and template_file is None\n ):\n raise ValueError(\n \"Can only use filled_prompt if template_file and prompt_parameter_values are None\"\n )\n\n # Decide which LLM resource to send this request to.\n # Use hash so that each time this function gets called with the same parameters after a backoff, the request gets sent to the same resource\n potential_llm_resources = [\n resource\n for resource in global_variables.all_llm_endpoints\n if engine in resource[\"engine_map\"]\n ]\n llm_resource = potential_llm_resources[\n hash(\n str(\n (\n template_file,\n prompt_parameter_values,\n engine,\n max_tokens,\n temperature,\n stop_tokens,\n top_p,\n frequency_penalty,\n presence_penalty,\n )\n )\n )\n % len(potential_llm_resources)\n ]\n # uniform load balancing instead of hashing\n # llm_resource = potential_llm_resources[random.randrange(len(potential_llm_resources))]\n\n if llm_resource[\"api_type\"] == \"local\":\n prompt_format = llm_resource[\"prompt_format\"]\n else:\n prompt_format = \"none\"\n\n # convert to a single element list so that the rest of the code only has to deal with a list\n input_was_list = True\n if filled_prompt is None:\n assert prompt_parameter_values is not None\n if not isinstance(prompt_parameter_values, list):\n input_was_list = False\n prompt_parameter_values = [prompt_parameter_values]\n filled_prompt, rendered_blocks = _fill_prompt(\n template_file, prompt_parameter_values, engine, prompt_format\n )\n else:\n if not isinstance(filled_prompt, list):\n input_was_list = False\n filled_prompt = [filled_prompt]\n\n assert isinstance(filled_prompt, list)\n\n # Call LLM to generate outputs\n generation_output = _llm_completion_with_backoff_and_cache(\n original_engine_name=engine,\n **_set_llm_resource_fields(\n llm_resource=llm_resource,\n engine=engine,\n prompt=filled_prompt,\n max_tokens=max_tokens,\n temperature=temperature,\n top_p=top_p,\n frequency_penalty=frequency_penalty,\n presence_penalty=presence_penalty,\n stop=stop_tokens,\n )\n )\n outputs = []\n for choice in generation_output[\"choices\"]:\n if choice[\"text\"]:\n outputs.append(choice[\"text\"])\n\n logger.info(\"LLM output: %s\", json.dumps(outputs, indent=2, ensure_ascii=False))\n\n # calculate and record the cost\n cost_prompt, cost_completion = global_variables._model_name_to_cost(engine)\n total_cost = (\n generation_output[\"usage\"][\"prompt_tokens\"] * cost_prompt\n + generation_output[\"usage\"].get(\"completion_tokens\", 0) * cost_completion\n ) / 1000\n global_variables.add_to_total_cost(total_cost)\n\n # postprocess the generation outputs\n outputs = [o.strip() for o in outputs]\n if postprocess:\n outputs = [_postprocess_generations(o) for o in outputs]\n\n # add to prompt logs if needed\n if global_variables.debug_prompts:\n with global_variables.thread_lock:\n for i, o in enumerate(outputs):\n if template_file in global_variables.prompts_to_skip_for_debugging:\n continue\n global_variables.prompt_logs.append(\n {\n \"template_name\": template_file,\n \"instruction\": rendered_blocks[i][\"short_instruction\"]\n if \"short_instruction\" in rendered_blocks[i]\n else rendered_blocks[i][\"instruction\"],\n \"input\": rendered_blocks[i][\"input\"],\n \"output\": o,\n }\n )\n\n if outputs == []:\n outputs = \"\"\n\n # convert back to a single item\n if len(outputs) == 1 and not input_was_list:\n outputs = outputs[0]\n return outputs" }, { "identifier": "get_total_cost", "path": "llm/global_variables.py", "snippet": "def get_total_cost():\n global total_cost\n return total_cost" } ]
from concurrent.futures import ThreadPoolExecutor from typing import List from tqdm import tqdm from scipy.stats import ttest_ind from pipelines.dialog_turn import DialogueTurn from llm.llm_generate import llm_generate from llm.global_variables import get_total_cost import json import argparse import numpy as np import logging import sys
2,596
sys.path.insert(0, "./") logger = logging.getLogger(__name__) def get_feedback(object_dlg_history: List[DialogueTurn], new_dlg_turn: DialogueTurn):
sys.path.insert(0, "./") logger = logging.getLogger(__name__) def get_feedback(object_dlg_history: List[DialogueTurn], new_dlg_turn: DialogueTurn):
feedback = llm_generate(
1
2023-10-19 18:17:25+00:00
4k
TonicAI/tvalmetrics
tonic_validate/metrics/answer_consistency_metric.py
[ { "identifier": "LLMResponse", "path": "tonic_validate/classes/llm_response.py", "snippet": "class LLMResponse(BaseModel):\n llm_answer: str\n llm_context_list: list[str]\n benchmark_item: BenchmarkItem" }, { "identifier": "Metric", "path": "tonic_validate/metrics/metric.py", "snippet": "class Metric(ABC):\n @property\n @abstractmethod\n def name(self) -> str:\n pass\n\n @abstractmethod\n def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:\n pass" }, { "identifier": "parse_boolean_response", "path": "tonic_validate/utils/metrics_util.py", "snippet": "def parse_boolean_response(response: str) -> bool:\n \"\"\"Parse boolean response from LLM evaluator.\n\n Attempts to parse response as true or false.\n\n Parameters\n ----------\n response: str\n Response from LLM evaluator.\n\n Returns\n -------\n bool\n Whether response should be interpreted as true or false.\n \"\"\"\n response_lower = response.lower()\n if response_lower == \"true\":\n return True\n if response_lower == \"false\":\n return False\n logger.debug(f\"Relevance response {response_lower} is not true or false\")\n if \"true\" in response_lower and \"false\" not in response_lower:\n return True\n if \"false\" in response_lower and \"true\" not in response_lower:\n return False\n log_message = (\n f\"Could not determine true or false from response {response_lower}\"\n \", returning False\"\n )\n logger.debug(log_message)\n return False" }, { "identifier": "parse_bullet_list_response", "path": "tonic_validate/utils/metrics_util.py", "snippet": "def parse_bullet_list_response(response: str) -> List[str]:\n \"\"\"Parse bullet list response from LLM evaluator.\n\n Attempts to parse repsonse as a bullet list, returning a list of strings that\n correspond to the bullet points. The response is assumed to be a bullet list in\n markdown format with the bullet points denoted by asterisks.\n\n Parameters\n ----------\n response: str\n Response from LLM evaluator.\n\n Returns\n -------\n List[str]\n List of strings that correspond to the bullet points in the response.\n \"\"\"\n if \"*\" not in response:\n log_message = (\n f\"Response {response} does not contain bullet list. Returning all of \"\n \"response as main point.\"\n )\n logger.debug(log_message)\n return [response]\n if not response.startswith(\"*\"):\n log_message = (\n f\"Response {response} does not start with bullet, when it should be a \"\n \"bulleted list. Content before the first bullet will be removed.\"\n )\n logger.debug(log_message)\n bullet_list = response.split(\"*\")[1:]\n bullet_list = [bullet.strip() for bullet in bullet_list]\n return bullet_list" }, { "identifier": "OpenAIService", "path": "tonic_validate/services/openai_service.py", "snippet": "class OpenAIService:\n def __init__(self, model: str = \"gpt-4-1106-preview\") -> None:\n self.client = OpenAI()\n self.model = model\n self.cache: Dict[str, str] = {}\n\n def get_response(\n self,\n prompt: str,\n max_retries: int = 5,\n ) -> str:\n if prompt in self.cache:\n return self.cache[prompt]\n while max_retries > 0:\n try:\n completion = self.client.chat.completions.create(\n model=self.model,\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"You are a helpful assistant. Respond using markdown.\",\n },\n {\"role\": \"user\", \"content\": prompt},\n ],\n temperature=0.0,\n )\n response = completion.choices[0].message.content\n if response is None:\n raise Exception(\n f\"Failed to get message response from {self.model}, message does not exist\"\n )\n self.cache[prompt] = response\n return response\n except Exception as e:\n print(e)\n max_retries -= 1\n raise Exception(\n f\"Failed to get completion response from {self.model}, max retires hit\"\n )" }, { "identifier": "main_points_call", "path": "tonic_validate/utils/llm_calls.py", "snippet": "def main_points_call(answer: str, openai_service: OpenAIService) -> str:\n \"\"\"Sends prompt for main points in answer to Open AI API and returns response.\n\n Parameters\n ----------\n answer: str\n The answer that was generated by the RAG system.\n openai_service: OpenAIService\n The OpenAI Service which allows for communication with the OpenAI API.\n\n Returns\n -------\n str\n Response from OpenAI API.\n \"\"\"\n logger.debug(\n f\"Asking {openai_service.model} for bullet list of main points in answer\"\n )\n main_message = (\n 'Write down in a bulleted list using markdown (so each bullet is a \"*\"), the main'\n \"points in the following answer to a user's query. Respond with the bulleted list\"\n 'and no additional text. Only use a single \"*\" for each bullet and do not use a \"*\"'\n \"anywhere in your response except for the bullets.\"\n )\n main_message += f\"\\nANSWER: {answer}\"\n\n response_message = openai_service.get_response(main_message)\n\n return response_message" }, { "identifier": "statement_derived_from_context_call", "path": "tonic_validate/utils/llm_calls.py", "snippet": "def statement_derived_from_context_call(\n statement: str, context_list: List[str], openai_service: OpenAIService\n) -> str:\n \"\"\"Sends prompt for whether statement is derived from context and returns response.\n\n Parameters\n ----------\n statement: str\n The statement to be checked.\n context_list: List[str]\n List of retrieved context to see if statement is derived from this context.\n openai_service: OpenAIService\n The OpenAI Service which allows for communication with the OpenAI API.\n\n Returns\n -------\n str\n Response from OpenAI API.\n \"\"\"\n logger.debug(\n f\"Asking {openai_service.model} whether statement is derived from context\"\n )\n main_message = (\n \"Considering the following statement and then list of context, determine whether the\"\n \"statement can be derived from the context. If the statement can be derived from the\"\n \"context response with true. Otherwise response with false. Respond with either true\"\n \"or false and no additional text.\"\n )\n main_message += f\"\\n\\nSTATEMENT:\\n{statement}\\nEND OF STATEMENT\"\n for i, context in enumerate(context_list):\n main_message += f\"\\n\\nCONTEXT {i}:\\n{context}\\nEND OF CONTEXT {i}\"\n\n response_message = openai_service.get_response(main_message)\n\n return response_message" } ]
import logging from tonic_validate.classes.llm_response import LLMResponse from tonic_validate.metrics.metric import Metric from tonic_validate.utils.metrics_util import ( parse_boolean_response, parse_bullet_list_response, ) from tonic_validate.services.openai_service import OpenAIService from tonic_validate.utils.llm_calls import ( main_points_call, statement_derived_from_context_call, )
1,742
logger = logging.getLogger() class AnswerConsistencyMetric(Metric): name = "answer_consistency" def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:
logger = logging.getLogger() class AnswerConsistencyMetric(Metric): name = "answer_consistency" def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:
main_points_response = main_points_call(llm_response.llm_answer, openai_service)
5
2023-10-23 21:38:11+00:00
4k
jhejna/cpl
research/datasets/replay_buffer/sampling.py
[ { "identifier": "utils", "path": "research/utils/utils.py", "snippet": "def to_device(batch: Any, device: torch.device) -> Any:\ndef to_tensor(batch: Any) -> Any:\ndef to_np(batch: Any) -> Any:\ndef remove_float64(batch: Any):\ndef unsqueeze(batch: Any, dim: int) -> Any:\ndef squeeze(batch: Any, dim: int) -> Any:\ndef get_from_batch(batch: Any, start: Union[int, np.ndarray, torch.Tensor], end: Optional[int] = None) -> Any:\ndef set_in_batch(batch: Any, value: Any, start: int, end: Optional[int] = None) -> None:\ndef batch_copy(batch: Any) -> Any:\ndef space_copy(space: gym.Space):\ndef contains_tensors(batch: Any) -> bool:\ndef get_device(batch: Any) -> Optional[torch.device]:\ndef concatenate(*args, dim: int = 0):\ndef append(lst, item):\ndef extend(lst1, lst2):\n def __init__(self, name: str = \"\"):\n def forward(self, x: Any) -> Any:\ndef np_dataset_alloc(\n space: gym.Space, capacity: int, begin_pad: Tuple[int] = tuple(), end_pad: Tuple[int] = tuple()\n) -> np.ndarray:\ndef np_bytes_per_instance(space: gym.Space) -> int:\ndef _flatten_dict_helper(flat_dict: Dict, value: Any, prefix: str, separator: str = \".\") -> None:\ndef flatten_dict(d: Dict, separator: str = \".\") -> Dict:\ndef nest_dict(d: Dict, separator: str = \".\") -> Dict:\ndef fetch_from_dict(d: Dict, keys: Union[str, List, Tuple], separator=\".\") -> List[Any]:\ndef create_optim_groups(params, kwargs):\nclass PrintNode(torch.nn.Module):" }, { "identifier": "Storage", "path": "research/datasets/replay_buffer/storage.py", "snippet": "class Storage(abc.ABC):\n \"\"\"\n The storage object is responsible for holding the data.\n In a distributed setup, each worker might have its own storage object that holds data.\n\n All storage objects must be given a \"done\" flag.\n This is used to derive the starts, ends, and lengths properties.\n Done should be true at the last step of the episode.\n \"\"\"\n\n @property\n def capacity(self):\n return self._capacity\n\n @property\n def size(self):\n return self._size\n\n @property\n def starts(self):\n return self._starts\n\n @property\n def ends(self):\n return self._ends\n\n @property\n def lengths(self):\n return self._lengths\n\n @property\n def bytes(self):\n return get_bytes(self._buffers)\n\n def save(self, path):\n \"\"\"\n Directly save the buffer Storage. This saves everything as a flat file.\n This is generally not a good idea as it creates gigantic files.\n \"\"\"\n assert self._size != 0, \"Trying to save Storage with no data.\"\n assert path.endswith(\".npz\"), \"Path given to `save` was bad. Must save in .npz format.\"\n data = utils.get_from_batch(self._buffers, 0, self._size)\n os.makedirs(os.path.dirname(path), exist_ok=True)\n return save_data(data, path) # Returns the save path\n\n def __getitem__(self, key):\n return self._buffers[key]\n\n def __getattr__(self, name):\n \"\"\"Returns attributes of the buffers\"\"\"\n return getattr(self._buffers, name)\n\n def __contains__(self, key):\n return key in self._buffers\n\n @abc.abstractmethod\n def add(self, data):\n raise NotImplementedError\n\n @abc.abstractmethod\n def extend(self, data):\n raise NotImplementedError" } ]
import copy import numpy as np from typing import Callable, Optional, Tuple from research.utils import utils from .storage import Storage
1,743
""" This file defines a number of sampling functions used by the replay buffer. Each sample function returns tensors of the following shape: (Batch, Time, dims...) and requires `storage` and `discount` arguments. Many of these functions have large blocks of repeated code, but are implemented separately for readability and performance optimiztaion. Sequences are sampled as follows: -stack_length ... -1, 0, 1, 2, ..., seq_length | stack |idx| seq | The stack parameter will always be sampled immediately, and is desinged to be used as context to the network. Stack will not obey nstep returns. (negative indexing) Everything is sampled in batches directly from memory (preferred) If batch_size is set to one, then a squeeze operation will be performed at the very end. Samples are returned as with shape: (Batch, Time, Dims...) if seq or stack dims are set to 1, then these parameters are ignored. """ def _get_ep_idxs(storage: Storage, batch_size: int = 1, sample_by_timesteps: bool = True, min_length: int = 2): if batch_size is None or batch_size > 1: ep_idxs = np.arange(len(storage.lengths))[storage.lengths >= min_length] if sample_by_timesteps: # Lower the lengths by the min_length - 1 to give the number of valid sequences. lengths = storage.lengths[ep_idxs] - (min_length - 1) p = lengths / lengths.sum() ep_idxs = np.random.choice(ep_idxs, size=(batch_size,), replace=True, p=p) else: ep_idxs = ep_idxs[np.random.randint(0, len(ep_idxs), size=(batch_size,))] return ep_idxs else: # Use a different, much faster sampling scheme for batch_size = 1 assert sample_by_timesteps is False, "Cannot sample by timesteps with batch_size=1, it's too slow!" ep_idx = np.random.randint(0, len(storage.lengths)) if storage.lengths[ep_idx] < min_length: return _get_ep_idxs(storage, batch_size, sample_by_timesteps, min_length) else: return np.array([ep_idx], np.int64) def sample( storage: Storage, batch_size: int = 1, sample_by_timesteps: bool = True, stack: int = 1, stack_keys: Tuple = (), discount: float = 0.99, ): """ Default sampling for imitation learning. Returns (obs, action, ... keys) batches. """ min_length = 2 ep_idxs = _get_ep_idxs(storage, batch_size, sample_by_timesteps, min_length) # sample position within the episode randomly # Note that there is a plus one offset here to account for the difference # between the obs and action position offsets = np.random.randint(1, storage.lengths[ep_idxs]) if stack > 1: assert len(stack_keys) > 1, "Provided stack > 1 but no stack keys" stack_offsets = np.expand_dims(offsets, axis=-1) + np.arange(-stack + 1, 1) stack_offsets = np.clip(stack_offsets, 0, None) # Clip to zero as lowest offset = start of episode stack_idxs = np.expand_dims(storage.starts[ep_idxs], axis=-1) + stack_offsets idxs = storage.starts[ep_idxs] + offsets # Sample from the dataset batch = {} for k in storage.keys(): sample_idxs = stack_idxs if k in stack_keys else idxs if k == "obs": sample_idxs = sample_idxs - 1 if k == "discount":
""" This file defines a number of sampling functions used by the replay buffer. Each sample function returns tensors of the following shape: (Batch, Time, dims...) and requires `storage` and `discount` arguments. Many of these functions have large blocks of repeated code, but are implemented separately for readability and performance optimiztaion. Sequences are sampled as follows: -stack_length ... -1, 0, 1, 2, ..., seq_length | stack |idx| seq | The stack parameter will always be sampled immediately, and is desinged to be used as context to the network. Stack will not obey nstep returns. (negative indexing) Everything is sampled in batches directly from memory (preferred) If batch_size is set to one, then a squeeze operation will be performed at the very end. Samples are returned as with shape: (Batch, Time, Dims...) if seq or stack dims are set to 1, then these parameters are ignored. """ def _get_ep_idxs(storage: Storage, batch_size: int = 1, sample_by_timesteps: bool = True, min_length: int = 2): if batch_size is None or batch_size > 1: ep_idxs = np.arange(len(storage.lengths))[storage.lengths >= min_length] if sample_by_timesteps: # Lower the lengths by the min_length - 1 to give the number of valid sequences. lengths = storage.lengths[ep_idxs] - (min_length - 1) p = lengths / lengths.sum() ep_idxs = np.random.choice(ep_idxs, size=(batch_size,), replace=True, p=p) else: ep_idxs = ep_idxs[np.random.randint(0, len(ep_idxs), size=(batch_size,))] return ep_idxs else: # Use a different, much faster sampling scheme for batch_size = 1 assert sample_by_timesteps is False, "Cannot sample by timesteps with batch_size=1, it's too slow!" ep_idx = np.random.randint(0, len(storage.lengths)) if storage.lengths[ep_idx] < min_length: return _get_ep_idxs(storage, batch_size, sample_by_timesteps, min_length) else: return np.array([ep_idx], np.int64) def sample( storage: Storage, batch_size: int = 1, sample_by_timesteps: bool = True, stack: int = 1, stack_keys: Tuple = (), discount: float = 0.99, ): """ Default sampling for imitation learning. Returns (obs, action, ... keys) batches. """ min_length = 2 ep_idxs = _get_ep_idxs(storage, batch_size, sample_by_timesteps, min_length) # sample position within the episode randomly # Note that there is a plus one offset here to account for the difference # between the obs and action position offsets = np.random.randint(1, storage.lengths[ep_idxs]) if stack > 1: assert len(stack_keys) > 1, "Provided stack > 1 but no stack keys" stack_offsets = np.expand_dims(offsets, axis=-1) + np.arange(-stack + 1, 1) stack_offsets = np.clip(stack_offsets, 0, None) # Clip to zero as lowest offset = start of episode stack_idxs = np.expand_dims(storage.starts[ep_idxs], axis=-1) + stack_offsets idxs = storage.starts[ep_idxs] + offsets # Sample from the dataset batch = {} for k in storage.keys(): sample_idxs = stack_idxs if k in stack_keys else idxs if k == "obs": sample_idxs = sample_idxs - 1 if k == "discount":
batch[k] = discount * utils.get_from_batch(storage[k], sample_idxs)
0
2023-10-19 17:25:45+00:00
4k
nbasyl/LLM-FP4
lm_eval/tasks/triviaqa.py
[ { "identifier": "Task", "path": "lm_eval/base.py", "snippet": "class LM(abc.ABC):\nclass BaseLM(LM):\nclass Task(abc.ABC):\nclass MultipleChoiceTask(Task):\nclass PerplexityTask(Task, abc.ABC):\nclass CacheHook:\nclass CachingLM:\nclass Request:\nclass RequestFactory:\n def __init__(self):\n def loglikelihood(self, requests):\n def loglikelihood_rolling(self, requests):\n def greedy_until(self, requests):\n def create_from_arg_string(cls, arg_string, additional_config=None):\n def set_cache_hook(self, cache_hook):\n def __init__(self):\n def eot_token_id(self):\n def max_length(self):\n def max_gen_toks(self):\n def batch_size(self):\n def device(self):\n def tok_encode(self, string: str):\n def tok_decode(self, tokens: Iterable[int]):\n def _model_generate(self, context, max_length, eos_token_id):\n def _model_call(self, inps):\n def _detect_batch_size(self, requests=None, pos=0):\n def forward_batch(batch_size):\n def _encode_pair(self, context, continuation):\n def loglikelihood(self, requests):\n def loglikelihood_rolling(self, requests):\n def _loglikelihood_tokens(self, requests, disable_tqdm=False, override_bs=None):\n def _collate(x):\n def _batch_scheduler(pos):\n def greedy_until(self, requests):\n def _collate(x):\n def __init__(self, data_dir=None, cache_dir=None, download_mode=None):\n def download(self, data_dir=None, cache_dir=None, download_mode=None):\n def should_decontaminate(self):\n def has_training_docs(self):\n def has_validation_docs(self):\n def has_test_docs(self):\n def training_docs(self):\n def validation_docs(self):\n def test_docs(self):\n def _process_doc(self, doc):\n def fewshot_examples(self, k, rnd):\n def doc_to_decontamination_query(self, doc):\n def doc_to_text(self, doc):\n def doc_to_target(self, doc):\n def construct_requests(self, doc, ctx):\n def process_results(self, doc, results):\n def aggregation(self):\n def higher_is_better(self):\n def fewshot_description(self):\n def fewshot_context(\n self, doc, num_fewshot, provide_description=None, rnd=None, description=None\n ):\n def doc_to_target(self, doc):\n def construct_requests(self, doc, ctx):\n def process_results(self, doc, results):\n def higher_is_better(self):\n def aggregation(self):\n def should_decontaminate(self):\n def has_training_docs(self):\n def fewshot_examples(self, k, rnd):\n def fewshot_context(\n self, doc, num_fewshot, provide_description=None, rnd=None, description=None\n ):\n def higher_is_better(self):\n def doc_to_decontamination_query(self, doc):\n def doc_to_text(self, doc):\n def doc_to_target(self, doc):\n def construct_requests(self, doc, ctx):\n def process_results(self, doc, results):\n def aggregation(self):\n def count_bytes(cls, doc):\n def count_words(cls, doc):\ndef hash_args(attr, args):\n def __init__(self, cachinglm):\n def add_partial(self, attr, req, res):\n def __init__(self, lm, cache_db):\n def __getattr__(self, attr):\n def fn(requests):\n def get_cache_hook(self):\n def __init__(self, request_type, args, index=None):\n def __iter__(self):\n def __getitem__(self, i):\n def __eq__(self, other):\n def __repr__(self):\n def __getattr__(self, attr):\n def fn(*args):\n DATASET_PATH: str = None\n DATASET_NAME: str = None\nREQUEST_RETURN_LENGTHS = {\n \"loglikelihood\": 2,\n \"greedy_until\": None,\n \"loglikelihood_rolling\": None,\n}" }, { "identifier": "mean", "path": "lm_eval/metrics.py", "snippet": "def mean(arr):\n return sum(arr) / len(arr)" } ]
import inspect import string from lm_eval.base import Task, rf from lm_eval.metrics import mean
1,617
""" TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension https://arxiv.org/pdf/1705.03551.pdf TriviaQA is a reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions. Homepage: https://nlp.cs.washington.edu/triviaqa/ """ _CITATION = """ @InProceedings{JoshiTriviaQA2017, author = {Joshi, Mandar and Choi, Eunsol and Weld, Daniel S. and Zettlemoyer, Luke}, title = {TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}, booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics}, month = {July}, year = {2017}, address = {Vancouver, Canada}, publisher = {Association for Computational Linguistics}, } """ class TriviaQA(Task): VERSION = 2 DATASET_PATH = "trivia_qa" DATASET_NAME = "rc.nocontext" def has_training_docs(self): return True def has_validation_docs(self): return True def has_test_docs(self): return False def training_docs(self): return self.dataset["train"] def validation_docs(self): return self.dataset["validation"] def test_docs(self): raise NotImplementedError() def doc_to_text(self, doc): return f"Question: {doc['question']}\nAnswer:" def should_decontaminate(self): return True def doc_to_decontamination_query(self, doc): return doc["question"] def doc_to_target(self, doc): return " " + doc["answer"]["value"] def _remove_prefixes(self, aliases): # Optimization: Remove any alias that has a strict prefix elsewhere in the list # we can do this because if the prefix is acceptable by isgreedy, we can stop looking aliases.sort() ret = [aliases[0]] for alias in aliases[1:]: if not alias.startswith(ret[-1]): ret.append(alias) return ret def construct_requests(self, doc, ctx): """Uses RequestFactory to construct Requests and returns an iterable of Requests which will be sent to the LM. :param doc: The document as returned from training_docs, validation_docs, or test_docs. :param ctx: str The context string, generated by fewshot_context. This includes the natural language description, as well as the few shot examples, and the question part of the document for `doc`. """
""" TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension https://arxiv.org/pdf/1705.03551.pdf TriviaQA is a reading comprehension dataset containing over 650K question-answer-evidence triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts and independently gathered evidence documents, six per question on average, that provide high quality distant supervision for answering the questions. Homepage: https://nlp.cs.washington.edu/triviaqa/ """ _CITATION = """ @InProceedings{JoshiTriviaQA2017, author = {Joshi, Mandar and Choi, Eunsol and Weld, Daniel S. and Zettlemoyer, Luke}, title = {TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}, booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics}, month = {July}, year = {2017}, address = {Vancouver, Canada}, publisher = {Association for Computational Linguistics}, } """ class TriviaQA(Task): VERSION = 2 DATASET_PATH = "trivia_qa" DATASET_NAME = "rc.nocontext" def has_training_docs(self): return True def has_validation_docs(self): return True def has_test_docs(self): return False def training_docs(self): return self.dataset["train"] def validation_docs(self): return self.dataset["validation"] def test_docs(self): raise NotImplementedError() def doc_to_text(self, doc): return f"Question: {doc['question']}\nAnswer:" def should_decontaminate(self): return True def doc_to_decontamination_query(self, doc): return doc["question"] def doc_to_target(self, doc): return " " + doc["answer"]["value"] def _remove_prefixes(self, aliases): # Optimization: Remove any alias that has a strict prefix elsewhere in the list # we can do this because if the prefix is acceptable by isgreedy, we can stop looking aliases.sort() ret = [aliases[0]] for alias in aliases[1:]: if not alias.startswith(ret[-1]): ret.append(alias) return ret def construct_requests(self, doc, ctx): """Uses RequestFactory to construct Requests and returns an iterable of Requests which will be sent to the LM. :param doc: The document as returned from training_docs, validation_docs, or test_docs. :param ctx: str The context string, generated by fewshot_context. This includes the natural language description, as well as the few shot examples, and the question part of the document for `doc`. """
continuation = rf.greedy_until(ctx, {"until": ["\n", ".", ","]})
0
2023-10-15 06:05:13+00:00
4k
alextamkin/generative-elicitation
run_human_evaluation.py
[ { "identifier": "FromSavedFileAgent", "path": "from_saved_file_agent.py", "snippet": "class FromSavedFileAgent(BaseActiveLearningAgent):\n \"\"\"Agent that loads generated interactions (queries and answers) from a saved file.\"\"\"\n\n def __init__(self, target_specification_file, engine, openai_cache_file=None, saved_interactions_file=None, eval_condition=\"at_end\", base_query_type=\"questions\", **kwargs):\n super().__init__(target_specification_file, engine, openai_cache_file)\n\n self.saved_interactions_file = saved_interactions_file\n self.saved_interactions_contents = json.load(open(saved_interactions_file))\n self.interaction_history = []\n self.initialize_test_cases(self.saved_interactions_contents[\"evaluation_results\"])\n self.noninteractive_mode = self.saved_interactions_contents.get(\"query_type\") == \"Non-interactive\"\n self.initialize_full_interaction_history(self.saved_interactions_contents[\"conversation_history\"])\n self.eval_condition = eval_condition\n self.last_eval_turn = 0\n self.base_query_type = base_query_type\n self.query_prompt = self.saved_interactions_contents.get(\"query_prompt\")\n \n def initialize_test_cases(self, human_test_cases):\n self.test_cases = [\n (test_case[\"sample\"], test_case[\"label\"] == \"yes\")\n for test_case in human_test_cases\n ]\n\n def initialize_full_interaction_history(self, human_interactions):\n self.turn_timings = {\"user\": [], \"assistant\": []}\n if self.noninteractive_mode:\n assert len(human_interactions) == 1\n self.full_interaction_history = [(\"\", turn[\"message\"]) for turn in human_interactions]\n self.turn_timings[\"user\"].append(human_interactions[0].get(\"time_spent_ms\", None))\n else:\n self.full_interaction_history = []\n for turn in human_interactions:\n if turn[\"sender\"] == \"assistant\":\n self.full_interaction_history.append((turn[\"message\"], None))\n else:\n self.full_interaction_history[-1] = (self.full_interaction_history[-1][0], turn[\"message\"])\n self.turn_timings[turn[\"sender\"]].append(turn.get(\"time_spent_ms\", None))\n # Remove the last turn if it's empty\n if self.full_interaction_history[-1][1] is None:\n self.full_interaction_history.pop()\n self.turn_timings[\"assistant\"].pop()\n\n def format_questions_and_answers(self, questions_and_answers):\n '''Formats the questions and answers into a string.\n\n Looks like:\n - Should the system allow numbers in the domain? -> Yes\n\n Args:\n questions_and_answers (list): A list of tuples of the form (question, answer).\n \n Returns:\n str: The formatted questions and answers.\n '''\n if self.noninteractive_mode:\n return '\\n'.join([f\"- {answer}\" for question, answer in questions_and_answers])\n else:\n return '\\n'.join([f\"- {question} -> {answer}\" for question, answer in questions_and_answers])\n\n def generate_active_query(self):\n '''Generates a question for the oracle.'''\n if len(self.interaction_history) >= len(self.full_interaction_history):\n return None\n return self.full_interaction_history[len(self.interaction_history)][0]\n \n def generate_oracle_response(self, question):\n '''Generates an oracle response for the question'''\n if len(self.interaction_history) < len(self.full_interaction_history):\n assert question == self.full_interaction_history[len(self.interaction_history)][0]\n answer = self.full_interaction_history[len(self.interaction_history)][1]\n self.interaction_history.append((question, answer))\n return answer\n else:\n return None\n \n def get_hypothesis_prompt(self, interaction_history, broken_regexes=None):\n pass\n \n def score_test_cases(self, **kwargs):\n self.last_eval_turn = len(self.interaction_history)\n return super().score_test_cases(**kwargs)\n\n def get_curr_user_timings_ms(self):\n return sum(self.turn_timings[\"user\"][:len(self.interaction_history)])\n\n def get_curr_user_message_lengths(self):\n return sum([len(turn[1]) for turn in self.interaction_history])\n\n def get_interaction_features(self):\n \"\"\"\n Returns a dictionary of features for the current interaction trajectory.\n\n The features are:\n - interaction_time: total time spent interacting with the system (in minutes)\n - interaction_num_turns: number of turns in the interaction\n - interaction_total_char_length: total number of characters in the user's messages\n \"\"\"\n return {\n \"interaction_time\": self.get_curr_user_timings_ms() / 60 / 1000,\n \"interaction_num_turns\": len(self.interaction_history),\n \"interaction_total_char_length\": self.get_curr_user_message_lengths(),\n }\n\n def evaluate_condition(self, **kwargs):\n if self.eval_condition == \"at_end\":\n return len(self.interaction_history) == len(self.full_interaction_history)\n elif self.eval_condition == \"per_minute\":\n total_interaction_time_ms_curr_turn = sum(self.turn_timings[\"user\"][:len(self.interaction_history)])\n if len(self.interaction_history) == 0:\n total_interaction_time_ms_prev_turn = 0\n else:\n total_interaction_time_ms_prev_turn = sum(self.turn_timings[\"user\"][:self.last_eval_turn])\n return (len(self.interaction_history) == len(self.full_interaction_history)) or (total_interaction_time_ms_curr_turn // 60000 > total_interaction_time_ms_prev_turn // 60000)\n elif self.eval_condition == \"per_turn_up_to_5\":\n return len(self.interaction_history) <= 5\n else:\n return True\n\n def get_query_prompt(self):\n return self.query_prompt" }, { "identifier": "run_problem_instance", "path": "run_model_evaluation.py", "snippet": "def run_problem_instance(\n problem_instance_filename, engine, openai_cache_file,\n num_interactions, agent_class, question_type, sampling_type,\n saved_interactions_file, temperature=0.0, outputs_save_file=None, base_query_type=None,\n pool_diversity_num_clusters=15, task=None, eval_condition=\"per_minute\",\n):\n '''Runs the generative active learning loop for a single problem instance.\n \n This entails doing several rounds of interaction between the AL agent and the oracle, \n evaluating the accuracies on held-out test cases after each interaction.\n\n Args:\n problem_instance_filename (str): The path to the problem instance file (holds the gold regex and test cases)\n engine (str): The OpenAI engine to use (e.g. gpt-3.5-turbo, gpt-4).\n openai_cache_file (str): The path to the OpenAI cache file.\n num_interactions (int): The number of interactions between the AL agent and the oracle.\n \n Returns:\n list: The test scores after each interaction.\n '''\n\n generative_al_agent = agent_class(\n problem_instance_filename, engine, openai_cache_file=openai_cache_file,\n question_type=question_type, saved_interactions_file=saved_interactions_file,\n eval_condition=eval_condition, pool_al_sampling_type=sampling_type,\n pool_diversity_num_clusters=pool_diversity_num_clusters,\n temperature=temperature, base_query_type=base_query_type,\n )\n \n if agent_class != FromSavedFileAgent:\n outputs_save_file.write(f\"0. {generative_al_agent.persona}\\n\\n\")\n query_type = AGENT_CLASS_TO_NAME[agent_class]\n if query_type == \"questions\":\n query_type += \"_\" + question_type\n elif query_type == \"pool\":\n query_type += \"_\" + sampling_type\n else:\n query_type = FILE_QUERY_TYPE_TO_NAME[base_query_type]\n\n test_xs = generative_al_agent.get_interaction_features()\n test_score, test_responses = generative_al_agent.score_test_cases()\n print(test_score)\n all_test_xs = update_metrics({}, test_xs)\n test_scores = update_metrics({}, test_score)\n start_test_scores = deepcopy(test_scores)\n all_test_responses = update_test_responses([], test_responses)\n\n for i in tqdm(range(num_interactions)):\n query = generative_al_agent.generate_active_query()\n if query is None:\n break\n\n answer = generative_al_agent.generate_oracle_response(query)\n\n outputs_save_file.write(f\"{i}. {query}\\n{answer}\\n\\n\")\n if not generative_al_agent.evaluate_condition():\n continue\n \n outputs_save_file.write(\"EVAL POINT\\n\")\n\n test_xs = generative_al_agent.get_interaction_features()\n test_score, test_responses = generative_al_agent.score_test_cases(start_metrics=start_test_scores)\n print(test_score)\n all_test_xs = update_metrics(all_test_xs, test_xs)\n test_scores = update_metrics(test_scores, test_score)\n all_test_responses = update_test_responses(all_test_responses, test_responses)\n \n print(test_xs)\n print(test_scores)\n outputs_save_file.write(f\"===TEST RESPONSES===\\n{json.dumps(all_test_responses, indent=2)}\\n\\n\")\n \n return all_test_xs, test_scores" } ]
import glob import sys import json import os import random import pandas as pd from tap import Tap from from_saved_file_agent import FromSavedFileAgent from run_model_evaluation import run_problem_instance from tqdm import tqdm
3,013
task_specific_directives = { "website_preferences": '\nFor this task, "yes" means the user would like the website, and "no" means the user would not like the website', "moral_reasoning": '\nFor this task, "yes" means the user would believe it is ethical to steal a loaf of bread, and "no" means the user would believe it is not ethical to steal a loaf of bread', "email_regex": '\nFor this task, "yes" means the user would find the email address valid, while "no" means the user would find the email address invalid', } task_specific_instructions = { "website_preferences": "asks a user about their preferences for a website", "moral_reasoning": "asks a user under what conditions they would believe it is ethical to steal a loaf of bread", "email_regex": "asks a user about their preferences for what makes a valid format for email addresses", } def get_saved_interaction_files_for_task(saved_annotations_dir, task): with open(f"{saved_annotations_dir}/experiment_type_to_prolific_id.json") as f: experiment_type_to_prolific_id = json.load(f) files_to_return = {} for experiment_type in experiment_type_to_prolific_id[task]: files_to_return[experiment_type] = [ f"{saved_annotations_dir}/{file_id}.json" for file_id in experiment_type_to_prolific_id[task][experiment_type] if os.path.exists(f"{saved_annotations_dir}/{file_id}.json") ] return files_to_return def main(args): if args.no_cache: openai_cache_file = None else: openai_cache_file = f"{args.engine}-cache-seed-{args.seed}.jsonl" all_test_xs = { "interaction_time": {}, "interaction_num_turns": {}, "interaction_total_char_length": {}, } all_test_scores = { "accuracy": {}, "AUCROC": {}, "correct_prob": {}, "accuracy_relative": {}, "AUCROC_relative": {}, "correct_prob_relative": {} } # initialize a dataframe all_test_results = pd.DataFrame(columns=[ 'interaction_time', 'interaction_num_turns', 'interaction_total_char_length', 'accuracy', 'AUCROC', 'correct_prob', 'accuracy_relative', 'AUCROC_relative', 'correct_prob_relative', 'question_mode', 'task', 'engine', 'seed', 'interaction_id', ]) problem_instance_filename = random.choice(glob.glob(f"gpt_prompts/{args.task}/*.json")) saved_interaction_files_for_task = get_saved_interaction_files_for_task(args.saved_annotations_dir ,args.task) for question_mode in saved_interaction_files_for_task: print(question_mode) for metric in all_test_xs: all_test_xs[metric][question_mode] = [] for metric in all_test_scores: all_test_scores[metric][question_mode] = [] for saved_interactions_file in tqdm(saved_interaction_files_for_task[question_mode]): print(saved_interactions_file) # filter out preferences that are trivial if args.filter_trivial_preferences: with open(saved_interactions_file) as f: saved_interactions = json.load(f) all_answers = [sample["label"] for sample in saved_interactions["evaluation_results"]] if len(set(all_answers)) == 1: continue os.makedirs(f"model_human_results/{args.task}", exist_ok=True) outputs_save_file = open(f"model_human_results/{args.task}/{args.engine}_{args.eval_condition}_{question_mode.replace('/', '_').replace(' ', '_')}_{os.path.split(saved_interactions_file)[-1][:-5]}.txt", "w") test_xs, test_scores = run_problem_instance( problem_instance_filename=problem_instance_filename, engine=args.engine, openai_cache_file=openai_cache_file, num_interactions=sys.maxsize,
task_specific_directives = { "website_preferences": '\nFor this task, "yes" means the user would like the website, and "no" means the user would not like the website', "moral_reasoning": '\nFor this task, "yes" means the user would believe it is ethical to steal a loaf of bread, and "no" means the user would believe it is not ethical to steal a loaf of bread', "email_regex": '\nFor this task, "yes" means the user would find the email address valid, while "no" means the user would find the email address invalid', } task_specific_instructions = { "website_preferences": "asks a user about their preferences for a website", "moral_reasoning": "asks a user under what conditions they would believe it is ethical to steal a loaf of bread", "email_regex": "asks a user about their preferences for what makes a valid format for email addresses", } def get_saved_interaction_files_for_task(saved_annotations_dir, task): with open(f"{saved_annotations_dir}/experiment_type_to_prolific_id.json") as f: experiment_type_to_prolific_id = json.load(f) files_to_return = {} for experiment_type in experiment_type_to_prolific_id[task]: files_to_return[experiment_type] = [ f"{saved_annotations_dir}/{file_id}.json" for file_id in experiment_type_to_prolific_id[task][experiment_type] if os.path.exists(f"{saved_annotations_dir}/{file_id}.json") ] return files_to_return def main(args): if args.no_cache: openai_cache_file = None else: openai_cache_file = f"{args.engine}-cache-seed-{args.seed}.jsonl" all_test_xs = { "interaction_time": {}, "interaction_num_turns": {}, "interaction_total_char_length": {}, } all_test_scores = { "accuracy": {}, "AUCROC": {}, "correct_prob": {}, "accuracy_relative": {}, "AUCROC_relative": {}, "correct_prob_relative": {} } # initialize a dataframe all_test_results = pd.DataFrame(columns=[ 'interaction_time', 'interaction_num_turns', 'interaction_total_char_length', 'accuracy', 'AUCROC', 'correct_prob', 'accuracy_relative', 'AUCROC_relative', 'correct_prob_relative', 'question_mode', 'task', 'engine', 'seed', 'interaction_id', ]) problem_instance_filename = random.choice(glob.glob(f"gpt_prompts/{args.task}/*.json")) saved_interaction_files_for_task = get_saved_interaction_files_for_task(args.saved_annotations_dir ,args.task) for question_mode in saved_interaction_files_for_task: print(question_mode) for metric in all_test_xs: all_test_xs[metric][question_mode] = [] for metric in all_test_scores: all_test_scores[metric][question_mode] = [] for saved_interactions_file in tqdm(saved_interaction_files_for_task[question_mode]): print(saved_interactions_file) # filter out preferences that are trivial if args.filter_trivial_preferences: with open(saved_interactions_file) as f: saved_interactions = json.load(f) all_answers = [sample["label"] for sample in saved_interactions["evaluation_results"]] if len(set(all_answers)) == 1: continue os.makedirs(f"model_human_results/{args.task}", exist_ok=True) outputs_save_file = open(f"model_human_results/{args.task}/{args.engine}_{args.eval_condition}_{question_mode.replace('/', '_').replace(' ', '_')}_{os.path.split(saved_interactions_file)[-1][:-5]}.txt", "w") test_xs, test_scores = run_problem_instance( problem_instance_filename=problem_instance_filename, engine=args.engine, openai_cache_file=openai_cache_file, num_interactions=sys.maxsize,
agent_class=FromSavedFileAgent,
0
2023-10-16 18:43:47+00:00
4k
bcmi/libcom
libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/openaimodel.py
[ { "identifier": "checkpoint", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)" }, { "identifier": "conv_nd", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "avg_pool_nd", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "zero_module", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "normalization", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)" }, { "identifier": "timestep_embedding", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n\n\n\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n\n\n\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n\n\n\n\n for i, block in enumerate(self.transformer_blocks):\n\n\n x = block(x, context=context[i])\n\n\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "CrossAttentionInteraction", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/fusion/dual_encoder_fusion.py", "snippet": "class CrossAttentionInteraction(nn.Module):\n def __init__(self, in_channels, n_heads=8, d_head=64,\n dropout=0.):\n super().__init__()\n\n inner_dim = n_heads * d_head\n\n self.norm = Normalize(inner_dim)\n\n self.crossAtt_1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout,\n context_dim=inner_dim)\n\n self.crossAtt_2 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout,\n context_dim=inner_dim)\n\n self.fc = nn.Conv1d(in_channels=inner_dim * 2, out_channels=inner_dim, kernel_size=1)\n\n self.conv1 = nn.Conv2d(in_channels=inner_dim, out_channels=inner_dim, kernel_size=1)\n\n def downsample(self, image_tensor, width, height):\n image_upsample_tensor = torch.nn.functional.interpolate(image_tensor, size=[width, height])\n image_upsample_tensor = image_upsample_tensor.clamp(0, 1)\n return image_upsample_tensor\n\n def forward(self, adapter_feature, unet_feature, fg_mask):\n ori_adapter_feature = adapter_feature\n\n b, c, h, w = adapter_feature.shape\n fg_mask = self.downsample(fg_mask, h, w)\n\n original_h = adapter_feature.shape[-1]\n\n adapter_feature = self.norm(adapter_feature)\n\n fg = adapter_feature * fg_mask\n\n fg = rearrange(fg, 'b c h w -> b c (h w)').contiguous()\n\n fg = rearrange(fg, 'b c h -> b h c').contiguous()\n\n bg = adapter_feature * (1 - fg_mask)\n\n bg = rearrange(bg, 'b c h w -> b (h w) c').contiguous()\n\n adapter_feature = self.crossAtt_1(fg, bg, mask=fg_mask, is_foreground='ada')\n\n adapter_feature = adapter_feature.permute(0, 2, 1)\n\n unet_feature = self.norm(unet_feature)\n\n unet_fg = unet_feature * fg_mask\n unet_fg = rearrange(unet_fg, 'b c h w -> b c (h w)').contiguous()\n\n unet_fg = rearrange(unet_fg, 'b c h -> b h c').contiguous()\n\n unet_bg = unet_feature * (1 - fg_mask)\n unet_bg = rearrange(unet_bg, 'b c h w -> b (h w) c').contiguous()\n unet_feature = self.crossAtt_2(unet_fg, unet_bg, mask=fg_mask, is_foreground='unet')\n\n unet_feature = unet_feature.permute(0, 2, 1)\n\n interact_feature = self.fc(torch.cat([adapter_feature, unet_feature], dim=1)) # 1 640 mm -> 1 320 mm\n\n interact_feature = interact_feature.repeat(1, 1, int(original_h * original_h / interact_feature.shape[-1]))\n\n b, c, h = interact_feature.shape\n\n interact_feature = interact_feature.reshape(b, c, original_h, original_h)\n\n interact_feature = self.conv1(interact_feature)\n\n new_adapter_feature = interact_feature * fg_mask + ori_adapter_feature * (1 - fg_mask)\n\n return new_adapter_feature" }, { "identifier": "exists", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def exists(x):\n return x is not None" } ]
from abc import abstractmethod from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.diffusionmodules.util import ( checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding, ) from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.attention import SpatialTransformer from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.fusion.dual_encoder_fusion import CrossAttentionInteraction from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.util import exists from omegaconf.listconfig import ListConfig import math import torch import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F
3,221
# dummy replace def convert_module_to_f16(x): pass def convert_module_to_f32(x): pass ## go class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb)
# dummy replace def convert_module_to_f16(x): pass def convert_module_to_f32(x): pass ## go class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
7
2023-10-19 05:08:12+00:00
4k
facebookresearch/motif
rlaif/annotators.py
[ { "identifier": "BlstatsTransform", "path": "rlaif/annotators_transforms.py", "snippet": "class BlstatsTransform:\n def __init__(self, blstats_keys: List[str]):\n self.blstats_keys = blstats_keys\n self.hunger_num_to_str = {\n 0: \"Satiated\", 1: \"\", 2: \"Hungry\", 3: \"Weak\",\n 4: \"Fainting\", 5: \"Fainted \", 6: \"Starved\"\n }\n self.blstats_to_index = {\n \"NLE_BL_X\": (0, \"X:{}\"),\n \"NLE_BL_Y\": (1, \"Y:{}\"),\n \"NLE_BL_STR25\": (2, \"Str:{}\"),\n \"NLE_BL_STR125\": (3, \"Str:{}\"),\n \"NLE_BL_DEX\": (4, \"Dex:{}\"),\n \"NLE_BL_CON\": (5, \"Con:{}\"),\n \"NLE_BL_INT\": (6, \"Int:{}\"),\n \"NLE_BL_WIS\": (7, \"Wis:{}\"),\n \"NLE_BL_CHA\": (8, \"Cha:{}\"),\n \"NLE_BL_SCORE\": (9, \"Score:{}\"),\n \"NLE_BL_HP\": (10, \"HP:{}\"),\n \"NLE_BL_HPMAX\": (11, \"({})\"),\n \"NLE_BL_DEPTH\": (12, \"Dlvl:{}\"),\n \"NLE_BL_GOLD\": (13, \"$:{}\"),\n \"NLE_BL_ENE\": (14, \"Ene:{}\"),\n \"NLE_BL_ENEMAX\": (15, \"Em:{}\"),\n \"NLE_BL_AC\": (16, \"AC:{}\"),\n \"NLE_BL_HD\": (17, \"HD:{}\"),\n \"NLE_BL_XP\": (18, \"Xp:{}\"),\n \"NLE_BL_EXP\": (19, \"/{}\"),\n \"NLE_BL_TIME\": (20, \"T:{}\"),\n \"NLE_BL_HUNGER\": (21, \"{}\"),\n \"NLE_BL_CAP\": (22, \"Cap:{}\"),\n \"NLE_BL_DNUM\": (23, \"Dn:{}\"),\n \"NLE_BL_DLEVEL\": (24, \"Lvl:{}\"),\n \"NLE_BL_CONDITION\": (25, \"Cnd:{}\"),\n \"NLE_BL_ALIGN\": (26, \"Algn:{}\"),\n }\n\n def blstats_to_str(self, blstats: np.ndarray):\n \"\"\"Process an individual blstat\"\"\"\n bls = \" \".join([self.blstats_to_index[key][1].format(blstats[self.blstats_to_index[key][0]]\n if key != \"NLE_BL_HUNGER\"\n else self.hunger_num_to_str[int(blstats[self.blstats_to_index[key][0]])])\n for key in self.blstats_keys])\n return bls\n\n def __call__(self, pair: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n # Pass through all the keys, if any\n dict_to_return = {k: v for k, v in pair.items() if k != 'blstats'}\n # pair['blstats'] is (2, seq_len, bldim)\n dict_to_return['blstats'] = [[self.blstats_to_str(bls) for bls in seq] for seq in pair['blstats']]\n return dict_to_return" }, { "identifier": "MessageTransform", "path": "rlaif/annotators_transforms.py", "snippet": "class MessageTransform:\n def __call__(self, pair: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n # Pass through all the keys, if any\n dict_to_return = {k: v for k, v in pair.items() if k != \"message\"}\n dict_to_return[\"message\"] = np.array([[\"\".join([chr(c) for c in row if c != 0]) for row in message]\n for message in pair['message']])\n return dict_to_return" }, { "identifier": "system_prompts", "path": "rlaif/prompts.py", "snippet": "" }, { "identifier": "LocalLanguageModel", "path": "rlaif/llms.py", "snippet": "class LocalLanguageModel:\n def __init__(\n self,\n system_prompt: str,\n answer_regex: str,\n retry_prompt: str,\n model_name: str = 'meta-llama/Llama-2-7b-chat-hf',\n num_gpus: int = 8,\n logdir: Optional[str] = None,\n ) -> None:\n self.model_name = model_name\n self.answer_regex = answer_regex\n self.retry_prompt = retry_prompt\n self.llm = LLM(model=model_name, tensor_parallel_size=num_gpus,\n dtype='float16', max_num_batched_tokens=4096)\n self.logdir = logdir\n self.system_prompt = system_prompt\n if self.logdir is not None:\n # Create directory\n os.makedirs(self.logdir, exist_ok=True)\n\n def generate(self, messages: List[str], logging_indices: Sequence[int] = None) -> List[int]:\n assert len(messages) == len(logging_indices)\n prompts = []\n convs = []\n for message in messages:\n conv = get_conversation_template(self.model_name)\n conv.system = self.system_prompt\n conv.append_message(conv.roles[0], message)\n conv.append_message(conv.roles[1], None)\n prompt = conv.get_prompt()\n prompts.append(prompt)\n convs.append(conv)\n sampling_params = SamplingParams(top_k=50, max_tokens=4096,\n temperature=0.1, top_p=0.95,\n stop=conv.stop_str)\n outputs = self.llm.generate(prompts, sampling_params)\n # Parse all the outputs\n cleaned_outputs = np.full(len(messages), AnnotationIdx.UNKOWN)\n indexes_to_retry = []\n prompts_to_retry = []\n print(\"Retrying prompts\")\n for i, output in enumerate(outputs):\n text_answer = output.outputs[0].text\n result = re.search(self.answer_regex, text_answer)\n conv = convs[i]\n conv.append_message('', text_answer)\n if result:\n try:\n best_sequence = int(result.group(1))\n if best_sequence == 1:\n best_sequence = AnnotationIdx.FIRST\n elif best_sequence == 2:\n best_sequence = AnnotationIdx.SECOND\n except ValueError:\n best_sequence = AnnotationIdx.TIE\n cleaned_outputs[i] = best_sequence\n else:\n # Ask the model again\n conv.append_message(conv.roles[0], self.retry_prompt)\n conv.append_message(conv.roles[1], None)\n prompt = conv.get_prompt()\n prompts_to_retry.append(prompt)\n indexes_to_retry.append(i)\n # Retry the prompts that were not good\n second_batch = self.llm.generate(prompts_to_retry, sampling_params)\n for i, output in zip(indexes_to_retry, second_batch):\n text_answer = output.outputs[0].text\n convs[i].append_message('', text_answer)\n result = re.search(self.answer_regex, text_answer)\n if result:\n try:\n best_sequence = int(result.group(1))\n if best_sequence == 1:\n best_sequence = AnnotationIdx.FIRST\n elif best_sequence == 2:\n best_sequence = AnnotationIdx.SECOND\n except ValueError:\n best_sequence = AnnotationIdx.TIE\n cleaned_outputs[i] = best_sequence\n if self.logdir is not None and logging_indices is not None:\n # Log the conversations\n for conv, idx in zip(convs, logging_indices):\n text_conv = conv.get_prompt()\n with open(os.path.join(self.logdir, str(idx) + \".txt\"), 'w') as f:\n f.write(text_conv)\n return cleaned_outputs" }, { "identifier": "AnnotationIdx", "path": "rlaif/llms.py", "snippet": "class AnnotationIdx:\n FIRST = 0\n SECOND = 1\n TIE = 2\n UNKOWN = 3" } ]
from abc import ABC, abstractmethod from typing import Dict, List, Callable, Optional, Tuple, Sequence from rlaif.annotators_transforms import BlstatsTransform, MessageTransform from rlaif.prompts import system_prompts, prompt_templates, goal_strings, regexes, retry_prompts from rlaif.llms import LocalLanguageModel, AnnotationIdx import itertools import numpy as np import torchvision
2,345
class Annotator(ABC): def __init__(self, batch_size: int): self.batch_size = batch_size @abstractmethod def __call__(self, batch: Dict[str, np.ndarray], logging_indices: Sequence[int]) -> np.array: """General method which takes two sequences and returns whether the second element is better than the first one, for each batch element, together with a mask of the valid/invalid elements. Args: batch: Dictionary of arrays containing the data for the two sequences (bs, 2, subepisode_length, dims) logging_indices: a list of indices for logging info about computation for each element Return: annotations: int array of shape (bs,) where each element is out of (first, second, tie, invalid) """ pass @property @abstractmethod def data_keys(self) -> List[str]: pass @property @abstractmethod def info_keys(self) -> List[str]: pass @property @abstractmethod def transform(self) -> Optional[Callable]: pass class LanguageModelAnnotator(Annotator): """Annotator that annotates based on the output of a language model.""" def __init__(self, batch_size: int, model_name: str, use_messages: bool, use_blstats: bool, num_gpus: int = 8, logdir: Optional[str] = None, prompt_version: str = 'v0', goal_key: str = '') -> None: assert use_messages or use_blstats self.use_messages = use_messages self.use_blstats = use_blstats self.blstats_keys = [ 'NLE_BL_DEPTH', 'NLE_BL_GOLD', 'NLE_BL_HP', 'NLE_BL_HPMAX', 'NLE_BL_XP', 'NLE_BL_HUNGER' ]
class Annotator(ABC): def __init__(self, batch_size: int): self.batch_size = batch_size @abstractmethod def __call__(self, batch: Dict[str, np.ndarray], logging_indices: Sequence[int]) -> np.array: """General method which takes two sequences and returns whether the second element is better than the first one, for each batch element, together with a mask of the valid/invalid elements. Args: batch: Dictionary of arrays containing the data for the two sequences (bs, 2, subepisode_length, dims) logging_indices: a list of indices for logging info about computation for each element Return: annotations: int array of shape (bs,) where each element is out of (first, second, tie, invalid) """ pass @property @abstractmethod def data_keys(self) -> List[str]: pass @property @abstractmethod def info_keys(self) -> List[str]: pass @property @abstractmethod def transform(self) -> Optional[Callable]: pass class LanguageModelAnnotator(Annotator): """Annotator that annotates based on the output of a language model.""" def __init__(self, batch_size: int, model_name: str, use_messages: bool, use_blstats: bool, num_gpus: int = 8, logdir: Optional[str] = None, prompt_version: str = 'v0', goal_key: str = '') -> None: assert use_messages or use_blstats self.use_messages = use_messages self.use_blstats = use_blstats self.blstats_keys = [ 'NLE_BL_DEPTH', 'NLE_BL_GOLD', 'NLE_BL_HP', 'NLE_BL_HPMAX', 'NLE_BL_XP', 'NLE_BL_HUNGER' ]
self.llm = LocalLanguageModel(system_prompt=system_prompts[prompt_version],
2
2023-10-24 17:45:26+00:00
4k
kyegomez/PALI3
pali3/main.py
[ { "identifier": "UL2", "path": "pali3/ul2.py", "snippet": "class UL2(nn.Module):\n def __init__(\n self,\n *,\n dim,\n tie_token_emb=False,\n ignore_index=-100,\n pad_value=0,\n cross_attn_tokens_dropout=0.0,\n **kwargs,\n ):\n super().__init__()\n enc_kwargs, kwargs = groupby_prefix_and_trim(\"enc_\", kwargs)\n dec_kwargs, kwargs = groupby_prefix_and_trim(\"dec_\", kwargs)\n\n assert (\n \"dim\" not in enc_kwargs and \"dim\" not in dec_kwargs\n ), \"dimension of either encoder or decoder must be set with `dim` keyword\"\n enc_transformer_kwargs = pick_and_pop([\"num_tokens\", \"max_seq_len\"], enc_kwargs)\n enc_transformer_kwargs[\"emb_dropout\"] = enc_kwargs.pop(\"emb_dropout\", 0)\n enc_transformer_kwargs[\"num_memory_tokens\"] = enc_kwargs.pop(\n \"num_memory_tokens\", None\n )\n enc_transformer_kwargs[\"scaled_sinu_pos_emb\"] = enc_kwargs.pop(\n \"scaled_sinu_pos_emb\", False\n )\n enc_transformer_kwargs[\"use_abs_pos_emb\"] = enc_kwargs.pop(\n \"use_abs_pos_emb\", True\n )\n\n dec_transformer_kwargs = pick_and_pop([\"num_tokens\", \"max_seq_len\"], dec_kwargs)\n dec_transformer_kwargs[\"emb_dropout\"] = dec_kwargs.pop(\"emb_dropout\", 0)\n dec_transformer_kwargs[\"scaled_sinu_pos_emb\"] = dec_kwargs.pop(\n \"scaled_sinu_pos_emb\", False\n )\n dec_transformer_kwargs[\"use_abs_pos_emb\"] = dec_kwargs.pop(\n \"use_abs_pos_emb\", True\n )\n\n self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories\n\n self.encoder = TransformerWrapper(\n **enc_transformer_kwargs, attn_layers=Encoder(dim=dim, **enc_kwargs)\n )\n\n self.decoder = TransformerWrapper(\n **dec_transformer_kwargs,\n attn_layers=Decoder(dim=dim, cross_attend=True, **dec_kwargs),\n )\n\n if tie_token_emb:\n self.decoder.token_emb = self.encoder.token_emb\n\n self.decoder = AutoregressiveWrapper(\n self.decoder, ignore_index=ignore_index, pad_value=pad_value\n )\n\n @torch.no_grad()\n def generate(\n self, seq_in, seq_out_start, seq_len, mask=None, attn_mask=None, **kwargs\n ):\n encodings = self.encoder(\n seq_in, mask=mask, attn_mask=attn_mask, return_embeddings=True\n )\n return self.decoder.generate(\n seq_out_start, seq_len, context=encodings, context_mask=mask, **kwargs\n )\n\n def forward(self, src, tgt, mask=None, attn_mask=None, src_prepend_embeds=None):\n if exists(src_prepend_embeds) and exists(mask):\n mask = pad_at_dim(\n mask, (src_prepend_embeds.shape[-2], 0), dim=-1, value=True\n )\n\n enc = self.encoder(\n src,\n mask=mask,\n attn_mask=attn_mask,\n prepend_embeds=src_prepend_embeds,\n return_embeddings=True,\n )\n\n if self.training and self.cross_attn_tokens_dropout > 0:\n enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)\n\n out = self.decoder(tgt, context=enc, context_mask=mask)\n return out" }, { "identifier": "ViTransformerWrapper", "path": "pali3/ul2.py", "snippet": "class ViTransformerWrapper(nn.Module):\n def __init__(\n self,\n *,\n image_size,\n patch_size,\n attn_layers,\n channels=3,\n num_classes=None,\n post_emb_norm=False,\n num_register_tokens=0,\n emb_dropout=0.0,\n ):\n super().__init__()\n assert isinstance(attn_layers, Encoder), \"attention layers must be an Encoder\"\n assert divisible_by(\n image_size, patch_size\n ), \"image dimensions must be divisible by the patch size\"\n dim = attn_layers.dim\n num_patches = (image_size // patch_size) ** 2\n patch_dim = channels * patch_size**2\n\n self.patch_size = patch_size\n\n self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))\n\n has_register_tokens = num_register_tokens > 0\n self.has_register_tokens = has_register_tokens\n\n if has_register_tokens:\n self.register_tokens = nn.Parameter(torch.randn(num_register_tokens, dim))\n\n self.patch_to_embedding = nn.Sequential(\n nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim)\n )\n\n self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()\n self.dropout = nn.Dropout(emb_dropout)\n\n self.attn_layers = attn_layers\n\n self.mlp_head = (\n nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()\n )\n\n def forward(self, img, return_embeddings=False):\n b, p = img.shape[0], self.patch_size\n\n x = rearrange(img, \"b c (h p1) (w p2) -> b (h w) (p1 p2 c)\", p1=p, p2=p)\n x = self.patch_to_embedding(x)\n n = x.shape[1]\n\n x = x + self.pos_embedding[:, :n]\n\n x = self.post_emb_norm(x)\n x = self.dropout(x)\n\n if self.has_register_tokens:\n r = repeat(self.register_tokens, \"n d -> b n d\", b=b)\n x, ps = pack((x, r), \"b * d\")\n\n x = self.attn_layers(x)\n\n if self.has_register_tokens:\n x, _ = unpack(x, ps, \"b * d\")\n\n if not exists(self.mlp_head) or return_embeddings:\n return x\n\n x = x.mean(dim=-2)\n return self.mlp_head(x)" }, { "identifier": "Encoder", "path": "pali3/ul2.py", "snippet": "class Encoder(AttentionLayers):\n def __init__(self, **kwargs):\n assert \"causal\" not in kwargs, \"cannot set causality on encoder\"\n super().__init__(causal=False, **kwargs)" } ]
import torch from torch import nn from pali3.ul2 import UL2, ViTransformerWrapper, Encoder
2,051
class PrependTokens(nn.Module): """ # Initialize models vit_model = ViTModel() text_embedding = TextEmbedding("bert-base-uncased") # Initialize PrependVisualTokens prepend_visual_tokens = PrependVisualTokens(vit_model, text_embedding) # Process image and text img = torch.randn(1, 3, 256, 256) # dummy image text = "This is a sample text" combined_tokens = prepend_visual_tokens.process(img, text) """ def __init__( self, vit, text_embedding, ): super().__init__() self.vit = vit self.text_embedding = text_embedding def forward(self, x): visual_tokens = self.vit.process(x) text_tokens = self.text_embedding.process(x) combined_tokens = torch.cat((visual_tokens, text_tokens), dim=1) return combined_tokens class VitModel: """ VitModel is a wrapper around the ViT model from the PyTorch Image Models library. Args: image_size (int, optional): Size of the image. Defaults to 256. patch_size (int, optional): Size of the patch. Defaults to 32. dim (int, optional): Dimension of the model. Defaults to 512. depth (int, optional): Depth of the model. Defaults to 6. heads (int, optional): Number of heads in the model. Defaults to 8. Raises: ValueError: If the input image is None. ValueError: If the input image shape is not [*, 3, image_size, image_size]. Examples: x = torch.randn(1, 3, 256, 256) model = VitModel() out = model.process(x) print(out) """ def __init__( self, image_size=256, patch_size=32, dim=512, depth=6, heads=8, *args, **kwargs ): self.image_size = image_size self.patch_size = patch_size self.dim = dim self.depth = depth self.heads = heads
class PrependTokens(nn.Module): """ # Initialize models vit_model = ViTModel() text_embedding = TextEmbedding("bert-base-uncased") # Initialize PrependVisualTokens prepend_visual_tokens = PrependVisualTokens(vit_model, text_embedding) # Process image and text img = torch.randn(1, 3, 256, 256) # dummy image text = "This is a sample text" combined_tokens = prepend_visual_tokens.process(img, text) """ def __init__( self, vit, text_embedding, ): super().__init__() self.vit = vit self.text_embedding = text_embedding def forward(self, x): visual_tokens = self.vit.process(x) text_tokens = self.text_embedding.process(x) combined_tokens = torch.cat((visual_tokens, text_tokens), dim=1) return combined_tokens class VitModel: """ VitModel is a wrapper around the ViT model from the PyTorch Image Models library. Args: image_size (int, optional): Size of the image. Defaults to 256. patch_size (int, optional): Size of the patch. Defaults to 32. dim (int, optional): Dimension of the model. Defaults to 512. depth (int, optional): Depth of the model. Defaults to 6. heads (int, optional): Number of heads in the model. Defaults to 8. Raises: ValueError: If the input image is None. ValueError: If the input image shape is not [*, 3, image_size, image_size]. Examples: x = torch.randn(1, 3, 256, 256) model = VitModel() out = model.process(x) print(out) """ def __init__( self, image_size=256, patch_size=32, dim=512, depth=6, heads=8, *args, **kwargs ): self.image_size = image_size self.patch_size = patch_size self.dim = dim self.depth = depth self.heads = heads
self.vit = ViTransformerWrapper(
1
2023-10-16 15:36:54+00:00
4k
pgorecki/lato
tests/test_application_example_from_readme.py
[ { "identifier": "Application", "path": "lato/application.py", "snippet": "class Application(ApplicationModule):\n dependency_provider_class = SimpleDependencyProvider\n\n def __init__(self, name=__name__, dependency_provider=None, **kwargs):\n super().__init__(name)\n self.dependency_provider = (\n dependency_provider or self.dependency_provider_class(**kwargs)\n )\n self._transaction_context_factory = None\n self._on_enter_transaction_context = lambda ctx: None\n self._on_exit_transaction_context = lambda ctx, exception=None: None\n self._transaction_middlewares = []\n self._composers: dict[str | Task, Callable] = {}\n\n def get_dependency(self, identifier: Any) -> Any:\n \"\"\"Get a dependency from the dependency provider\"\"\"\n return self.dependency_provider.get_dependency(identifier)\n\n def __getitem__(self, item) -> Any:\n return self.get_dependency(item)\n\n def call(self, func: Callable | str, *args, **kwargs):\n if isinstance(func, str):\n try:\n func = next(self.iterate_handlers_for(alias=func))\n except StopIteration:\n raise ValueError(f\"Handler not found\", func)\n\n with self.transaction_context() as ctx:\n result = ctx.call(func, *args, **kwargs)\n return result\n\n def execute(self, task: Task) -> tuple[Any, ...]:\n with self.transaction_context() as ctx:\n results = ctx.execute(task)\n return results\n\n def query(self, task: Task) -> Any:\n results = self.execute(task)\n alias = task.__class__\n composer = self._composers.get(alias, compose)\n return composer(results)\n\n def emit(self, event: Event) -> dict[Callable, Any]:\n with self.transaction_context() as ctx:\n result = ctx.emit(event)\n return result\n\n def on_enter_transaction_context(self, func):\n \"\"\"\n Decorator for registering a function to be called when entering a transaction context\n\n :param func:\n :return:\n \"\"\"\n self._on_enter_transaction_context = func\n return func\n\n def on_exit_transaction_context(self, func):\n \"\"\"\n Decorator for registering a function to be called when exiting a transaction context\n\n :param func:\n :return:\n \"\"\"\n self._on_exit_transaction_context = func\n return func\n\n def on_create_transaction_context(self, func):\n \"\"\"\n Decorator for overrinding default transaction context creation\n\n :param func:\n :return:\n \"\"\"\n self._transaction_context_factory = func\n return func\n\n def transaction_middleware(self, middleware_func):\n \"\"\"\n Decorator for registering a middleware function to be called when executing a function in a transaction context\n :param middleware_func:\n :return:\n \"\"\"\n self._transaction_middlewares.insert(0, middleware_func)\n return middleware_func\n\n def compose(self, alias):\n \"\"\"\n Decorator for composing results of tasks\n \"\"\"\n\n def decorator(func):\n self._composers[alias] = func\n return func\n\n return decorator\n\n def transaction_context(self, **dependencies) -> TransactionContext:\n \"\"\"\n Creates a transaction context with the application dependencies\n\n :param dependencies:\n :return:\n \"\"\"\n if self._transaction_context_factory:\n ctx = self._transaction_context_factory(**dependencies)\n else:\n dp = self.dependency_provider.copy(**dependencies)\n ctx = TransactionContext(dependency_provider=dp)\n\n ctx.configure(\n on_enter_transaction_context=self._on_enter_transaction_context,\n on_exit_transaction_context=self._on_exit_transaction_context,\n middlewares=self._transaction_middlewares,\n handlers_iterator=self.iterate_handlers_for,\n )\n return ctx" }, { "identifier": "Event", "path": "lato/message.py", "snippet": "class Event(Message):\n ..." }, { "identifier": "Task", "path": "lato/message.py", "snippet": "class Task(Message):\n ..." }, { "identifier": "TransactionContext", "path": "lato/transaction_context.py", "snippet": "class TransactionContext:\n \"\"\"A context spanning a single transaction for execution of a function\"\"\"\n\n dependency_provider_factory = SimpleDependencyProvider\n\n def __init__(\n self, dependency_provider: DependencyProvider | None = None, *args, **kwargs\n ):\n self.dependency_provider = (\n dependency_provider or self.dependency_provider_factory(*args, **kwargs)\n )\n self.resolved_kwargs: dict[str, Any] = {}\n self.current_action: tuple[str | Message, Any] | None = None\n self._on_enter_transaction_context = lambda ctx: None\n self._on_exit_transaction_context = lambda ctx, exception=None: None\n self._middlewares: list[Callable] = []\n self._handlers_iterator: Iterator = lambda alias: iter([])\n\n def configure(\n self,\n on_enter_transaction_context=None,\n on_exit_transaction_context=None,\n middlewares=None,\n handlers_iterator=None,\n ):\n if on_enter_transaction_context:\n self._on_enter_transaction_context = on_enter_transaction_context\n if on_exit_transaction_context:\n self._on_exit_transaction_context = on_exit_transaction_context\n if middlewares:\n self._middlewares = middlewares\n if handlers_iterator:\n self._handlers_iterator = handlers_iterator\n\n def begin(self):\n \"\"\"Should be used to start a transaction\"\"\"\n self._on_enter_transaction_context(self)\n\n def end(self, exception=None):\n \"\"\"Should be used to commit/end a transaction\"\"\"\n self._on_exit_transaction_context(self, exception)\n\n def iterate_handlers_for(self, alias: str):\n yield from self._handlers_iterator(alias)\n\n def __enter__(self):\n self.begin()\n return self\n\n def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):\n self.end(exc_val)\n\n def _wrap_with_middlewares(self, handler_func):\n p = handler_func\n for middleware in self._middlewares:\n p = partial(middleware, self, p)\n return p\n\n def call(self, func: Callable, *func_args: Any, **func_kwargs: Any) -> Any:\n \"\"\"\n Call a function with the given arguments and keyword arguments.\n Any dependencies will be resolved from the dependency provider.\n \"\"\"\n self.dependency_provider.update(ctx=as_type(self, TransactionContext))\n\n resolved_kwargs = self.dependency_provider.resolve_func_params(\n func, func_args, func_kwargs\n )\n self.resolved_kwargs.update(resolved_kwargs)\n p = partial(func, **resolved_kwargs)\n wrapped_handler = self._wrap_with_middlewares(p)\n result = wrapped_handler()\n return result\n\n def execute(self, task: Task) -> tuple[Any, ...]:\n results = self.emit(task)\n values = tuple(results.values())\n if len(values) == 0:\n raise ValueError(\"No handlers found for task\", task)\n return values\n\n def emit(self, message: str | Message, *args, **kwargs) -> dict[Callable, Any]:\n \"\"\"Emit a message by calling all handlers for that message\"\"\"\n alias = type(message) if isinstance(message, Message) else message\n\n if isinstance(message, Message):\n args = (message, *args)\n\n all_results = OrderedDict()\n for handler in self._handlers_iterator(alias):\n # FIXME: push and pop current action instead of setting it\n self.current_action = (message, handler)\n result = self.call(handler, *args, **kwargs)\n all_results[handler] = result\n return all_results\n\n def get_dependency(self, identifier: Any) -> Any:\n \"\"\"Get a dependency from the dependency provider\"\"\"\n return self.dependency_provider.get_dependency(identifier)\n\n def set_dependency(self, identifier: Any, dependency: Any) -> None:\n \"\"\"Set a dependency in the dependency provider\"\"\"\n self.dependency_provider.register_dependency(identifier, dependency)\n\n def __getitem__(self, item) -> Any:\n return self.get_dependency(item)" } ]
from uuid import uuid4 from lato import Application, Event, Task, TransactionContext
1,973
class UserService: def create_user(self, email, password): ... class EmailService: def send_welcome_email(self, email): ... def test_application_example_from_readme():
class UserService: def create_user(self, email, password): ... class EmailService: def send_welcome_email(self, email): ... def test_application_example_from_readme():
app = Application(
0
2023-10-21 11:33:05+00:00
4k
NVIDIA/trt-llm-rag-windows
app.py
[ { "identifier": "TrtLlmAPI", "path": "trt_llama_api.py", "snippet": "class TrtLlmAPI(CustomLLM):\n model_path: Optional[str] = Field(\n description=\"The path to the trt engine.\"\n )\n temperature: float = Field(description=\"The temperature to use for sampling.\")\n max_new_tokens: int = Field(description=\"The maximum number of tokens to generate.\")\n context_window: int = Field(\n description=\"The maximum number of context tokens for the model.\"\n )\n messages_to_prompt: Callable = Field(\n description=\"The function to convert messages to a prompt.\", exclude=True\n )\n completion_to_prompt: Callable = Field(\n description=\"The function to convert a completion to a prompt.\", exclude=True\n )\n generate_kwargs: Dict[str, Any] = Field(\n default_factory=dict, description=\"Kwargs used for generation.\"\n )\n model_kwargs: Dict[str, Any] = Field(\n default_factory=dict, description=\"Kwargs used for model initialization.\"\n )\n verbose: bool = Field(description=\"Whether to print verbose output.\")\n\n _model: Any = PrivateAttr()\n _model_config: Any = PrivateAttr()\n _tokenizer: Any = PrivateAttr()\n _max_new_tokens = PrivateAttr()\n _sampling_config = PrivateAttr()\n _verbose = PrivateAttr()\n\n def __init__(\n self,\n model_path: Optional[str] = None,\n engine_name: Optional[str] = None,\n tokenizer_dir: Optional[str] = None,\n temperature: float = 0.1,\n max_new_tokens: int = DEFAULT_NUM_OUTPUTS,\n context_window: int = DEFAULT_CONTEXT_WINDOW,\n messages_to_prompt: Optional[Callable] = None,\n completion_to_prompt: Optional[Callable] = None,\n callback_manager: Optional[CallbackManager] = None,\n generate_kwargs: Optional[Dict[str, Any]] = None,\n model_kwargs: Optional[Dict[str, Any]] = None,\n verbose: bool = False\n ) -> None:\n\n model_kwargs = model_kwargs or {}\n model_kwargs.update({\"n_ctx\": context_window, \"verbose\": verbose})\n self._max_new_tokens = max_new_tokens\n self._verbose = verbose\n # check if model is cached\n if model_path is not None:\n if not os.path.exists(model_path):\n raise ValueError(\n \"Provided model path does not exist. \"\n \"Please check the path or provide a model_url to download.\"\n )\n else:\n engine_dir = model_path\n engine_dir_path = Path(engine_dir)\n config_path = engine_dir_path / 'config.json'\n\n # config function\n with open(config_path, 'r') as f:\n config = json.load(f)\n use_gpt_attention_plugin = config['plugin_config']['gpt_attention_plugin']\n remove_input_padding = config['plugin_config']['remove_input_padding']\n tp_size = config['builder_config']['tensor_parallel']\n pp_size = config['builder_config']['pipeline_parallel']\n world_size = tp_size * pp_size\n assert world_size == tensorrt_llm.mpi_world_size(), \\\n f'Engine world size ({world_size}) != Runtime world size ({tensorrt_llm.mpi_world_size()})'\n num_heads = config['builder_config']['num_heads'] // tp_size\n hidden_size = config['builder_config']['hidden_size'] // tp_size\n vocab_size = config['builder_config']['vocab_size']\n num_layers = config['builder_config']['num_layers']\n num_kv_heads = config['builder_config'].get('num_kv_heads', num_heads)\n paged_kv_cache = config['plugin_config']['paged_kv_cache']\n if config['builder_config'].get('multi_query_mode', False):\n tensorrt_llm.logger.warning(\n \"`multi_query_mode` config is deprecated. Please rebuild the engine.\"\n )\n num_kv_heads = 1\n num_kv_heads = (num_kv_heads + tp_size - 1) // tp_size\n\n self._model_config = ModelConfig(num_heads=num_heads,\n num_kv_heads=num_kv_heads,\n hidden_size=hidden_size,\n vocab_size=vocab_size,\n num_layers=num_layers,\n gpt_attention_plugin=use_gpt_attention_plugin,\n paged_kv_cache=paged_kv_cache,\n remove_input_padding=remove_input_padding)\n\n assert pp_size == 1, 'Python runtime does not support pipeline parallelism'\n world_size = tp_size * pp_size\n\n runtime_rank = tensorrt_llm.mpi_rank()\n runtime_mapping = tensorrt_llm.Mapping(world_size,\n runtime_rank,\n tp_size=tp_size,\n pp_size=pp_size)\n torch.cuda.set_device(runtime_rank % runtime_mapping.gpus_per_node)\n self._tokenizer = LlamaTokenizer.from_pretrained(tokenizer_dir, legacy=False)\n self._sampling_config = SamplingConfig(end_id=EOS_TOKEN,\n pad_id=PAD_TOKEN,\n num_beams=1,\n temperature=temperature)\n\n serialize_path = engine_dir_path / engine_name\n with open(serialize_path, 'rb') as f:\n engine_buffer = f.read()\n decoder = tensorrt_llm.runtime.GenerationSession(self._model_config,\n engine_buffer,\n runtime_mapping,\n debug_mode=False)\n self._model = decoder\n messages_to_prompt = messages_to_prompt or generic_messages_to_prompt\n completion_to_prompt = completion_to_prompt or (lambda x: x)\n\n generate_kwargs = generate_kwargs or {}\n generate_kwargs.update(\n {\"temperature\": temperature, \"max_tokens\": max_new_tokens}\n )\n\n super().__init__(\n model_path=model_path,\n temperature=temperature,\n context_window=context_window,\n max_new_tokens=max_new_tokens,\n messages_to_prompt=messages_to_prompt,\n completion_to_prompt=completion_to_prompt,\n callback_manager=callback_manager,\n generate_kwargs=generate_kwargs,\n model_kwargs=model_kwargs,\n verbose=verbose,\n )\n\n @classmethod\n def class_name(cls) -> str:\n \"\"\"Get class name.\"\"\"\n return \"TrtLlmAPI\"\n\n @property\n def metadata(self) -> LLMMetadata:\n \"\"\"LLM metadata.\"\"\"\n return LLMMetadata(\n context_window=self.context_window,\n num_output=self.max_new_tokens,\n model_name=self.model_path,\n )\n\n @llm_chat_callback()\n def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:\n prompt = self.messages_to_prompt(messages)\n completion_response = self.complete(prompt, formatted=True, **kwargs)\n return completion_response_to_chat_response(completion_response)\n\n @llm_completion_callback()\n def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:\n self.generate_kwargs.update({\"stream\": False})\n\n is_formatted = kwargs.pop(\"formatted\", False)\n if not is_formatted:\n prompt = self.completion_to_prompt(prompt)\n\n input_text = prompt\n input_ids, input_lengths = self.parse_input(input_text, self._tokenizer,\n EOS_TOKEN,\n self._model_config)\n\n max_input_length = torch.max(input_lengths).item()\n self._model.setup(input_lengths.size(0), max_input_length, self._max_new_tokens, 1) # beam size is set to 1\n if self._verbose:\n start_time = time.time()\n\n output_ids = self._model.decode(input_ids, input_lengths, self._sampling_config)\n torch.cuda.synchronize()\n\n elapsed_time = None\n if self._verbose:\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n\n output_txt, output_token_ids = self.get_output(output_ids,\n input_lengths,\n self._max_new_tokens,\n self._tokenizer)\n\n if self._verbose:\n print(f\"Input context length : {input_ids.shape[1]}\")\n print(f\"Inference time : {elapsed_time:.2f} seconds\")\n print(f\"Output context length : {len(output_token_ids)} \")\n print(f\"Inference token/sec : {(len(output_token_ids) / elapsed_time):2f}\")\n\n # call garbage collected after inference\n torch.cuda.empty_cache()\n gc.collect()\n\n return CompletionResponse(text=output_txt, raw=self.generate_completion_dict(output_txt))\n\n def parse_input(self, input_text: str, tokenizer, end_id: int,\n remove_input_padding: bool):\n input_tokens = []\n\n input_tokens.append(\n tokenizer.encode(input_text, add_special_tokens=False))\n\n input_lengths = torch.tensor([len(x) for x in input_tokens],\n dtype=torch.int32,\n device='cuda')\n if remove_input_padding:\n input_ids = np.concatenate(input_tokens)\n input_ids = torch.tensor(input_ids, dtype=torch.int32,\n device='cuda').unsqueeze(0)\n else:\n input_ids = torch.nested.to_padded_tensor(\n torch.nested.nested_tensor(input_tokens, dtype=torch.int32),\n end_id).cuda()\n\n return input_ids, input_lengths\n\n def remove_extra_eos_ids(self, outputs):\n outputs.reverse()\n while outputs and outputs[0] == 2:\n outputs.pop(0)\n outputs.reverse()\n outputs.append(2)\n return outputs\n\n def get_output(self, output_ids, input_lengths, max_output_len, tokenizer):\n num_beams = output_ids.size(1)\n output_text = \"\"\n outputs = None\n for b in range(input_lengths.size(0)):\n for beam in range(num_beams):\n output_begin = input_lengths[b]\n output_end = input_lengths[b] + max_output_len\n outputs = output_ids[b][beam][output_begin:output_end].tolist()\n outputs = self.remove_extra_eos_ids(outputs)\n output_text = tokenizer.decode(outputs)\n\n return output_text, outputs\n\n def generate_completion_dict(self, text_str):\n \"\"\"\n Generate a dictionary for text completion details.\n Returns:\n dict: A dictionary containing completion details.\n \"\"\"\n completion_id: str = f\"cmpl-{str(uuid.uuid4())}\"\n created: int = int(time.time())\n model_name: str = self._model if self._model is not None else self.model_path\n return {\n \"id\": completion_id,\n \"object\": \"text_completion\",\n \"created\": created,\n \"model\": model_name,\n \"choices\": [\n {\n \"text\": text_str,\n \"index\": 0,\n \"logprobs\": None,\n \"finish_reason\": 'stop'\n }\n ],\n \"usage\": {\n \"prompt_tokens\": None,\n \"completion_tokens\": None,\n \"total_tokens\": None\n }\n }\n\n @llm_completion_callback()\n def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:\n pass" }, { "identifier": "FaissEmbeddingStorage", "path": "faiss_vector_storage.py", "snippet": "class FaissEmbeddingStorage:\n\n def __init__(self, data_dir, dimension=384):\n self.d = dimension\n self.data_dir = data_dir\n self.index = self.initialize_index()\n\n def initialize_index(self):\n if os.path.exists(\"storage-default\") and os.listdir(\"storage-default\"):\n print(\"Using the presisted value\")\n vector_store = FaissVectorStore.from_persist_dir(\"storage-default\")\n storage_context = StorageContext.from_defaults(\n vector_store=vector_store, persist_dir=\"storage-default\"\n )\n index = load_index_from_storage(storage_context=storage_context)\n return index\n else:\n print(\"generating new values\")\n documents = SimpleDirectoryReader(self.data_dir).load_data()\n faiss_index = faiss.IndexFlatL2(self.d)\n vector_store = FaissVectorStore(faiss_index=faiss_index)\n storage_context = StorageContext.from_defaults(vector_store=vector_store)\n index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n index.storage_context.persist(persist_dir = \"storage-default\")\n return index\n\n def get_query_engine(self):\n return self.index.as_query_engine()" } ]
import time import gradio as gr import argparse from trt_llama_api import TrtLlmAPI #llama_index does not currently support TRT-LLM. The trt_llama_api.py file defines a llama_index compatible interface for TRT-LLM. from langchain.embeddings.huggingface import HuggingFaceEmbeddings from llama_index import LangchainEmbedding, ServiceContext from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt from llama_index import set_global_service_context from faiss_vector_storage import FaissEmbeddingStorage
3,532
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # Create an argument parser parser = argparse.ArgumentParser(description='NVIDIA Chatbot Parameters') # Add arguments parser.add_argument('--trt_engine_path', type=str, required=True, help="Path to the TensorRT engine.", default="") parser.add_argument('--trt_engine_name', type=str, required=True, help="Name of the TensorRT engine.", default="") parser.add_argument('--tokenizer_dir_path', type=str, required=True, help="Directory path for the tokenizer.", default="") parser.add_argument('--embedded_model', type=str, help="Name or path of the embedded model. Defaults to 'sentence-transformers/all-MiniLM-L6-v2' if " "not provided.", default='sentence-transformers/all-MiniLM-L6-v2') parser.add_argument('--data_dir', type=str, required=False, help="Directory path for data.", default="./dataset") parser.add_argument('--verbose', type=bool, required=False, help="Enable verbose logging.", default=False) # Parse the arguments args = parser.parse_args() # Use the provided arguments trt_engine_path = args.trt_engine_path trt_engine_name = args.trt_engine_name tokenizer_dir_path = args.tokenizer_dir_path embedded_model = args.embedded_model data_dir = args.data_dir verbose = args.verbose # create trt_llm engine object
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # Create an argument parser parser = argparse.ArgumentParser(description='NVIDIA Chatbot Parameters') # Add arguments parser.add_argument('--trt_engine_path', type=str, required=True, help="Path to the TensorRT engine.", default="") parser.add_argument('--trt_engine_name', type=str, required=True, help="Name of the TensorRT engine.", default="") parser.add_argument('--tokenizer_dir_path', type=str, required=True, help="Directory path for the tokenizer.", default="") parser.add_argument('--embedded_model', type=str, help="Name or path of the embedded model. Defaults to 'sentence-transformers/all-MiniLM-L6-v2' if " "not provided.", default='sentence-transformers/all-MiniLM-L6-v2') parser.add_argument('--data_dir', type=str, required=False, help="Directory path for data.", default="./dataset") parser.add_argument('--verbose', type=bool, required=False, help="Enable verbose logging.", default=False) # Parse the arguments args = parser.parse_args() # Use the provided arguments trt_engine_path = args.trt_engine_path trt_engine_name = args.trt_engine_name tokenizer_dir_path = args.tokenizer_dir_path embedded_model = args.embedded_model data_dir = args.data_dir verbose = args.verbose # create trt_llm engine object
llm = TrtLlmAPI(
0
2023-10-18 12:57:53+00:00
4k
instadeepai/flashbax
flashbax/buffers/prioritised_flat_buffer.py
[ { "identifier": "ExperiencePair", "path": "flashbax/buffers/flat_buffer.py", "snippet": "class ExperiencePair(NamedTuple, Generic[Experience]):\n first: Experience\n second: Experience" }, { "identifier": "TransitionSample", "path": "flashbax/buffers/flat_buffer.py", "snippet": "class TransitionSample(Generic[Experience]):\n experience: ExperiencePair[Experience]" }, { "identifier": "validate_flat_buffer_args", "path": "flashbax/buffers/flat_buffer.py", "snippet": "def validate_flat_buffer_args(\n max_length: int,\n min_length: int,\n sample_batch_size: int,\n add_batch_size: int,\n):\n \"\"\"Validates the arguments for the flat buffer.\"\"\"\n\n validate_sample_batch_size(sample_batch_size, max_length)\n validate_min_length(min_length, add_batch_size, max_length)\n validate_max_length_add_batch_size(max_length, add_batch_size)" }, { "identifier": "Indices", "path": "flashbax/buffers/prioritised_trajectory_buffer.py", "snippet": "SET_BATCH_FN = {\n \"tpu\": sum_tree.set_batch_bincount,\n \"gpu\": sum_tree.set_batch_bincount,\n \"cpu\": sum_tree.set_batch_scan,\n}\nclass PrioritisedTrajectoryBufferState(TrajectoryBufferState, Generic[Experience]):\nclass PrioritisedTrajectoryBufferSample(TrajectoryBufferSample, Generic[Experience]):\nclass PrioritisedTrajectoryBuffer(\n TrajectoryBuffer[Experience, BufferState, BufferSample]\n):\ndef get_sum_tree_capacity(\n max_length_time_axis: int, period: int, add_batch_size: int\n) -> int:\ndef prioritised_init(\n experience: Experience,\n add_batch_size: int,\n max_length_time_axis: int,\n period: int,\n) -> PrioritisedTrajectoryBufferState[Experience]:\ndef calculate_item_indices_and_priorities(\n state: PrioritisedTrajectoryBufferState,\n sample_sequence_length: int,\n period: int,\n add_sequence_length: int,\n add_batch_size: int,\n max_length_time_axis: int,\n) -> Tuple[Array, Array]:\ndef _get_unnormalised_prob(\n add_batch_size: int,\n max_num_items: int,\n priority_mask: Array,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef _get_priority_indices(\n add_batch_size: int,\n max_length_time_axis: int,\n max_num_items: int,\n period: int,\n starting_priority_item_index: Array,\n) -> Array:\ndef _get_ending_data_idx(\n add_sequence_length: int,\n max_length_time_axis: int,\n sample_sequence_length: int,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef _get_starting_priority_item_idx(\n max_length_time_axis: int,\n period: int,\n previous_valid_data_index: Array,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef get_prev_valid_data_idx(\n max_length_time_axis: int,\n max_subsequence_data_index: int,\n sample_sequence_length: int,\n state: PrioritisedTrajectoryBufferState,\n) -> Array:\ndef prioritised_add(\n state: PrioritisedTrajectoryBufferState[Experience],\n batch: Experience,\n sample_sequence_length: int,\n period: int,\n device: str,\n) -> PrioritisedTrajectoryBufferState[Experience]:\ndef prioritised_sample(\n state: PrioritisedTrajectoryBufferState[Experience],\n rng_key: chex.PRNGKey,\n batch_size: int,\n sequence_length: int,\n period: int,\n) -> PrioritisedTrajectoryBufferSample[Experience]:\ndef _get_sample_trajectories(\n item_indices: Array,\n max_length_time_axis: int,\n period: int,\n sequence_length: int,\n state: PrioritisedTrajectoryBufferState,\n):\ndef set_priorities(\n state: PrioritisedTrajectoryBufferState[Experience],\n indices: Indices,\n priorities: Priorities,\n priority_exponent: float,\n device: str,\n) -> PrioritisedTrajectoryBufferState[Experience]:\ndef validate_priority_exponent(priority_exponent: float):\ndef validate_device(device: str):\ndef make_prioritised_trajectory_buffer(\n add_batch_size: int,\n sample_batch_size: int,\n sample_sequence_length: int,\n period: int,\n min_length_time_axis: int,\n max_size: Optional[int] = None,\n max_length_time_axis: Optional[int] = None,\n priority_exponent: float = 0.6,\n device: str = \"cpu\",\n) -> PrioritisedTrajectoryBuffer:" }, { "identifier": "add_dim_to_args", "path": "flashbax/utils.py", "snippet": "def add_dim_to_args(\n func: Callable,\n axis: int = 1,\n starting_arg_index: Optional[int] = 1,\n ending_arg_index: Optional[int] = None,\n kwargs_on_device_keys: Optional[list] = None,\n):\n \"\"\"Adds a dimension to the specified arguments of a function.\n\n Args:\n func (Callable): The function to wrap.\n axis (int, optional): The axis to add the dimension to. Defaults to 1.\n starting_arg_index (Optional[int], optional): The index of the first argument to\n add the dimension to. Defaults to 1.\n ending_arg_index (Optional[int], optional): The index of the last argument to\n add the dimension to. Defaults to None.\n kwargs_on_device_keys (Optional[list], optional): The keys of the kwargs that should\n be added to. Defaults to None.\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if ending_arg_index is None:\n end_index = len(args)\n else:\n end_index = ending_arg_index\n\n args = list(args)\n args[starting_arg_index:end_index] = [\n jax.tree_map(lambda x: jnp.expand_dims(x, axis=axis), a)\n for a in args[starting_arg_index:end_index]\n ]\n for k, v in kwargs.items():\n if kwargs_on_device_keys is None or k in kwargs_on_device_keys:\n kwargs[k] = jax.tree_map(lambda x: jnp.expand_dims(x, axis=1), v)\n return func(*args, **kwargs)\n\n return wrapper" } ]
import warnings import jax from typing import TYPE_CHECKING, Optional from chex import PRNGKey from flashbax.buffers.flat_buffer import ( ExperiencePair, TransitionSample, validate_flat_buffer_args, ) from flashbax.buffers.prioritised_trajectory_buffer import ( Indices, PrioritisedTrajectoryBuffer, PrioritisedTrajectoryBufferState, Probabilities, make_prioritised_trajectory_buffer, validate_device, ) from dataclasses import dataclass from chex import dataclass from flashbax.utils import add_dim_to_args
1,917
# Copyright 2023 InstaDeep Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if TYPE_CHECKING: # https://github.com/python/mypy/issues/6239 else: @dataclass(frozen=True) class PrioritisedTransitionSample(TransitionSample): indices: Indices priorities: Probabilities def validate_priority_exponent(priority_exponent: float) -> None: """Validates the priority exponent.""" if priority_exponent < 0 or priority_exponent > 1: raise ValueError( f"priority_exponent must be in the range [0, 1], but was {priority_exponent}" ) def make_prioritised_flat_buffer( max_length: int, min_length: int, sample_batch_size: int, add_sequences: bool = False, add_batch_size: Optional[int] = None, priority_exponent: float = 0.6, device: str = "cpu",
# Copyright 2023 InstaDeep Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if TYPE_CHECKING: # https://github.com/python/mypy/issues/6239 else: @dataclass(frozen=True) class PrioritisedTransitionSample(TransitionSample): indices: Indices priorities: Probabilities def validate_priority_exponent(priority_exponent: float) -> None: """Validates the priority exponent.""" if priority_exponent < 0 or priority_exponent > 1: raise ValueError( f"priority_exponent must be in the range [0, 1], but was {priority_exponent}" ) def make_prioritised_flat_buffer( max_length: int, min_length: int, sample_batch_size: int, add_sequences: bool = False, add_batch_size: Optional[int] = None, priority_exponent: float = 0.6, device: str = "cpu",
) -> PrioritisedTrajectoryBuffer:
3
2023-10-17 10:57:14+00:00
4k
TheDuckAI/DuckTrack
ducktrack/app.py
[ { "identifier": "close_obs", "path": "ducktrack/obs_client.py", "snippet": "def close_obs(obs_process: subprocess.Popen):\n if obs_process:\n obs_process.terminate()\n try:\n obs_process.wait(timeout=5)\n except subprocess.TimeoutExpired:\n obs_process.kill()" }, { "identifier": "is_obs_running", "path": "ducktrack/obs_client.py", "snippet": "def is_obs_running() -> bool:\n try:\n for process in psutil.process_iter(attrs=[\"pid\", \"name\"]):\n if \"obs\" in process.info[\"name\"].lower():\n return True\n return False\n except:\n raise Exception(\"Could not check if OBS is running already. Please check manually.\")" }, { "identifier": "open_obs", "path": "ducktrack/obs_client.py", "snippet": "def open_obs() -> subprocess.Popen:\n try:\n obs_path = find_obs()\n if system() == \"Windows\":\n # you have to change the working directory first for OBS to find the correct locale on windows\n os.chdir(os.path.dirname(obs_path))\n obs_path = os.path.basename(obs_path)\n return subprocess.Popen([obs_path, \"--startreplaybuffer\", \"--minimize-to-tray\"])\n except:\n raise Exception(\"Failed to find OBS, please open OBS manually.\")" }, { "identifier": "Player", "path": "ducktrack/playback.py", "snippet": "class Player:\n \"\"\"\n Plays back recordings.\n \"\"\"\n \n def __init__(self):\n self.stop_playback = False\n self.listener = KeyCombinationListener()\n \n def stop_comb_pressed():\n self.stop_playback = True\n return False\n \n self.listener.add_comb((\"shift\", \"esc\"), stop_comb_pressed)\n self.listener.start()\n \n def play(self, recording_path: str):\n with open(os.path.join(recording_path, \"events.jsonl\"), \"r\") as f:\n events = [json.loads(line) for line in f.readlines()]\n \n with open(os.path.join(recording_path, \"metadata.json\"), \"r\") as f:\n metadata = json.load(f)\n \n self.playback(events, metadata)\n \n def playback(self, events: list[dict], metadata: dict):\n if metadata[\"system\"] == \"Windows\":\n fix_windows_dpi_scaling()\n \n mouse_controller = MouseController()\n keyboard_controller = KeyboardController()\n\n if not events:\n self.listener.stop()\n return\n\n presses_to_skip = 0\n releases_to_skip = 0\n \n in_click_sequence = False\n \n for i, event in enumerate(events):\n start_time = time.perf_counter()\n \n if self.stop_playback:\n return\n \n def do_mouse_press(button):\n for j, second_event in enumerate(events[i+1:]):\n # make sure the time between mouse clicks is less than 500ms\n if second_event[\"time_stamp\"] - event[\"time_stamp\"] > 0.5:\n break\n \n if \"x\" in second_event and \"y\" in second_event:\n # if the mouse moves out of the click radius/rectangle, it is not a click sequence\n if math.sqrt((second_event[\"y\"] - event[\"y\"]) ** 2 +\n (second_event[\"x\"] - event[\"x\"]) ** 2) > 4:\n break\n \n if second_event[\"action\"] == \"click\" and second_event[\"pressed\"]:\n for k, third_event in enumerate(events[i+j+2:]):\n if third_event[\"time_stamp\"] - second_event[\"time_stamp\"] > 0.5:\n break\n \n if \"x\" in third_event and \"y\" in third_event:\n if math.sqrt((third_event[\"y\"] - event[\"y\"]) ** 2 +\n (third_event[\"x\"] - event[\"x\"]) ** 2) > 5:\n break\n \n if third_event[\"action\"] == \"click\" and third_event[\"pressed\"]:\n mouse_controller.click(button, 3) \n return 2, 2\n\n mouse_controller.click(button, 2)\n return 1, 1\n \n mouse_controller.press(button)\n return 0, 0\n\n if event[\"action\"] == \"move\":\n mouse_controller.position = (event[\"x\"], event[\"y\"])\n\n elif event[\"action\"] == \"click\":\n button = name_to_button(event[\"button\"])\n \n if event[\"pressed\"]:\n if presses_to_skip == 0:\n presses, releases = do_mouse_press(button) \n presses_to_skip += presses\n releases_to_skip += releases\n \n if presses > 0:\n in_click_sequence = True\n else:\n presses_to_skip -= 1\n else:\n if releases_to_skip == 0:\n mouse_controller.release(button)\n \n if in_click_sequence:\n keyboard_controller.press(Key.shift)\n mouse_controller.click(Button.left)\n keyboard_controller.release(Key.shift)\n in_click_sequence = False\n else:\n releases_to_skip -= 1\n\n elif event[\"action\"] == \"scroll\":\n if metadata[\"system\"] == \"Windows\":\n # for some reason on windows, pynput scroll is correct but pyautogui is not\n mouse_controller.scroll(metadata[\"scroll_direction\"] * event[\"dx\"], metadata[\"scroll_direction\"] * event[\"dy\"])\n else:\n pyautogui.hscroll(clicks=metadata[\"scroll_direction\"] * event[\"dx\"])\n pyautogui.vscroll(clicks=metadata[\"scroll_direction\"] * event[\"dy\"])\n\n elif event[\"action\"] in [\"press\", \"release\"]:\n key = name_to_key(event[\"name\"])\n if event[\"action\"] == \"press\":\n keyboard_controller.press(key)\n else:\n keyboard_controller.release(key)\n \n # sleep for the correct amount of time\n \n end_time = time.perf_counter()\n execution_time = end_time - start_time\n\n if i + 1 < len(events):\n desired_delay = events[i + 1][\"time_stamp\"] - event[\"time_stamp\"]\n delay = desired_delay - execution_time\n if delay < 0:\n print(f\"warning: behind by {-delay * 1000:.3f} ms\")\n elif delay != 0:\n wait_until = time.perf_counter() + delay\n while time.perf_counter() < wait_until:\n pass\n \n self.listener.stop()" }, { "identifier": "get_latest_recording", "path": "ducktrack/playback.py", "snippet": "def get_latest_recording() -> str:\n recordings_dir = get_recordings_dir()\n if not os.path.exists(recordings_dir):\n raise Exception(\"The recordings directory does not exist\")\n \n recordings = [os.path.join(recordings_dir, f) for f in os.listdir(recordings_dir) if os.path.isdir(os.path.join(recordings_dir, f))]\n \n if len(recordings) == 0:\n raise Exception(\"You have no recordings to play back\")\n \n latest_recording = max(recordings, key=os.path.getctime)\n\n return latest_recording" }, { "identifier": "Recorder", "path": "ducktrack/recorder.py", "snippet": "class Recorder(QThread):\n \"\"\"\n Makes recordings.\n \"\"\"\n \n recording_stopped = pyqtSignal()\n\n def __init__(self, natural_scrolling: bool):\n super().__init__()\n \n if system() == \"Windows\":\n fix_windows_dpi_scaling()\n \n self.recording_path = self._get_recording_path()\n \n self._is_recording = False\n self._is_paused = False\n \n self.event_queue = Queue()\n self.events_file = open(os.path.join(self.recording_path, \"events.jsonl\"), \"a\")\n \n self.metadata_manager = MetadataManager(\n recording_path=self.recording_path, \n natural_scrolling=natural_scrolling\n )\n self.obs_client = OBSClient(recording_path=self.recording_path, \n metadata=self.metadata_manager.metadata)\n\n self.mouse_listener = mouse.Listener(\n on_move=self.on_move,\n on_click=self.on_click,\n on_scroll=self.on_scroll)\n \n self.keyboard_listener = keyboard.Listener(\n on_press=self.on_press, \n on_release=self.on_release)\n \n def on_move(self, x, y):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"move\", \n \"x\": x, \n \"y\": y}, block=False)\n \n def on_click(self, x, y, button, pressed):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"click\", \n \"x\": x, \n \"y\": y, \n \"button\": button.name, \n \"pressed\": pressed}, block=False)\n \n def on_scroll(self, x, y, dx, dy):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"scroll\", \n \"x\": x, \n \"y\": y, \n \"dx\": dx, \n \"dy\": dy}, block=False)\n \n def on_press(self, key):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"press\", \n \"name\": key.char if type(key) == KeyCode else key.name}, block=False)\n\n def on_release(self, key):\n if not self._is_paused:\n self.event_queue.put({\"time_stamp\": time.perf_counter(), \n \"action\": \"release\", \n \"name\": key.char if type(key) == KeyCode else key.name}, block=False)\n\n def run(self):\n self._is_recording = True\n \n self.metadata_manager.collect()\n self.obs_client.start_recording()\n \n self.mouse_listener.start()\n self.keyboard_listener.start()\n \n while self._is_recording:\n event = self.event_queue.get()\n self.events_file.write(json.dumps(event) + \"\\n\")\n\n def stop_recording(self):\n if self._is_recording:\n self._is_recording = False\n\n self.metadata_manager.end_collect()\n \n self.mouse_listener.stop()\n self.keyboard_listener.stop()\n \n self.obs_client.stop_recording()\n self.metadata_manager.add_obs_record_state_timings(self.obs_client.record_state_events)\n self.events_file.close()\n self.metadata_manager.save_metadata()\n \n self.recording_stopped.emit()\n \n def pause_recording(self):\n if not self._is_paused and self._is_recording:\n self._is_paused = True\n self.obs_client.pause_recording()\n self.event_queue.put({\"time_stamp\": time.perf_counter(),\n \"action\": \"pause\"}, block=False)\n\n def resume_recording(self):\n if self._is_paused and self._is_recording:\n self._is_paused = False\n self.obs_client.resume_recording()\n self.event_queue.put({\"time_stamp\": time.perf_counter(),\n \"action\": \"resume\"}, block=False)\n\n def _get_recording_path(self) -> str:\n recordings_dir = get_recordings_dir()\n\n if not os.path.exists(recordings_dir):\n os.mkdir(recordings_dir)\n\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n \n recording_path = os.path.join(recordings_dir, f\"recording-{current_time}\")\n os.mkdir(recording_path)\n\n return recording_path" }, { "identifier": "get_recordings_dir", "path": "ducktrack/util.py", "snippet": "def get_recordings_dir() -> str:\n documents_folder = Path.home() / 'Documents' / 'DuckTrack_Recordings'\n return str(documents_folder)" }, { "identifier": "open_file", "path": "ducktrack/util.py", "snippet": "def open_file(path):\n if platform.system() == \"Windows\":\n os.startfile(path)\n elif platform.system() == \"Darwin\":\n subprocess.Popen([\"open\", path])\n else:\n subprocess.Popen([\"xdg-open\", path])" } ]
import os import sys from platform import system from PyQt6.QtCore import QTimer, pyqtSlot from PyQt6.QtGui import QAction, QIcon from PyQt6.QtWidgets import (QApplication, QCheckBox, QDialog, QFileDialog, QFormLayout, QLabel, QLineEdit, QMenu, QMessageBox, QPushButton, QSystemTrayIcon, QTextEdit, QVBoxLayout, QWidget) from .obs_client import close_obs, is_obs_running, open_obs from .playback import Player, get_latest_recording from .recorder import Recorder from .util import get_recordings_dir, open_file
3,316
class TitleDescriptionDialog(QDialog): def __init__(self, parent=None): super().__init__(parent) self.setWindowTitle("Recording Details") layout = QVBoxLayout(self) self.form_layout = QFormLayout() self.title_label = QLabel("Title:") self.title_input = QLineEdit(self) self.form_layout.addRow(self.title_label, self.title_input) self.description_label = QLabel("Description:") self.description_input = QTextEdit(self) self.form_layout.addRow(self.description_label, self.description_input) layout.addLayout(self.form_layout) self.submit_button = QPushButton("Save", self) self.submit_button.clicked.connect(self.accept) layout.addWidget(self.submit_button) def get_values(self): return self.title_input.text(), self.description_input.toPlainText() class MainInterface(QWidget): def __init__(self, app: QApplication): super().__init__() self.tray = QSystemTrayIcon(QIcon(resource_path("assets/duck.png"))) self.tray.show() self.app = app self.init_tray() self.init_window() if not is_obs_running(): self.obs_process = open_obs() def init_window(self): self.setWindowTitle("DuckTrack") layout = QVBoxLayout(self) self.toggle_record_button = QPushButton("Start Recording", self) self.toggle_record_button.clicked.connect(self.toggle_record) layout.addWidget(self.toggle_record_button) self.toggle_pause_button = QPushButton("Pause Recording", self) self.toggle_pause_button.clicked.connect(self.toggle_pause) self.toggle_pause_button.setEnabled(False) layout.addWidget(self.toggle_pause_button) self.show_recordings_button = QPushButton("Show Recordings", self)
class TitleDescriptionDialog(QDialog): def __init__(self, parent=None): super().__init__(parent) self.setWindowTitle("Recording Details") layout = QVBoxLayout(self) self.form_layout = QFormLayout() self.title_label = QLabel("Title:") self.title_input = QLineEdit(self) self.form_layout.addRow(self.title_label, self.title_input) self.description_label = QLabel("Description:") self.description_input = QTextEdit(self) self.form_layout.addRow(self.description_label, self.description_input) layout.addLayout(self.form_layout) self.submit_button = QPushButton("Save", self) self.submit_button.clicked.connect(self.accept) layout.addWidget(self.submit_button) def get_values(self): return self.title_input.text(), self.description_input.toPlainText() class MainInterface(QWidget): def __init__(self, app: QApplication): super().__init__() self.tray = QSystemTrayIcon(QIcon(resource_path("assets/duck.png"))) self.tray.show() self.app = app self.init_tray() self.init_window() if not is_obs_running(): self.obs_process = open_obs() def init_window(self): self.setWindowTitle("DuckTrack") layout = QVBoxLayout(self) self.toggle_record_button = QPushButton("Start Recording", self) self.toggle_record_button.clicked.connect(self.toggle_record) layout.addWidget(self.toggle_record_button) self.toggle_pause_button = QPushButton("Pause Recording", self) self.toggle_pause_button.clicked.connect(self.toggle_pause) self.toggle_pause_button.setEnabled(False) layout.addWidget(self.toggle_pause_button) self.show_recordings_button = QPushButton("Show Recordings", self)
self.show_recordings_button.clicked.connect(lambda: open_file(get_recordings_dir()))
6
2023-10-18 19:34:19+00:00
4k
e4s2023/E4S2023
swap_face_fine/Blender/model_center/blener.py
[ { "identifier": "Referencer", "path": "swap_face_fine/Blender/model_center/referencer.py", "snippet": "class Referencer(nn.Module):\n def __init__(self, args):\n super(Referencer, self).__init__()\n self.args = args\n if args.small_FPN:\n self.FPN = SmallFPN()\n else:\n self.FPN = AdaptiveFeatureGenerator(args)\n self.trainable_tao = nn.Parameter(torch.tensor(1.))\n\n if args.lambda_CYC != 0:\n self.compute_inv = True\n return\n\n def forward(self, img_A, img_T, mask_A, mask_T):\n \"\"\"\n img_A/img_T: (1,3,256,256)\n mask_A/mask_T: (1,256,256)\n \"\"\"\n feats_A = self.FPN(img_A, img_A) # (1,256,64,64)\n\n # Random flip\n if np.random.rand() < 0.5:\n feats_T = self.FPN(img_T, img_T) # (1,256,64,64)\n else:\n feats_T = self.FPN(torch.flip(img_T, dims=[-1]), torch.flip(img_T, dims=[-1]))\n # feats_T = torch.flip(feats_T, dims=[-1])\n\n # Calculate mask parts and extract image pixels\n part_dict_A = get_part_dict(mask_A) # {'head':(1,256,256);...}\n part_dict_T = get_part_dict(mask_T)\n\n grayscale_head_A = get_greyscale_head(img_A, part_dict_A['head']) # (1,256,256)\n inpainting_mask_T = (get_dilated_mask(part_dict_T['head']) - part_dict_T['head']).clamp(0, 1)\n e_AT = get_dilated_mask((part_dict_A['head'] + part_dict_T['head']).clamp(0, 1))\n inpainting_mask_A = (e_AT - part_dict_A['head']).clamp(0, 1)\n img_bg = img_T * (1 - e_AT[:, None])\n\n part_dict_A['inpainting'] = inpainting_mask_A\n part_dict_T['inpainting'] = inpainting_mask_T\n\n color_refs, color_inv_refs_pair = get_color_refer(img_T, feats_A, feats_T, part_dict_A, part_dict_T,\n self.trainable_tao, self.compute_inv, self.args.small_FPN)\n\n if self.args.lambda_CYC2 != 0:\n # color_refs_cro, color_inv_refs_pair_cro = get_color_refer(\n # torch.flip(img_T, dims=[0]),\n # torch.flip(feats_A, dims=[0]),\n # torch.flip(feats_T, dims=[0]),\n # {k: torch.flip(v, dims=[0]) for k, v in part_dict_A.items()},\n # {k: torch.flip(v, dims=[0]) for k, v in part_dict_T.items()},\n # self.trainable_tao, self.compute_inv,\n # self.args.small_FPN)\n\n color_refs_cro, color_inv_refs_pair_cro = get_color_refer(\n torch.flip(img_T, dims=[0]),\n feats_A,\n torch.flip(feats_T, dims=[0]),\n part_dict_A,\n {k: torch.flip(v, dims=[0]) for k, v in part_dict_T.items()},\n self.trainable_tao, self.compute_inv,\n self.args.small_FPN)\n\n if len(color_refs) <= 1:\n head_ref = torch.zeros_like(img_T)\n inpaint_ref = torch.zeros_like(img_T)\n else:\n head_ref = sum([v for k, v in color_refs.items() if k != 'inpainting'])\n inpaint_ref = color_refs['inpainting']\n\n packages = torch.cat([F.upsample_bilinear(torch.cat([head_ref,\n inpaint_ref], dim=1), img_T.size()[-2:]),\n part_dict_A['head'][:, None].float(),\n inpainting_mask_A[:, None].float(),\n grayscale_head_A[:, None],\n img_bg], dim=1)\n return packages, color_inv_refs_pair, color_inv_refs_pair_cro" }, { "identifier": "ResUNet", "path": "swap_face_fine/Blender/model_center/res_u_net.py", "snippet": "class ResUNet(nn.Module):\n def __init__(self, args):\n super(ResUNet, self).__init__()\n\n if args.small_FPN:\n self.input_encoder_layer = InputEncodeLayer(12, 16)\n\n self.res_en_layer2 = ResBlock(16, 32, stride=2)\n self.res_en_layer3 = ResBlock(32, 64, stride=2)\n\n self.res_bridge_layer = ResBlock(64, 128, stride=2)\n\n self.res_de_layer3 = ResBlock(128 + 64, 64)\n self.res_de_layer2 = ResBlock(64 + 32, 32)\n self.res_de_layer1 = ResBlock(32 + 16, 16)\n\n self.output_decoder_layer = nn.Sequential(\n nn.Conv2d(16, 3, kernel_size=1, stride=1, padding=0, bias=True),\n nn.Sigmoid()\n )\n else:\n self.input_encoder_layer = InputEncodeLayer(12, 64)\n\n self.res_en_layer2 = ResBlock(64, 128, stride=2)\n self.res_en_layer3 = ResBlock(128, 256, stride=2)\n\n self.res_bridge_layer = ResBlock(256, 512, stride=2)\n\n self.res_de_layer3 = ResBlock(512 + 256, 256)\n self.res_de_layer2 = ResBlock(256 + 128, 128)\n self.res_de_layer1 = ResBlock(128 + 64, 64)\n\n self.output_decoder_layer = nn.Sequential(\n nn.Conv2d(64, 3, kernel_size=1, stride=1, padding=0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, pkgs):\n en_feat1 = self.input_encoder_layer(pkgs)\n en_feat2 = self.res_en_layer2(en_feat1)\n en_feat3 = self.res_en_layer3(en_feat2)\n\n bridge_feat = self.res_bridge_layer(en_feat3)\n\n de_feat3 = self.res_de_layer3(torch.cat([F.upsample_bilinear(bridge_feat, scale_factor=2), en_feat3], dim=1))\n de_feat2 = self.res_de_layer2(torch.cat([F.upsample_bilinear(de_feat3, scale_factor=2), en_feat2], dim=1))\n de_feat1 = self.res_de_layer1(torch.cat([F.upsample_bilinear(de_feat2, scale_factor=2), en_feat1], dim=1))\n\n pred = self.output_decoder_layer(de_feat1)\n return pred" } ]
import torch import torch.nn as nn from .referencer import Referencer from .res_u_net import ResUNet
1,705
class Blender(nn.Module): def __init__(self, args): super(Blender, self).__init__() self.referencer = Referencer(args)
class Blender(nn.Module): def __init__(self, args): super(Blender, self).__init__() self.referencer = Referencer(args)
self.unet = ResUNet(args)
1
2023-10-15 12:15:01+00:00
4k
riverscn/epghub
epg/plugin/weibo_cctv9.py
[ { "identifier": "search", "path": "epg/plugin/__weibo_search.py", "snippet": "def search(keyword: str, page: int = 1) -> list:\n \"\"\"\n Search weibo by keyword.\n\n Args:\n keyword (str): The keyword to search.\n page (int): The page number to search.\n\n Returns:\n list: The search result.\n \"\"\"\n # 请求参数\n params = {\n \"containerid\": \"100103type=1&q={}\".format(keyword),\n \"page_type\": \"searchall\",\n \"page\": page,\n }\n\n # 发送请求\n try:\n r = requests.get(API_ENDPOINT, headers=headers, params=params, timeout=5)\n except:\n return []\n\n # 解析json数据\n cards = r.json()[\"data\"][\"cards\"]\n\n # 提取微博数据\n weibo_list = []\n for card in cards:\n if \"mblog\" in card[\"card_group\"][0]:\n weibo_list.append(card[\"card_group\"][0][\"mblog\"])\n\n return weibo_list" }, { "identifier": "headers", "path": "epg/plugin/__weibo_search.py", "snippet": "API_ENDPOINT = \"https://m.weibo.cn/api/container/getIndex\"\ndef search(keyword: str, page: int = 1) -> list:" }, { "identifier": "Channel", "path": "epg/model.py", "snippet": "class Channel:\n \"\"\"\n Channel model.\n\n Attributes:\n id (str): The channel id.\n metadata (dict): The channel metadata.\n programs (list[Program]): The programs of the channel.\n\n Methods:\n update(date: date = datetime.today().date()) -> bool: Update channel with new data for the given date.\n now_playing(now: datetime = datetime.now()) -> Program | None: Get the program that is currently playing.\n next_program(now: datetime = datetime.now()) -> Program | None: Get the next program.\n \"\"\"\n\n def __init__(\n self,\n id: str,\n metadata: dict = {},\n update_callable: Callable[[Any, date], bool] | None = None,\n ) -> None:\n self.__id = id\n self.metadata = metadata\n self.metadata.update(\n {\"last_update\": datetime(1970, 1, 1, 0, 0, 0, tzinfo=tz_shanghai)}\n )\n self.__update_callable = update_callable\n self.programs = []\n\n def __eq__(self, other) -> bool:\n if isinstance(other, str):\n return self.__id == other\n if isinstance(other, Channel):\n return self.__id == other.__id\n return False\n\n def __str__(self) -> str:\n return f'Channel(id={self.__id}, name={self.metadata[\"name\"]}, {len(self.programs)} programs)'\n\n @property\n def id(self) -> str:\n return self.__id\n\n @id.setter\n def id(self, value: str) -> None:\n raise AttributeError(\"Cannot set attribute 'id'\")\n\n def update(self, date: date = datetime.today().date()) -> bool | tuple:\n \"\"\"\n Update channel with new data for the given date.\n\n Args:\n date (date): The date for which to update the model. Defaults to today's date.\n\n Returns:\n bool: True if the update was successful, False otherwise.\n \"\"\"\n if self.__update_callable is not None:\n update_result = self.__update_callable(self, date)\n return update_result\n return False\n\n def now_playing(self, now: datetime = datetime.now()) -> Program | None:\n \"\"\"\n Get the program that is currently playing.\n\n Args:\n now (datetime): The time to check. Defaults to current time.\n\n Returns:\n Program: The program that is currently playing, or None if no program is playing.\n \"\"\"\n for program in self.programs:\n if (\n program.start_time.astimezone()\n <= now.astimezone()\n <= program.end_time.astimezone()\n ):\n return program\n return None\n\n def next_program(self, now: datetime = datetime.now()) -> Program | None:\n \"\"\"\n Get the next program.\n\n Args:\n now (datetime): The time to check. Defaults to current time.\n\n Returns:\n Program: The next program, or None if there is no next program.\n \"\"\"\n self.programs.sort(key=lambda x: x.start_time)\n for program in self.programs:\n if program.start_time.astimezone() > now.astimezone():\n return program\n return None\n\n def flush(self, date) -> None:\n \"\"\"\n Flush all programs of date\n \"\"\"\n self.programs = [\n program for program in self.programs if program.start_time.date() != date\n ]\n return None" }, { "identifier": "Program", "path": "epg/model.py", "snippet": "class Program:\n \"\"\"\n Program model.\n\n Attributes:\n title (str): The program title.\n start_time (datetime): The program start time.\n end_time (datetime): The program end time.\n channel (str): The channel id.\n desc (str): The program description.\n episode (str): The program episode.\n \"\"\"\n\n def __init__(\n self,\n title: str,\n start_time: datetime,\n end_time: datetime,\n channel_id: str,\n desc: str = \"\",\n episode: str = \"\",\n sub_title: str = \"\",\n ) -> None:\n self.title = title\n self.sub_title = sub_title\n self.start_time = start_time\n self.end_time = end_time\n self.desc = desc\n self.episode = episode\n self.channel = channel_id\n\n def __eq__(self, other) -> bool:\n if isinstance(other, Program):\n return (\n self.start_time == other.start_time\n and self.end_time == other.end_time\n and self.title == other.title\n )\n return False\n\n def __hash__(self) -> int:\n return hash((self.start_time, self.end_time, self.title))\n\n def __str__(self) -> str:\n return (\n f\"Program({self.channel}, {self.title}, {self.start_time}, {self.end_time})\"\n )" } ]
from .__weibo_search import search as weibo_search from .__weibo_search import headers from datetime import date, datetime, timedelta from epg.model import Channel, Program import re import requests import json
1,739
keyword = "#每日央视纪录片精选#" def update_programs(programs: list[Program], programs_new: list[Program]) -> int: """ Update programs with new programs. Args: programs (list[Program]): The programs to update. programs_new (list[Program]): The new programs. Returns: int: The number of programs updated. """ num_updated_programs = 0 for program_new in programs_new: for program in programs: if program_new.start_time - program.start_time < timedelta(minutes=5): program.title = program_new.title num_updated_programs += 1 break return num_updated_programs
keyword = "#每日央视纪录片精选#" def update_programs(programs: list[Program], programs_new: list[Program]) -> int: """ Update programs with new programs. Args: programs (list[Program]): The programs to update. programs_new (list[Program]): The new programs. Returns: int: The number of programs updated. """ num_updated_programs = 0 for program_new in programs_new: for program in programs: if program_new.start_time - program.start_time < timedelta(minutes=5): program.title = program_new.title num_updated_programs += 1 break return num_updated_programs
def update(channel: Channel, date: date) -> int:
2
2023-10-20 04:35:12+00:00
4k
Aggify/aggify
tests/test_aggify.py
[ { "identifier": "Aggify", "path": "aggify/aggify.py", "snippet": "def last_out_stage_check(method: AggifyType) -> AggifyType:\n def decorator(*args, **kwargs):\n def __init__(self, base_model: Type[Document]):\n def __iter__(self):\n def project(self, **kwargs: QueryParams) -> \"Aggify\":\n def group(self, expression: Union[str, Dict, List, None] = \"id\") -> \"Aggify\":\n def order_by(self, *order_fields: Union[str, List[str]]) -> \"Aggify\":\n def raw(self, raw_query: dict) -> \"Aggify\":\n def add_fields(self, **fields) -> \"Aggify\": # noqa\n def filter(\n self, arg: Union[Q, None] = None, **kwargs: Union[QueryParams, F, list]\n ) -> \"Aggify\":\n def out(self, coll: str, db: Union[str, None] = None) -> \"Aggify\":\n def __to_aggregate(self, query: Dict[str, Any]) -> None:\n def __getitem__(self, index: Union[slice, int]) -> \"Aggify\":\n def unwind(\n self,\n path: str,\n include_array_index: Union[str, None] = None,\n preserve: bool = False,\n ) -> \"Aggify\":\n def annotate(\n self,\n annotate_name: Union[str, None] = None,\n accumulator: Union[str, None] = None,\n f: Union[Union[str, Dict], F, int, None] = None,\n **kwargs,\n ) -> \"Aggify\":\n def _get_field_type_and_accumulator(\n accumulator: str,\n ) -> Tuple[Type, str]:\n def _get_annotate_value(self, f: Union[F, str]) -> Union[Dict, str]:\n def _do_annotate_with_expression(\n annotate: Dict[str, Dict[str, Any]], base_model_fields: Dict[str, Any]\n ) -> Tuple[Dict[str, Dict[str, Any]], List[str]]:\n def __match(self, matches: Dict[str, Any]):\n def __lookup(\n from_collection: str, local_field: str, as_name: str, foreign_field: str = \"_id\"\n ) -> Dict[str, Dict[str, str]]:\n def __combine_sequential_matches(self) -> List[Dict[str, Union[dict, Any]]]:\n def get_field_name_recursively(\n self, field: str, base: Union[CollectionType, None] = None\n ) -> str:\n def lookup(\n self,\n from_collection: CollectionType,\n as_name: str,\n query: Union[List[Q], Union[Q, None], List[\"Aggify\"]] = None,\n let: Union[List[str], None] = None,\n local_field: Union[str, None] = None,\n foreign_field: Union[str, None] = None,\n raw_let: Union[Dict, None] = None,\n ) -> \"Aggify\":\n def get_model_field(model: Type[Document], field: str) -> mongoengine_fields:\n def _replace_base(self, embedded_field) -> str:\n def replace_root(\n self, *, embedded_field: str, merge: Union[Dict, None] = None\n ) -> \"Aggify\":\n def replace_with(\n self, *, embedded_field: str, merge: Union[Dict, None] = None\n ) -> \"Aggify\":\n def redact(self, value1, condition, value2, then_value, else_value):\n def clean_then_else(_then_value, _else_value):\nclass Aggify:" }, { "identifier": "Cond", "path": "aggify/compiler.py", "snippet": "class Cond:\n \"\"\"\n input: Cond(23, '>', 20, 'hi', 'bye')\n return: {'$cond': {'if': {'$gt': [23, 20]}, 'then': 'hi', 'else': 'bye'}}\n \"\"\"\n\n OPERATOR_MAPPING = {\n \">\": \"$gt\",\n \">=\": \"$gte\",\n \"<\": \"$lt\",\n \"<=\": \"$lte\",\n \"==\": \"$eq\",\n \"!=\": \"$ne\",\n }\n\n def __init__(self, value1, condition, value2, then_value, else_value):\n self.value1 = value1\n self.value2 = value2\n self.condition = self._map_condition(condition)\n self.then_value = then_value\n self.else_value = else_value\n\n def _map_condition(self, condition):\n if condition in self.OPERATOR_MAPPING:\n return self.OPERATOR_MAPPING[condition]\n raise InvalidOperator(condition)\n\n def __iter__(self):\n \"\"\"Iterator used by `dict` to create a dictionary from a `Cond` object\n\n With this method we are now able to do this:\n c = Cond(...)\n dict_of_c = dict(c)\n\n instead of c.to_dict()\n\n Returns:\n A tuple of '$cond' and its value\n \"\"\"\n yield (\n \"$cond\",\n {\n \"if\": {self.condition: [self.value1, self.value2]},\n \"then\": self.then_value,\n \"else\": self.else_value,\n },\n )" }, { "identifier": "AggifyValueError", "path": "aggify/exceptions.py", "snippet": "class AggifyValueError(AggifyBaseException):\n def __init__(self, expected_list: List[Type], result: Type):\n self.message = (\n f\"Input is not correctly passed, expected either of {[expected for expected in expected_list]}\"\n f\"but got {result}\"\n )\n self.expecteds = expected_list\n self.result = result\n super().__init__(self.message)" }, { "identifier": "AnnotationError", "path": "aggify/exceptions.py", "snippet": "class AnnotationError(InvalidPipelineStageError):\n pass" }, { "identifier": "OutStageError", "path": "aggify/exceptions.py", "snippet": "class OutStageError(InvalidPipelineStageError):\n def __init__(self, stage):\n self.message = (\n f\"You cannot add a {self!r} pipeline after $out stage! stage : {stage}\"\n )\n super().__init__(self.message)" }, { "identifier": "InvalidArgument", "path": "aggify/exceptions.py", "snippet": "class InvalidArgument(AggifyBaseException):\n def __init__(self, expected_list: list):\n self.message = f\"Input is not correctly passed, expected {[expected for expected in expected_list]}\"\n self.expecteds = expected_list\n super().__init__(self.message)" }, { "identifier": "InvalidField", "path": "aggify/exceptions.py", "snippet": "class InvalidField(AggifyBaseException):\n def __init__(self, field: str):\n self.message = f\"Field {field} does not exists.\"\n super().__init__(self.message)" }, { "identifier": "InvalidOperator", "path": "aggify/exceptions.py", "snippet": "class InvalidOperator(AggifyBaseException):\n def __init__(self, operator: str):\n self.message = f\"Operator {operator} does not exists, please refer to documentation to see all supported operators.\"\n super().__init__(self.message)" }, { "identifier": "AlreadyExistsField", "path": "aggify/exceptions.py", "snippet": "class AlreadyExistsField(AggifyBaseException):\n def __init__(self, field: str):\n self.message = f\"Field {field} already exists.\"\n super().__init__(self.message)" }, { "identifier": "InvalidEmbeddedField", "path": "aggify/exceptions.py", "snippet": "class InvalidEmbeddedField(AggifyBaseException):\n def __init__(self, field: str):\n self.message = f\"Field {field} is not embedded.\"\n super().__init__(self.message)" }, { "identifier": "MongoIndexError", "path": "aggify/exceptions.py", "snippet": "class MongoIndexError(AggifyBaseException):\n def __init__(self):\n self.message = \"Index error is invalid, please use int or slice without step!\"\n super().__init__(self.message)" }, { "identifier": "InvalidProjection", "path": "aggify/exceptions.py", "snippet": "class InvalidProjection(AggifyBaseException):\n def __init__(self):\n self.message = \"You can't use inclusion and exclusion together.\"\n super().__init__(self.message)" }, { "identifier": "InvalidAnnotateExpression", "path": "aggify/exceptions.py", "snippet": "class InvalidAnnotateExpression(AggifyBaseException):\n def __init__(self):\n self.message = \"Invalid expression passed to annotate.\"\n super().__init__(self.message)" } ]
import pytest from mongoengine import Document, IntField, StringField from aggify import Aggify, Cond, F, Q from aggify.exceptions import ( AggifyValueError, AnnotationError, OutStageError, InvalidArgument, InvalidField, InvalidOperator, AlreadyExistsField, InvalidEmbeddedField, MongoIndexError, InvalidProjection, InvalidAnnotateExpression, )
2,565
class BaseModel(Document): # Define your fields here name = StringField(max_length=100) age = IntField() meta = {"allow_inheritance": True, "abstract": True} # This defines a base document model for MongoDB using MongoEngine, with 'name' and 'age' fields. # The 'allow_inheritance' and 'abstract' options ensure it's used as a base class for other documents. class TestAggify: def test__getitem__zero(self): aggify = Aggify(BaseModel) assert aggify[0] def test__getitem__slice(self): aggify = Aggify(BaseModel) thing = aggify[0:10] assert isinstance(thing, Aggify) assert thing.pipelines[-1]["$limit"] == 10 assert thing.pipelines[-2]["$skip"] == 0 def test__getitem__value_error(self): with pytest.raises(AggifyValueError) as err: Aggify(BaseModel)["hello"] # type: ignore # noqa assert "str" in err.__str__(), "wrong type was not detected" def test_filtering_and_projection(self): aggify = Aggify(BaseModel) aggify.filter(age__gte=30).project(name=1, age=1) assert len(aggify.pipelines) == 2 assert aggify.pipelines[1]["$project"] == {"name": 1, "age": 1} assert list(aggify.base_model._fields.keys()) == ["name", "age", "id"] def test_filtering_and_projection_with_deleting_id(self): aggify = Aggify(BaseModel) aggify.filter(age__gte=30).project(name=1, age=1, id=0) assert len(aggify.pipelines) == 2 assert aggify.pipelines[1]["$project"] == {"_id": 0, "name": 1, "age": 1} assert list(aggify.base_model._fields.keys()) == ["name", "age"] def test_filtering_and_ordering(self): aggify = Aggify(BaseModel) aggify.filter(age__gte=30).order_by("-age") assert len(aggify.pipelines) == 2 assert aggify.pipelines[1]["$sort"] == {"age": -1} # Test multiple filters and complex conditions def test_multiple_filters_and_conditions(self): aggify = Aggify(BaseModel) age = F("age") * 2
class BaseModel(Document): # Define your fields here name = StringField(max_length=100) age = IntField() meta = {"allow_inheritance": True, "abstract": True} # This defines a base document model for MongoDB using MongoEngine, with 'name' and 'age' fields. # The 'allow_inheritance' and 'abstract' options ensure it's used as a base class for other documents. class TestAggify: def test__getitem__zero(self): aggify = Aggify(BaseModel) assert aggify[0] def test__getitem__slice(self): aggify = Aggify(BaseModel) thing = aggify[0:10] assert isinstance(thing, Aggify) assert thing.pipelines[-1]["$limit"] == 10 assert thing.pipelines[-2]["$skip"] == 0 def test__getitem__value_error(self): with pytest.raises(AggifyValueError) as err: Aggify(BaseModel)["hello"] # type: ignore # noqa assert "str" in err.__str__(), "wrong type was not detected" def test_filtering_and_projection(self): aggify = Aggify(BaseModel) aggify.filter(age__gte=30).project(name=1, age=1) assert len(aggify.pipelines) == 2 assert aggify.pipelines[1]["$project"] == {"name": 1, "age": 1} assert list(aggify.base_model._fields.keys()) == ["name", "age", "id"] def test_filtering_and_projection_with_deleting_id(self): aggify = Aggify(BaseModel) aggify.filter(age__gte=30).project(name=1, age=1, id=0) assert len(aggify.pipelines) == 2 assert aggify.pipelines[1]["$project"] == {"_id": 0, "name": 1, "age": 1} assert list(aggify.base_model._fields.keys()) == ["name", "age"] def test_filtering_and_ordering(self): aggify = Aggify(BaseModel) aggify.filter(age__gte=30).order_by("-age") assert len(aggify.pipelines) == 2 assert aggify.pipelines[1]["$sort"] == {"age": -1} # Test multiple filters and complex conditions def test_multiple_filters_and_conditions(self): aggify = Aggify(BaseModel) age = F("age") * 2
aggify.filter(Q(name="John") | Q(name="Alice")).project(
0
2023-10-22 07:53:28+00:00
4k
sotopia-lab/sotopia
lmlib/serve/lm_inference.py
[ { "identifier": "SeparatorStyle", "path": "lmlib/utils/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n DOLLY = auto()\n OASST_PYTHIA = auto()\n BAIZE = auto()\n def get_prompt(self) -> str:\n def append_message(self, role: str, message: str) -> None:\n def to_gradio_chatbot(self) -> list[list[str | None]]:\n def copy(self) -> \"Conversation\":\n def dict(self) -> dict[str, Any]:\ndef get_default_conv_template(model_name: str) -> Conversation:\ndef compute_skip_echo_len(\n conv: Conversation, prompt: str, is_chatglm: bool = False\n) -> int:" }, { "identifier": "load_json", "path": "lmlib/utils/data_utils.py", "snippet": "def load_json(path: str) -> dict[str, Any]:\n with open(path, \"r\", encoding=\"utf8\") as fh:\n content: dict[str, Any] = json.load(fh)\n return content" } ]
import abc import os import os.path as osp import warnings import torch import pdb from typing import Any, Dict, List, Optional, Tuple, Union from logzero import logger from peft import PeftModel, set_peft_model_state_dict from transformers import LlamaForCausalLM # type: ignore[attr-defined] from transformers import LlamaTokenizer # type: ignore[attr-defined] from lmlib.utils.conversation import ( SeparatorStyle, compute_skip_echo_len, conv_templates, get_default_conv_template, ) from lmlib.utils.data_utils import load_json from lmlib.model_tools import ( load_and_cache_large_model, load_and_cache_model, ) from argparse import Namespace from peft import get_peft_model from transformers.trainer_utils import get_last_checkpoint
2,307
out = model( input_ids=torch.as_tensor([input_ids], device=device), use_cache=True, encoder_outputs=encoder_outputs, decoder_input_ids=torch.as_tensor( [[token]], device=device ), past_key_values=past_key_values, ) logits = out.logits past_key_values = out.past_key_values else: out = model( input_ids=torch.as_tensor([[token]], device=device), use_cache=True, past_key_values=past_key_values, ) logits = out.logits past_key_values = out.past_key_values last_token_logits = logits[0][-1] if device == "mps": # Switch to CPU by avoiding some bugs in mps backend. last_token_logits = last_token_logits.float().to("cpu") if temperature < 1e-4: token = int(torch.argmax(last_token_logits)) else: probs = torch.softmax(last_token_logits / temperature, dim=-1) token = int(torch.multinomial(probs, num_samples=1)) output_ids.append(token) if token in stop_token_ids: stopped = True else: stopped = False if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped: output = tokenizer.decode(output_ids, skip_special_tokens=True) if stop_str: pos = output.rfind(stop_str, l_prompt) if pos != -1: output = output[:pos] stopped = True yield output if stopped: break del past_key_values class ChatIO(abc.ABC): @abc.abstractmethod def prompt_for_input(self, role: str) -> str: """Prompt for input from a role.""" @abc.abstractmethod def prompt_for_output(self, role: str) -> None: """Prompt for output from a role.""" @abc.abstractmethod def stream_output(self, output_stream: Any, skip_echo_len: int) -> str: """Stream output.""" def chat_loop( args: Any, model_path: str, cache_dir: str, device: str, num_gpus: str, max_gpu_memory: str, load_8bit: bool, conv_template: Optional[str], temperature: float, max_new_tokens: int, chatio: ChatIO, debug: bool, ) -> None: # Model int8 = args.load_8bit lora_path = args.lora_weight_path model, tokenizer = load_gen_model( model_path, cache_dir, large_model=int8, device="cuda:0", lora_path=lora_path, ) is_chatglm = False # "chatglm" in str(type(model)).lower() # Chat if conv_template: conv = conv_templates[conv_template].copy() else: conv = get_default_conv_template("vicuna_character").copy() # import pdb; pdb.set_trace() def chat() -> None: while True: try: inp = chatio.prompt_for_input(conv.roles[0]) except EOFError: inp = "" if not inp: print("exit...") break # import pdb; pdb.set_trace() conv.append_message(conv.roles[0], inp) conv.append_message(conv.roles[1], "") # if is_chatglm: # prompt = conv.messages[conv.offset :] # generate_stream_func = chatglm_generate_stream # else: generate_stream_func = generate_stream prompt = conv.get_prompt()
"""Inference for FastChat models.""" # try: # from transformers import ( # AutoModel, # AutoModelForCausalLM, # AutoModelForSeq2SeqLM, # LlamaForCausalLM, # LlamaTokenizer, # ) # except ImportError: # from transformers import ( # AutoModelForCausalLM, # LLaMATokenizer, # LLamaForCausalLM, # AutoModel, # AutoModelForSeq2SeqLM, # ) # from lmlib.serve.compression import compress_module # from lmlib.serve.monkey_patch_non_inplace import ( # replace_llama_attn_with_non_inplace_operations, # ) # from lmlib.serve.serve_chatglm import chatglm_generate_stream def get_gpu_memory(max_gpus: Union[int, None] = None) -> list[float]: gpu_memory = [] num_gpus = ( torch.cuda.device_count() if max_gpus is None else min(max_gpus, torch.cuda.device_count()) ) for gpu_id in range(num_gpus): with torch.cuda.device(gpu_id): device = torch.cuda.current_device() gpu_properties = torch.cuda.get_device_properties(device) total_memory = gpu_properties.total_memory / (1024**3) # type: ignore allocated_memory = torch.cuda.memory_allocated() / (1024**3) available_memory = total_memory - allocated_memory gpu_memory.append(available_memory) return gpu_memory def load_gen_model( gen_model_name: str, cache_dir: str, large_model: bool = False, device: str = "cuda", lora_path: Union[str, None] = None, margs: Dict[str, Any] = {}, ) -> Any: if large_model: gen_model = load_and_cache_large_model( gen_model_name, cache_dir=cache_dir, device=device ) else: # margs = {"revision": "float16", "torch_dtype": torch.float16, "low_cpu_mem_usage": True} gen_model = load_and_cache_model( gen_model_name, cache_dir=cache_dir, margs=margs ) gen_model = gen_model.to(device) if lora_path is not None: logger.info(f"load lora from {lora_path}") lora_config_json = load_json( osp.join(lora_path, "adapter_config.json") ) lora_config = Namespace(**lora_config_json) gen_model = get_peft_model(gen_model, lora_config) ckpt_path = get_last_checkpoint(lora_path) if ckpt_path is None: gen_model = PeftModel.from_pretrained( gen_model, lora_path, torch_dtype=torch.float16, ) else: checkpoint_name = os.path.join(ckpt_path, "pytorch_model.bin") adapter_weigths = torch.load(checkpoint_name) set_peft_model_state_dict(gen_model, adapter_weigths) gen_tokenizer = LlamaTokenizer.from_pretrained( gen_model_name, cache_dir=cache_dir ) # use the vicuna version of special tokens gen_tokenizer.add_special_tokens( { "bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<unk>", } ) return gen_model, gen_tokenizer @torch.inference_mode() def generate_stream( model: Any, tokenizer: Any, params: Any, device: str, context_len: int = 2048, stream_interval: int = 2, ) -> Any: prompt = params["prompt"] l_prompt = len(prompt) temperature = float(params.get("temperature", 1.0)) max_new_tokens = int(params.get("max_new_tokens", 256)) stop_str = params.get("stop", None) stop_token_ids = params.get("stop_ids", [tokenizer.eos_token_id]) input_ids = tokenizer(prompt).input_ids output_ids = list(input_ids) max_src_len = context_len - max_new_tokens - 8 input_ids = input_ids[-max_src_len:] token = 0 for i in range(max_new_tokens): if i == 0: if model.config.is_encoder_decoder: encoder_outputs = model.encoder( input_ids=torch.as_tensor([input_ids], device=device) ) out = model( torch.as_tensor([input_ids], device=device), decoder_input_ids=torch.as_tensor( [[model.generation_config.decoder_start_token_id]], device=device, ), encoder_outputs=encoder_outputs, use_cache=True, ) logits = out.logits past_key_values = out.past_key_values else: out = model( torch.as_tensor([input_ids], device=device), use_cache=True ) logits = out.logits past_key_values = out.past_key_values else: if model.config.is_encoder_decoder: out = model( input_ids=torch.as_tensor([input_ids], device=device), use_cache=True, encoder_outputs=encoder_outputs, decoder_input_ids=torch.as_tensor( [[token]], device=device ), past_key_values=past_key_values, ) logits = out.logits past_key_values = out.past_key_values else: out = model( input_ids=torch.as_tensor([[token]], device=device), use_cache=True, past_key_values=past_key_values, ) logits = out.logits past_key_values = out.past_key_values last_token_logits = logits[0][-1] if device == "mps": # Switch to CPU by avoiding some bugs in mps backend. last_token_logits = last_token_logits.float().to("cpu") if temperature < 1e-4: token = int(torch.argmax(last_token_logits)) else: probs = torch.softmax(last_token_logits / temperature, dim=-1) token = int(torch.multinomial(probs, num_samples=1)) output_ids.append(token) if token in stop_token_ids: stopped = True else: stopped = False if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped: output = tokenizer.decode(output_ids, skip_special_tokens=True) if stop_str: pos = output.rfind(stop_str, l_prompt) if pos != -1: output = output[:pos] stopped = True yield output if stopped: break del past_key_values class ChatIO(abc.ABC): @abc.abstractmethod def prompt_for_input(self, role: str) -> str: """Prompt for input from a role.""" @abc.abstractmethod def prompt_for_output(self, role: str) -> None: """Prompt for output from a role.""" @abc.abstractmethod def stream_output(self, output_stream: Any, skip_echo_len: int) -> str: """Stream output.""" def chat_loop( args: Any, model_path: str, cache_dir: str, device: str, num_gpus: str, max_gpu_memory: str, load_8bit: bool, conv_template: Optional[str], temperature: float, max_new_tokens: int, chatio: ChatIO, debug: bool, ) -> None: # Model int8 = args.load_8bit lora_path = args.lora_weight_path model, tokenizer = load_gen_model( model_path, cache_dir, large_model=int8, device="cuda:0", lora_path=lora_path, ) is_chatglm = False # "chatglm" in str(type(model)).lower() # Chat if conv_template: conv = conv_templates[conv_template].copy() else: conv = get_default_conv_template("vicuna_character").copy() # import pdb; pdb.set_trace() def chat() -> None: while True: try: inp = chatio.prompt_for_input(conv.roles[0]) except EOFError: inp = "" if not inp: print("exit...") break # import pdb; pdb.set_trace() conv.append_message(conv.roles[0], inp) conv.append_message(conv.roles[1], "") # if is_chatglm: # prompt = conv.messages[conv.offset :] # generate_stream_func = chatglm_generate_stream # else: generate_stream_func = generate_stream prompt = conv.get_prompt()
skip_echo_len = compute_skip_echo_len(conv, prompt)
0
2023-10-23 19:47:26+00:00
4k
Zai-Kun/reverse-engineered-chatgpt
re_gpt/async_chatgpt.py
[ { "identifier": "BackendError", "path": "re_gpt/errors.py", "snippet": "class BackendError(Exception):\n def __init__(self, error_code):\n self.error_code = error_code\n self.message = (\n f\"An error occurred on the backend. Error code: {self.error_code}\"\n )\n super().__init__(self.message)" }, { "identifier": "InvalidSessionToken", "path": "re_gpt/errors.py", "snippet": "class InvalidSessionToken(Exception):\n def __init__(self):\n self.message = \"Invalid session token provided.\"\n super().__init__(self.message)" }, { "identifier": "RetryError", "path": "re_gpt/errors.py", "snippet": "class RetryError(Exception):\n def __init__(self, website, message=\"Exceeded maximum retries\"):\n self.website = website\n self.message = f\"{message} for website: {website}\"\n super().__init__(self.message)" }, { "identifier": "TokenNotProvided", "path": "re_gpt/errors.py", "snippet": "class TokenNotProvided(Exception):\n def __init__(self):\n self.message = \"Token not provided. Please pass your '__Secure-next-auth.session-token' as an argument (e.g., ChatGPT.init(session_token=YOUR_TOKEN)).\"\n super().__init__(self.message)" }, { "identifier": "UnexpectedResponseError", "path": "re_gpt/errors.py", "snippet": "class UnexpectedResponseError(Exception):\n def __init__(self, original_exception, server_response):\n self.original_exception = original_exception\n self.server_response = server_response\n self.message = f\"An unexpected error occurred. Error message: {self.original_exception}.\\nThis is what the server returned: {self.server_response}.\"\n super().__init__(self.message)" }, { "identifier": "InvalidModelName", "path": "re_gpt/errors.py", "snippet": "class InvalidModelName(Exception):\n def __init__(self, model, avalible_models):\n self.model = model\n self.avalible_models = avalible_models\n self.message = f'\"{model}\" is not a valid model. Avalible models: {[model for model in avalible_models]}'\n super().__init__(self.message)" }, { "identifier": "async_get_binary_path", "path": "re_gpt/utils.py", "snippet": "async def async_get_binary_path(session):\n if binary_path is None:\n return None\n\n if not os.path.exists(funcaptcha_bin_folder_path) or not os.path.isdir(\n funcaptcha_bin_folder_path\n ):\n os.mkdir(funcaptcha_bin_folder_path)\n\n if os.path.isfile(binary_path):\n try:\n local_binary_hash = calculate_file_md5(binary_path)\n response = await session.get(latest_release_url)\n json_data = response.json()\n\n for line in json_data[\"body\"].splitlines():\n if line.startswith(current_os):\n latest_binary_hash = line.split(\"=\")[-1]\n break\n\n if local_binary_hash != latest_binary_hash:\n file_url = get_file_url(json_data)\n\n await async_download_binary(session, binary_path, file_url)\n except:\n return binary_path\n else:\n response = await session.get(latest_release_url)\n json_data = response.json()\n file_url = get_file_url(json_data)\n\n await async_download_binary(session, binary_path, file_url)\n\n return binary_path" }, { "identifier": "get_model_slug", "path": "re_gpt/utils.py", "snippet": "def get_model_slug(chat):\n for _, message in chat.get(\"mapping\", {}).items():\n if \"message\" in message:\n role = message[\"message\"][\"author\"][\"role\"]\n if role == \"assistant\":\n return message[\"message\"][\"metadata\"][\"model_slug\"]" } ]
import asyncio import ctypes import inspect import json import uuid from typing import AsyncGenerator, Callable, Optional from curl_cffi.requests import AsyncSession from .errors import ( BackendError, InvalidSessionToken, RetryError, TokenNotProvided, UnexpectedResponseError, InvalidModelName, ) from .utils import async_get_binary_path, get_model_slug
2,643
payload (dict): Payload containing message information. Yields: bytes: Chunk of data received as a response. """ response_queue = asyncio.Queue() async def perform_request(): def content_callback(chunk): response_queue.put_nowait(chunk) url = CHATGPT_API.format("conversation") await self.chatgpt.session.post( url=url, headers=self.chatgpt.build_request_headers(), json=payload, content_callback=content_callback, ) await response_queue.put(None) asyncio.create_task(perform_request()) while True: chunk = await response_queue.get() if chunk is None: break yield chunk async def build_message_payload(self, user_input: str) -> dict: """ Build a payload for sending a user message. Returns: dict: Payload containing message information. """ if self.conversation_id and (self.parent_id is None or self.model is None): await self.fetch_chat() # it will automatically fetch the chat and set the parent id payload = { "conversation_mode": {"conversation_mode": {"kind": "primary_assistant"}}, "conversation_id": self.conversation_id, "action": "next", "arkose_token": await self.arkose_token_generator() if self.chatgpt.generate_arkose_token or MODELS[self.model]["needs_arkose_token"] else None, "force_paragen": False, "history_and_training_disabled": False, "messages": [ { "author": {"role": "user"}, "content": {"content_type": "text", "parts": [user_input]}, "id": str(uuid.uuid4()), "metadata": {}, } ], "model": MODELS[self.model]["slug"], "parent_message_id": str(uuid.uuid4()) if not self.parent_id else self.parent_id, } return payload async def build_message_continuation_payload(self) -> dict: """ Build a payload for continuing ChatGPT's cut off response. Returns: dict: Payload containing message information for continuation. """ payload = { "conversation_mode": {"conversation_mode": {"kind": "primary_assistant"}}, "action": "continue", "arkose_token": await self.arkose_token_generator() if self.chatgpt.generate_arkose_token or MODELS[self.model]["needs_arkose_token"] else None, "conversation_id": self.conversation_id, "force_paragen": False, "history_and_training_disabled": False, "model": MODELS[self.model]["slug"], "parent_message_id": self.parent_id, "timezone_offset_min": -300, } return payload async def arkose_token_generator(self) -> str: """ Generate an Arkose token. Returns: str: Arkose token. """ if not self.chatgpt.tried_downloading_binary: self.chatgpt.binary_path = await async_get_binary_path(self.chatgpt.session) if self.chatgpt.binary_path: self.chatgpt.arkose = ctypes.CDLL(self.chatgpt.binary_path) self.chatgpt.arkose.GetToken.restype = ctypes.c_char_p self.chatgpt.tried_downloading_binary = True if self.chatgpt.binary_path: try: result = self.chatgpt.arkose.GetToken() return ctypes.string_at(result).decode("utf-8") except: pass for _ in range(5): response = await self.chatgpt.session.get(BACKUP_ARKOSE_TOKEN_GENERATOR) if response.text == "null": raise BackendError(error_code=505) try: return response.json()["token"] except: await asyncio.sleep(0.7)
# Constants USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36" CHATGPT_API = "https://chat.openai.com/backend-api/{}" BACKUP_ARKOSE_TOKEN_GENERATOR = "https://arkose-token-generator.zaieem.repl.co/token" MODELS = { "gpt-4": {"slug": "gpt-4", "needs_arkose_token": True}, "gpt-3.5": {"slug": "text-davinci-002-render-sha", "needs_arkose_token": False}, } class AsyncConversation: def __init__(self, chatgpt, conversation_id=None, model=None): self.chatgpt = chatgpt self.conversation_id = conversation_id self.parent_id = None self.model = model async def fetch_chat(self) -> dict: """ Fetches the chat of the conversation from the API. Returns: dict: The JSON response from the API containing the chat if the conversation_id is not none, else returns an empty dict. Raises: UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format """ if not self.conversation_id: return {} url = CHATGPT_API.format(f"conversation/{self.conversation_id}") response = await self.chatgpt.session.get( url=url, headers=self.chatgpt.build_request_headers() ) error = None try: chat = response.json() self.parent_id = list(chat.get("mapping", {}))[-1] model_slug = get_model_slug(chat) self.model = [ key for key, value in MODELS.items() if value["slug"] == model_slug ][0] except Exception as e: error = e if error is not None: raise UnexpectedResponseError(error, response.text) return chat async def chat(self, user_input: str) -> AsyncGenerator[dict, None]: """ As the name implies, chat with ChatGPT. Args: user_input (str): The user's input message. Yields: dict: A dictionary representing assistant responses. Returns: AsyncGenerator[dict, None]: An asynchronous generator object that yields assistant responses. Raises: UnexpectedResponseError: If the response is not a valid JSON object or if the response json is not in the expected format """ payload = await self.build_message_payload(user_input) server_response = ( "" # To store what the server returned for debugging in case of an error ) error = None try: full_message = None while True: response = self.send_message(payload=payload) async for chunk in response: decoded_chunk = chunk.decode() server_response += decoded_chunk for line in decoded_chunk.splitlines(): if not line.startswith("data: "): continue raw_json_data = line[6:] if not (decoded_json := self.decode_raw_json(raw_json_data)): continue if ( "message" in decoded_json and decoded_json["message"]["author"]["role"] == "assistant" ): processed_response = self.filter_response(decoded_json) if full_message: prev_resp_len = len( full_message["message"]["content"]["parts"][0] ) processed_response["content"] = processed_response[ "content" ][prev_resp_len::] yield processed_response full_message = decoded_json self.conversation_id = full_message["conversation_id"] self.parent_id = full_message["message"]["id"] if ( full_message["message"]["metadata"]["finish_details"]["type"] == "max_tokens" ): payload = await self.build_message_continuation_payload() else: break except Exception as e: error = e # raising the error outside the 'except' block to prevent the 'During handling of the above exception, another exception occurred' error if error is not None: raise UnexpectedResponseError(error, server_response) async def send_message(self, payload: dict) -> AsyncGenerator[bytes, None]: """ Send a message payload to the server and receive the response. Args: payload (dict): Payload containing message information. Yields: bytes: Chunk of data received as a response. """ response_queue = asyncio.Queue() async def perform_request(): def content_callback(chunk): response_queue.put_nowait(chunk) url = CHATGPT_API.format("conversation") await self.chatgpt.session.post( url=url, headers=self.chatgpt.build_request_headers(), json=payload, content_callback=content_callback, ) await response_queue.put(None) asyncio.create_task(perform_request()) while True: chunk = await response_queue.get() if chunk is None: break yield chunk async def build_message_payload(self, user_input: str) -> dict: """ Build a payload for sending a user message. Returns: dict: Payload containing message information. """ if self.conversation_id and (self.parent_id is None or self.model is None): await self.fetch_chat() # it will automatically fetch the chat and set the parent id payload = { "conversation_mode": {"conversation_mode": {"kind": "primary_assistant"}}, "conversation_id": self.conversation_id, "action": "next", "arkose_token": await self.arkose_token_generator() if self.chatgpt.generate_arkose_token or MODELS[self.model]["needs_arkose_token"] else None, "force_paragen": False, "history_and_training_disabled": False, "messages": [ { "author": {"role": "user"}, "content": {"content_type": "text", "parts": [user_input]}, "id": str(uuid.uuid4()), "metadata": {}, } ], "model": MODELS[self.model]["slug"], "parent_message_id": str(uuid.uuid4()) if not self.parent_id else self.parent_id, } return payload async def build_message_continuation_payload(self) -> dict: """ Build a payload for continuing ChatGPT's cut off response. Returns: dict: Payload containing message information for continuation. """ payload = { "conversation_mode": {"conversation_mode": {"kind": "primary_assistant"}}, "action": "continue", "arkose_token": await self.arkose_token_generator() if self.chatgpt.generate_arkose_token or MODELS[self.model]["needs_arkose_token"] else None, "conversation_id": self.conversation_id, "force_paragen": False, "history_and_training_disabled": False, "model": MODELS[self.model]["slug"], "parent_message_id": self.parent_id, "timezone_offset_min": -300, } return payload async def arkose_token_generator(self) -> str: """ Generate an Arkose token. Returns: str: Arkose token. """ if not self.chatgpt.tried_downloading_binary: self.chatgpt.binary_path = await async_get_binary_path(self.chatgpt.session) if self.chatgpt.binary_path: self.chatgpt.arkose = ctypes.CDLL(self.chatgpt.binary_path) self.chatgpt.arkose.GetToken.restype = ctypes.c_char_p self.chatgpt.tried_downloading_binary = True if self.chatgpt.binary_path: try: result = self.chatgpt.arkose.GetToken() return ctypes.string_at(result).decode("utf-8") except: pass for _ in range(5): response = await self.chatgpt.session.get(BACKUP_ARKOSE_TOKEN_GENERATOR) if response.text == "null": raise BackendError(error_code=505) try: return response.json()["token"] except: await asyncio.sleep(0.7)
raise RetryError(website=BACKUP_ARKOSE_TOKEN_GENERATOR)
2
2023-10-17 08:34:04+00:00
4k
qualabs/video-headline
utils/cloudfront.py
[ { "identifier": "cloudfront_deleted", "path": "video/signals.py", "snippet": "" }, { "identifier": "Configuration", "path": "configuration/models.py", "snippet": "class Configuration(SingletonModel):\n slack_notifications_url = models.URLField(blank=True, null=True)\n cloud_front_configuration = JSONField(\n blank=True, default={}, verbose_name='CloudFront Configuration'\n )\n\n class Meta:\n verbose_name = 'Global Configuration'" }, { "identifier": "AWSAccount", "path": "organization/models/aws.py", "snippet": "class AWSAccount(models.Model):\n REGION_CHOICES = (\n ('us-east-1', 'us-east-1'),\n ('us-east-2', 'us-east-2'),\n ('us-west-1', 'us-west-1'),\n ('us-west-2', 'us-west-2'),\n ('ap-east-1', 'ap-east-1'),\n ('ap-south-1', 'ap-south-1'),\n ('ap-northeast-2', 'ap-northeast-2'),\n ('ap-southeast-1', 'ap-southeast-1'),\n ('ap-southeast-2', 'ap-southeast-2'),\n ('ap-northeast-1', 'ap-northeast-1'),\n ('ca-central-1', 'ca-central-1'),\n ('eu-central-1', 'eu-central-1'),\n ('eu-west-1', 'eu-west-1'),\n ('eu-west-2', 'eu-west-2'),\n ('eu-west-3', 'eu-west-3'),\n ('eu-north-1', 'eu-north-1'),\n ('me-south-1', 'me-south-1'),\n ('sa-east-1', 'sa-east-1'),\n )\n\n name = models.CharField(max_length=254, verbose_name='Name', blank=True, null=True)\n\n access_key = models.CharField(max_length=254,\n verbose_name='Access Key')\n secret_access_key = EncryptedCharField(max_length=254,\n verbose_name='Secret Access Key')\n region = models.CharField(max_length=254, verbose_name='Region', choices=REGION_CHOICES)\n media_convert_role = models.CharField(max_length=254, verbose_name='MediaConvert Role')\n media_convert_endpoint_url = models.URLField(max_length=254,\n verbose_name='MediaConvert Endpoint URL')\n media_live_role = models.CharField(max_length=254, verbose_name='MediaLive Role')\n account_id = models.CharField(max_length=64, verbose_name='Account Id', blank=False, null=True)\n cf_private_key = EncryptedTextField(verbose_name='CloudFront distributions private key',\n blank=True, null=True)\n cf_key_pair_id = models.CharField(max_length=100,\n verbose_name='CloudFront distributions key pair id',\n blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'AWS Account'\n verbose_name_plural = 'AWS Accounts'" } ]
import boto3 from botocore.exceptions import ClientError from celery import shared_task from django.utils import timezone from video.signals import cloudfront_deleted from configuration.models import Configuration from organization.models import AWSAccount from video.models import LiveVideo from organization.models import Channel from video.models import LiveVideo
2,395
# skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex def update_distribution(organization, dist_id, status = False): """ If Organization is deleted the associated AWS CloudFront distribution should be previously deleted too. Steps: - retrieve distribution config - find & update 'Enabled' field value to disable distribution - send updated configuration See docs on [1]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.get_distribution_config [2]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.update_distribution """ cloudfront = get_cloudfront_client(organization.aws_account) try: # GetDistributionConfig cf_config = cloudfront.get_distribution_config(Id=dist_id) cf_config_etag = cf_config['ETag'] # update 'Enabled' field to False cf_config['DistributionConfig']['Enabled'] = status return cloudfront.update_distribution( Id=dist_id, IfMatch=cf_config_etag, DistributionConfig=cf_config['DistributionConfig'] ) except ClientError as ex: # skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex def update_distribution_status(organization, dist_id, status = False): """ If Organization is deleted the associated AWS CloudFront distribution should be previously deleted too. Steps: - retrieve distribution config - find & update 'Enabled' field value to disable distribution - send updated configuration See docs on [1]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.get_distribution_config [2]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.update_distribution """ cloudfront = get_cloudfront_client(organization.aws_account) try: # GetDistributionConfig cf_config = cloudfront.get_distribution_config(Id=dist_id) cf_config_etag = cf_config['ETag'] # update 'Enabled' field to False cf_config['DistributionConfig']['Enabled'] = status return cloudfront.update_distribution( Id=dist_id, IfMatch=cf_config_etag, DistributionConfig=cf_config['DistributionConfig'] ) except ClientError as ex: # skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex def update_distribution_geoblocking(dist_id, type, location, organization): cloudfront = get_cloudfront_client(organization.aws_account) try: # GetDistributionConfig cf_config = cloudfront.get_distribution_config(Id=dist_id) cf_config_etag = cf_config['ETag'] # Geoblocking cf_config = cf_config['DistributionConfig'] cf_config['Restrictions']['GeoRestriction']['RestrictionType'] = type # Local import to avoid recursive import if type == LiveVideo.GeoType.NONE: location = [] cf_config['Restrictions']['GeoRestriction']['Quantity'] = len(location) cf_config['Restrictions']['GeoRestriction']['Items'] = location # upload new configuration return cloudfront.update_distribution( Id=dist_id, IfMatch=cf_config_etag, DistributionConfig=cf_config ) except ClientError as ex: # skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex @shared_task def delete_distributions(): """ Extract & mark for delete disabled CloudFront distributions. See docs on https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.list_distributions """ video_dist_used = list(Channel.objects.all().values_list('cf_id', flat=True)) live_dist_used = list(LiveVideo.objects.all().values_list('cf_id', flat=True)) dist_used = video_dist_used + live_dist_used
def get_cloudfront_client(aws_account): if aws_account: cloudfront = boto3.client('cloudfront', aws_access_key_id=aws_account.access_key, aws_secret_access_key=aws_account.secret_access_key, region_name=aws_account.region) else: cloudfront = boto3.client('cloudfront') return cloudfront def create_distribution(settings, organization, channel): """ Generate CloudFront distribution, use global default profile configuration. Replace conf settings, supplied values (keys): -id: origin id -domain: S3 bucker domain -target: origin Id -caller: operation unique id minimum set of target keys (from base node) in profile JSON config file: "Origins": { "Items": [{ "Id": origin id, "DomainName": S3 bucker domain }] } "DefaultCacheBehavior": { "TargetOriginId": origin Id, } "CallerReference": operation unique id" See docs on [1]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_distribution [2]: >> default profile configuration on /configuration/cloud_front_configuration.sample """ global_config = Configuration.get_solo() conf_cont = global_config.cloud_front_configuration # get Origins/Items path, update & replace values origin_conf = conf_cont['Origins']['Items'][0] origin_conf['Id'] = settings['id'] origin_conf['DomainName'] = settings['domain'] conf_cont['Origins']['Items'] = [origin_conf] # get & update TargetOriginId path conf_cont['DefaultCacheBehavior']['TargetOriginId'] = settings['target'] # get & update CallerReference path conf_cont['CallerReference'] = settings['caller'] # assign a path if specified if settings.get('path'): origin_conf['OriginPath'] = settings['path'] # change TTL of distribution if specified if settings.get('maxTTL') and settings.get('defaultTTL'): conf_cont['DefaultCacheBehavior']['DefaultTTL'] = settings['defaultTTL'] conf_cont['DefaultCacheBehavior']['MaxTTL'] = settings['maxTTL'] # Tags tags = { 'Items': [{ 'Key': 'vh:org-id', 'Value': str(organization.id), },{ 'Key': 'vh:channel-id', 'Value': str(channel.id), }] } DistributionConfigWithTags = { 'DistributionConfig': conf_cont, 'Tags': tags } # create distribution cloudfront = get_cloudfront_client(organization.aws_account) new_dist = cloudfront.create_distribution_with_tags(DistributionConfigWithTags=DistributionConfigWithTags) # recover & return new CloudFront distribution Id & DomainName return { 'cf_id': new_dist['Distribution']['Id'], 'cf_domain': new_dist['Distribution']['DomainName'] } def tag_distribution(organization, dist_id, tags): """ tags = [(key, value), ... , (key, value)] """ cloudfront = get_cloudfront_client(organization.aws_account) try: dist_arn = cloudfront.get_distribution(Id=dist_id)['Distribution']['ARN'] cloudfront.tag_resource(Resource=dist_arn, Tags={'Items': [{'Key': k, 'Value': v} for k, v in tags]}) except ClientError as ex: # skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex def update_distribution(organization, dist_id, status = False): """ If Organization is deleted the associated AWS CloudFront distribution should be previously deleted too. Steps: - retrieve distribution config - find & update 'Enabled' field value to disable distribution - send updated configuration See docs on [1]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.get_distribution_config [2]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.update_distribution """ cloudfront = get_cloudfront_client(organization.aws_account) try: # GetDistributionConfig cf_config = cloudfront.get_distribution_config(Id=dist_id) cf_config_etag = cf_config['ETag'] # update 'Enabled' field to False cf_config['DistributionConfig']['Enabled'] = status return cloudfront.update_distribution( Id=dist_id, IfMatch=cf_config_etag, DistributionConfig=cf_config['DistributionConfig'] ) except ClientError as ex: # skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex def update_distribution_status(organization, dist_id, status = False): """ If Organization is deleted the associated AWS CloudFront distribution should be previously deleted too. Steps: - retrieve distribution config - find & update 'Enabled' field value to disable distribution - send updated configuration See docs on [1]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.get_distribution_config [2]: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.update_distribution """ cloudfront = get_cloudfront_client(organization.aws_account) try: # GetDistributionConfig cf_config = cloudfront.get_distribution_config(Id=dist_id) cf_config_etag = cf_config['ETag'] # update 'Enabled' field to False cf_config['DistributionConfig']['Enabled'] = status return cloudfront.update_distribution( Id=dist_id, IfMatch=cf_config_etag, DistributionConfig=cf_config['DistributionConfig'] ) except ClientError as ex: # skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex def update_distribution_geoblocking(dist_id, type, location, organization): cloudfront = get_cloudfront_client(organization.aws_account) try: # GetDistributionConfig cf_config = cloudfront.get_distribution_config(Id=dist_id) cf_config_etag = cf_config['ETag'] # Geoblocking cf_config = cf_config['DistributionConfig'] cf_config['Restrictions']['GeoRestriction']['RestrictionType'] = type # Local import to avoid recursive import if type == LiveVideo.GeoType.NONE: location = [] cf_config['Restrictions']['GeoRestriction']['Quantity'] = len(location) cf_config['Restrictions']['GeoRestriction']['Items'] = location # upload new configuration return cloudfront.update_distribution( Id=dist_id, IfMatch=cf_config_etag, DistributionConfig=cf_config ) except ClientError as ex: # skip operation on distribution not exists operation if ex.response['Error']['Code'] == 'NoSuchDistribution': pass else: raise ex @shared_task def delete_distributions(): """ Extract & mark for delete disabled CloudFront distributions. See docs on https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.list_distributions """ video_dist_used = list(Channel.objects.all().values_list('cf_id', flat=True)) live_dist_used = list(LiveVideo.objects.all().values_list('cf_id', flat=True)) dist_used = video_dist_used + live_dist_used
for account in AWSAccount.objects.all():
2
2023-10-17 19:44:32+00:00
4k
Qualcomm-AI-research/geometric-algebra-transformer
tests/gatr/interface/test_translation.py
[ { "identifier": "embed_oriented_plane", "path": "gatr/interface/plane.py", "snippet": "def embed_oriented_plane(\n normal: torch.Tensor, position: Optional[torch.Tensor] = None\n) -> torch.Tensor:\n \"\"\"Embeds an (oriented plane) in the PGA.\n\n Following L. Dorst, the plane is represent as PGA vectors.\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n position : torch.Tensor with shape (..., 3) or None\n One position on the plane. If None, the plane goes through the origin.\n normal : torch.Tensor with shape (..., 3)\n Normal to the plane.\n\n Returns\n -------\n multivector : torch.Tensor with shape (..., 16)\n Embedding into multivector.\n \"\"\"\n\n # Create multivector tensor with same batch shape, same device, same dtype as input\n batch_shape = normal.shape[:-1]\n multivector = torch.zeros(*batch_shape, 16, dtype=normal.dtype, device=normal.device)\n\n # Embedding a plane through origin into vectors\n multivector[..., 2:5] = normal[..., :]\n\n # Shift away from origin by translating\n if position is not None:\n translation = embed_translation(position)\n inverse_translation = embed_translation(-position)\n multivector = geometric_product(\n geometric_product(translation, multivector), inverse_translation\n )\n\n return multivector" }, { "identifier": "extract_oriented_plane", "path": "gatr/interface/plane.py", "snippet": "def extract_oriented_plane(multivector: torch.Tensor) -> torch.Tensor:\n \"\"\"Extracts the normal on an oriented plane from a multivector.\n\n Currently, this function does *not* extract a support point for the plane (or the distance to\n the origin).\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n multivector : torch.Tensor with shape (..., 16)\n Embedding into multivector.\n\n Returns\n -------\n normal : torch.Tensor with shape (..., 3)\n Normal to the plane.\n \"\"\"\n\n return multivector[..., 2:5]" }, { "identifier": "embed_point", "path": "gatr/interface/point.py", "snippet": "def embed_point(coordinates: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds 3D points in multivectors.\n\n We follow the convention used in the reference below and map points to tri-vectors.\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n coordinates : torch.Tensor with shape (..., 3)\n 3D coordinates\n\n Returns\n -------\n multivector : torch.Tensor with shape (..., 16)\n Embedding into multivector.\n \"\"\"\n\n # Create multivector tensor with same batch shape, same device, same dtype as input\n batch_shape = coordinates.shape[:-1]\n multivector = torch.zeros(*batch_shape, 16, dtype=coordinates.dtype, device=coordinates.device)\n\n # Embedding into trivectors\n # Homogeneous coordinates: unphysical component / embedding dim, x_123\n multivector[..., 14] = 1.0\n multivector[..., 13] = -coordinates[..., 0] # x-coordinate embedded in x_023\n multivector[..., 12] = coordinates[..., 1] # y-coordinate embedded in x_013\n multivector[..., 11] = -coordinates[..., 2] # z-coordinate embedded in x_012\n\n return multivector" }, { "identifier": "extract_point", "path": "gatr/interface/point.py", "snippet": "def extract_point(\n multivector: torch.Tensor, divide_by_embedding_dim: bool = True, threshold: float = 1e-3\n) -> torch.Tensor:\n \"\"\"Given a multivector, extract any potential 3D point from the trivector components.\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n multivector : torch.Tensor with shape (..., 16)\n Multivector.\n divide_by_embedding_dim : bool\n Whether to divice by the embedding dim. Proper PGA etiquette would have us do this, but it\n may not be good for NN training.\n threshold : float\n Minimum value of the additional, unphysical component. Necessary to avoid exploding values\n or NaNs when this unphysical component of the homogeneous coordinates becomes small.\n\n Returns\n -------\n coordinates : torch.Tensor with shape (..., 3)\n 3D coordinates corresponding to the trivector components of the multivector.\n \"\"\"\n\n coordinates = torch.cat(\n [-multivector[..., [13]], multivector[..., [12]], -multivector[..., [11]]], dim=-1\n )\n\n # Divide by embedding dim\n if divide_by_embedding_dim:\n embedding_dim = multivector[\n ..., [14]\n ] # Embedding dimension / scale of homogeneous coordinates\n embedding_dim = torch.where(torch.abs(embedding_dim) > threshold, embedding_dim, threshold)\n coordinates = coordinates / embedding_dim\n\n return coordinates" }, { "identifier": "extract_point_embedding_reg", "path": "gatr/interface/point.py", "snippet": "def extract_point_embedding_reg(multivector: torch.Tensor) -> torch.Tensor:\n \"\"\"Given a multivector x, returns |x_{123}| - 1.\n\n Put differently, this is the deviation of the norm of a pseudoscalar component from 1.\n This can be used as a regularization term when predicting point positions, to avoid x_123 to\n be too close to 0.\n\n Parameters\n ----------\n multivector : torch.Tensor with shape (..., 16)\n Multivector.\n\n Returns\n -------\n regularization : torch.Tensor with shape (..., 1)\n |multivector_123| - 1.\n \"\"\"\n\n return torch.abs(multivector[..., [14]]) - 1.0" }, { "identifier": "embed_pseudoscalar", "path": "gatr/interface/pseudoscalar.py", "snippet": "def embed_pseudoscalar(pseudoscalars: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds a pseudoscalar tensor into multivectors.\n\n Parameters\n ----------\n pseudoscalars: torch.Tensor with shape (..., 1)\n Pseudoscalar inputs.\n\n Returns\n -------\n multivectors: torch.Tensor with shape (..., 16)\n Multivector outputs. `multivectors[..., [15]]` is the same as `pseudoscalars`.\n The other components are zero.\n \"\"\"\n\n non_scalar_shape = list(pseudoscalars.shape[:-1]) + [15]\n non_scalar_components = torch.zeros(\n non_scalar_shape, device=pseudoscalars.device, dtype=pseudoscalars.dtype\n )\n embedding = torch.cat((non_scalar_components, pseudoscalars), dim=-1)\n\n return embedding" }, { "identifier": "extract_pseudoscalar", "path": "gatr/interface/pseudoscalar.py", "snippet": "def extract_pseudoscalar(multivectors: torch.Tensor) -> torch.Tensor:\n \"\"\"Extracts pseudoscalar components from multivectors.\n\n Parameters\n ----------\n multivectors: torch.Tensor with shape (..., 16)\n Multivector inputs.\n\n Returns\n -------\n pseudoscalars: torch.Tensor with shape (..., 1)\n Pseudoscalar component of multivectors.\n \"\"\"\n\n return multivectors[..., [15]]" }, { "identifier": "embed_scalar", "path": "gatr/interface/scalar.py", "snippet": "def embed_scalar(scalars: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds a scalar tensor into multivectors.\n\n Parameters\n ----------\n scalars: torch.Tensor with shape (..., 1)\n Scalar inputs.\n\n Returns\n -------\n multivectors: torch.Tensor with shape (..., 16)\n Multivector outputs. `multivectors[..., [0]]` is the same as `scalars`. The other components\n are zero.\n \"\"\"\n\n non_scalar_shape = list(scalars.shape[:-1]) + [15]\n non_scalar_components = torch.zeros(\n non_scalar_shape, device=scalars.device, dtype=scalars.dtype\n )\n embedding = torch.cat((scalars, non_scalar_components), dim=-1)\n\n return embedding" }, { "identifier": "extract_scalar", "path": "gatr/interface/scalar.py", "snippet": "def extract_scalar(multivectors: torch.Tensor) -> torch.Tensor:\n \"\"\"Extracts scalar components from multivectors.\n\n Parameters\n ----------\n multivectors: torch.Tensor with shape (..., 16)\n Multivector inputs.\n\n Returns\n -------\n scalars: torch.Tensor with shape (..., 1)\n Scalar component of multivectors.\n \"\"\"\n\n return multivectors[..., [0]]" }, { "identifier": "embed_translation", "path": "gatr/interface/translation.py", "snippet": "def embed_translation(translation_vector: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds a 3D translation in multivectors.\n\n In our convention, a translation vector is embedded into a combination of the scalar and\n bivector components.\n\n We have (in agreement with Eq. (82) of the reference below) that\n ```\n T(t) = 1 - e_0 / 2 (t_1 e_1 + t_2 e_2 + t_3 e_3) .\n ```\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n translation_vector : torch.Tensor with shape (..., 3)\n Vectorial amount of translation.\n\n Returns\n -------\n multivector : torch.Tensor with shape (..., 16)\n Embedding into multivector.\n \"\"\"\n\n # Create multivector tensor with same batch shape, same device, same dtype as input\n batch_shape = translation_vector.shape[:-1]\n multivector = torch.zeros(\n *batch_shape, 16, dtype=translation_vector.dtype, device=translation_vector.device\n )\n\n # Embedding into trivectors\n multivector[..., 0] = 1.0 # scalar\n multivector[..., 5:8] = (\n -0.5 * translation_vector[..., :]\n ) # Translation vector embedded in x_0i with i = 1, 2, 3\n\n return multivector" }, { "identifier": "extract_translation", "path": "gatr/interface/translation.py", "snippet": "def extract_translation(\n multivector: torch.Tensor, divide_by_embedding_dim=False, threshold: float = 1e-3\n) -> torch.Tensor:\n \"\"\"Given a multivector, extract a 3D translation vector from the bivector components.\n\n References\n ----------\n Leo Dorst, \"A Guided Tour to the Plane-Based Geometric Algebra PGA\",\n https://geometricalgebra.org/downloads/PGA4CS.pdf\n\n Parameters\n ----------\n multivector : torch.Tensor with shape (..., 16)\n Multivector.\n divide_by_embedding_dim : bool\n Whether to divice by the embedding dim. Proper PGA etiquette would have us do this, but it\n may not be good for NN training.\n threshold : float\n Minimum value of the additional, unphysical component. Necessary to avoid exploding values\n or NaNs when this unphysical component of the homogeneous coordinates becomes small.\n\n Returns\n -------\n translation : torch.Tensor with shape (..., 3)\n 3D components of the translation vector.\n \"\"\"\n\n translation_vector = -2.0 * multivector[..., 5:8]\n\n # Divide by embedding dim\n if divide_by_embedding_dim:\n embedding_dim = multivector[..., [0]]\n embedding_dim = torch.where(torch.abs(embedding_dim) > threshold, embedding_dim, threshold)\n translation_vector = translation_vector / embedding_dim\n\n return translation_vector" }, { "identifier": "geometric_product", "path": "gatr/primitives/bilinear.py", "snippet": "def geometric_product(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"Computes the geometric product f(x,y) = xy.\n\n Parameters\n ----------\n x : torch.Tensor with shape (..., 16)\n First input multivector. Batch dimensions must be broadcastable between x and y.\n y : torch.Tensor with shape (..., 16)\n Second input multivector. Batch dimensions must be broadcastable between x and y.\n\n Returns\n -------\n outputs : torch.Tensor with shape (..., 16)\n Result. Batch dimensions are result of broadcasting between x, y, and coeffs.\n \"\"\"\n\n # Select kernel on correct device\n gp = _load_bilinear_basis(\"gp\", device=x.device, dtype=x.dtype)\n\n # Compute geometric product\n outputs = cached_einsum(\"i j k, ... j, ... k -> ... i\", gp, x, y)\n\n return outputs" }, { "identifier": "reverse", "path": "gatr/primitives/linear.py", "snippet": "def reverse(x: torch.Tensor) -> torch.Tensor:\n \"\"\"Computes the reversal of a multivector.\n\n The reversal has the same scalar, vector, and pseudoscalar components, but flips sign in the\n bivector and trivector components.\n\n Parameters\n ----------\n x : torch.Tensor with shape (..., 16)\n Input multivector.\n\n Returns\n -------\n outputs : torch.Tensor with shape (..., 16)\n Output multivector.\n \"\"\"\n return _compute_reversal(device=x.device, dtype=x.dtype) * x" }, { "identifier": "BATCH_DIMS", "path": "tests/helpers/constants.py", "snippet": "BATCH_DIMS = [(7, 9), tuple()]" }, { "identifier": "TOLERANCES", "path": "tests/helpers/constants.py", "snippet": "TOLERANCES = dict(atol=1e-3, rtol=1e-4)" } ]
import pytest import torch from gatr.interface import ( embed_oriented_plane, embed_point, embed_pseudoscalar, embed_scalar, embed_translation, extract_oriented_plane, extract_point, extract_point_embedding_reg, extract_pseudoscalar, extract_scalar, extract_translation, ) from gatr.primitives import geometric_product, reverse from tests.helpers import BATCH_DIMS, TOLERANCES
3,574
# Copyright (c) 2023 Qualcomm Technologies, Inc. # All rights reserved. @pytest.mark.parametrize("batch_dims", BATCH_DIMS) def test_translation_embedding_consistency(batch_dims): """Tests whether translation embeddings into multivectors are cycle consistent.""" translations = torch.randn(*batch_dims, 3) multivectors = embed_translation(translations) translations_reencoded = extract_translation(multivectors)
# Copyright (c) 2023 Qualcomm Technologies, Inc. # All rights reserved. @pytest.mark.parametrize("batch_dims", BATCH_DIMS) def test_translation_embedding_consistency(batch_dims): """Tests whether translation embeddings into multivectors are cycle consistent.""" translations = torch.randn(*batch_dims, 3) multivectors = embed_translation(translations) translations_reencoded = extract_translation(multivectors)
torch.testing.assert_close(translations, translations_reencoded, **TOLERANCES)
14
2023-10-23 15:58:36+00:00
4k
StanislavPetrovV/Wolfenstein-3D-Clone
game_objects/npc.py
[ { "identifier": "GameObject", "path": "game_objects/game_object.py", "snippet": "class GameObject:\n def __init__(self, level_map, tex_id, x, z):\n self.eng = level_map.eng\n self.app = self.eng.app\n self.tex_id = tex_id\n #\n self.pos = glm.vec3(x + H_WALL_SIZE, 0, z + H_WALL_SIZE) # center of the tile\n self.rot = 0\n self.scale = glm.vec3(1)\n #\n self.m_model: glm.mat4 = None\n\n def get_model_matrix(self):\n m_model = glm.translate(glm.mat4(), self.pos)\n m_model = glm.rotate(m_model, self.rot, glm.vec3(0, 1, 0))\n m_model = glm.scale(m_model, self.scale)\n return m_model" }, { "identifier": "Item", "path": "game_objects/item.py", "snippet": "class Item(GameObject):\n def __init__(self, level_map, tex_id, x, z):\n super().__init__(level_map, tex_id, x, z)\n\n self.scale = glm.vec3(ITEM_SETTINGS[tex_id]['scale'])\n #\n self.m_model = self.get_model_matrix()" } ]
from settings import * from game_objects.game_object import GameObject from game_objects.item import Item import random
1,613
# self.animate() # set current texture self.tex_id = self.state_tex_id + self.frame def get_damage(self): self.health -= WEAPON_SETTINGS[self.player.weapon_id]['damage'] self.is_hurt = True # if not self.is_player_spotted: self.is_player_spotted = True def attack(self): if not self.is_player_spotted: return False if glm.length(self.player.position.xz - self.pos.xz) > self.attack_dist: return False dir_to_player = glm.normalize(self.player.position - self.pos) # if self.eng.ray_casting.run(start_pos=self.pos, direction=dir_to_player): self.set_state(state='attack') if self.app.sound_trigger: self.play(self.sound.enemy_attack[self.npc_id]) if random.random() < self.hit_probability: self.player.health -= self.damage # self.play(self.sound.player_hurt) return True def get_path_to_player(self): if not self.is_player_spotted: return None self.path_to_player = self.eng.path_finder.find( start_pos=self.tile_pos, end_pos=self.player.tile_pos ) def move_to_player(self): if not self.path_to_player: return None # set state self.set_state(state='walk') # step to player dir_vec = glm.normalize(glm.vec2(self.path_to_player) + H_WALL_SIZE - self.pos.xz) delta_vec = dir_vec * self.speed * self.app.delta_time # collisions if not self.is_collide(dx=delta_vec[0]): self.pos.x += delta_vec[0] if not self.is_collide(dz=delta_vec[1]): self.pos.z += delta_vec[1] # open door door_map = self.level_map.door_map if self.tile_pos in door_map: door = door_map[self.tile_pos] if door.is_closed and not door.is_moving: door.is_moving = True # self.play(self.sound.open_door) # translate self.m_model = self.get_model_matrix() def is_collide(self, dx=0, dz=0): int_pos = ( int(self.pos.x + dx + (self.size if dx > 0 else -self.size if dx < 0 else 0)), int(self.pos.z + dz + (self.size if dz > 0 else -self.size if dz < 0 else 0)) ) return (int_pos in self.level_map.wall_map or int_pos in (self.level_map.npc_map.keys() - {self.tile_pos})) def update_tile_position(self): self.tile_pos = int(self.pos.x), int(self.pos.z) def ray_to_player(self): if self.is_player_spotted: return None dir_to_player = glm.normalize(self.player.position - self.pos) # if self.eng.ray_casting.run(start_pos=self.pos, direction=dir_to_player): self.is_player_spotted = True # self.play(self.sound.spotted[self.npc_id]) def set_state(self, state): self.num_frames = NPC_SETTINGS[self.npc_id]['num_frames'][state] self.state_tex_id = NPC_SETTINGS[self.npc_id]['state_tex_id'][state] self.frame %= self.num_frames def animate(self): if not (self.is_animate and self.app.anim_trigger): return None self.anim_counter += 1 # if self.anim_counter == self.anim_periods: self.anim_counter = 0 self.frame = (self.frame + 1) % self.num_frames # if self.is_hurt: self.is_hurt = False # elif not self.is_alive and self.frame == self.num_frames - 1: self.is_animate = False # self.to_drop_item() # self.play(self.eng.sound.death[self.npc_id]) def to_drop_item(self): if self.drop_item is not None:
class NPC(GameObject): def __init__(self, level_map, tex_id, x, z): super().__init__(level_map, tex_id, x, z) self.level_map = level_map self.player = self.eng.player self.npc_id = tex_id # self.scale = NPC_SETTINGS[self.npc_id]['scale'] self.speed = NPC_SETTINGS[self.npc_id]['speed'] self.size = NPC_SETTINGS[self.npc_id]['size'] self.attack_dist = NPC_SETTINGS[self.npc_id]['attack_dist'] self.health = NPC_SETTINGS[self.npc_id]['health'] self.damage = NPC_SETTINGS[self.npc_id]['damage'] self.hit_probability = NPC_SETTINGS[self.npc_id]['hit_probability'] self.drop_item = NPC_SETTINGS[self.npc_id]['drop_item'] # self.anim_periods = NPC_SETTINGS[self.npc_id]['anim_periods'] self.anim_counter = 0 self.frame = 0 self.is_animate = True # current state: walk, attack, hurt, death self.num_frames, self.state_tex_id = None, None self.set_state(state='walk') # self.tile_pos: Tuple[int, int] = None # self.is_player_spotted: bool = False self.path_to_player: Tuple[int, int] = None # self.is_alive = True self.is_hurt = False # self.play = self.eng.sound.play self.sound = self.eng.sound # self.m_model = self.get_model_matrix() self.update_tile_position() def update(self): if self.is_hurt: self.set_state(state='hurt') # elif self.health > 0: self.update_tile_position() self.ray_to_player() self.get_path_to_player() # if not self.attack(): self.move_to_player() else: self.is_alive = False self.set_state('death') # self.animate() # set current texture self.tex_id = self.state_tex_id + self.frame def get_damage(self): self.health -= WEAPON_SETTINGS[self.player.weapon_id]['damage'] self.is_hurt = True # if not self.is_player_spotted: self.is_player_spotted = True def attack(self): if not self.is_player_spotted: return False if glm.length(self.player.position.xz - self.pos.xz) > self.attack_dist: return False dir_to_player = glm.normalize(self.player.position - self.pos) # if self.eng.ray_casting.run(start_pos=self.pos, direction=dir_to_player): self.set_state(state='attack') if self.app.sound_trigger: self.play(self.sound.enemy_attack[self.npc_id]) if random.random() < self.hit_probability: self.player.health -= self.damage # self.play(self.sound.player_hurt) return True def get_path_to_player(self): if not self.is_player_spotted: return None self.path_to_player = self.eng.path_finder.find( start_pos=self.tile_pos, end_pos=self.player.tile_pos ) def move_to_player(self): if not self.path_to_player: return None # set state self.set_state(state='walk') # step to player dir_vec = glm.normalize(glm.vec2(self.path_to_player) + H_WALL_SIZE - self.pos.xz) delta_vec = dir_vec * self.speed * self.app.delta_time # collisions if not self.is_collide(dx=delta_vec[0]): self.pos.x += delta_vec[0] if not self.is_collide(dz=delta_vec[1]): self.pos.z += delta_vec[1] # open door door_map = self.level_map.door_map if self.tile_pos in door_map: door = door_map[self.tile_pos] if door.is_closed and not door.is_moving: door.is_moving = True # self.play(self.sound.open_door) # translate self.m_model = self.get_model_matrix() def is_collide(self, dx=0, dz=0): int_pos = ( int(self.pos.x + dx + (self.size if dx > 0 else -self.size if dx < 0 else 0)), int(self.pos.z + dz + (self.size if dz > 0 else -self.size if dz < 0 else 0)) ) return (int_pos in self.level_map.wall_map or int_pos in (self.level_map.npc_map.keys() - {self.tile_pos})) def update_tile_position(self): self.tile_pos = int(self.pos.x), int(self.pos.z) def ray_to_player(self): if self.is_player_spotted: return None dir_to_player = glm.normalize(self.player.position - self.pos) # if self.eng.ray_casting.run(start_pos=self.pos, direction=dir_to_player): self.is_player_spotted = True # self.play(self.sound.spotted[self.npc_id]) def set_state(self, state): self.num_frames = NPC_SETTINGS[self.npc_id]['num_frames'][state] self.state_tex_id = NPC_SETTINGS[self.npc_id]['state_tex_id'][state] self.frame %= self.num_frames def animate(self): if not (self.is_animate and self.app.anim_trigger): return None self.anim_counter += 1 # if self.anim_counter == self.anim_periods: self.anim_counter = 0 self.frame = (self.frame + 1) % self.num_frames # if self.is_hurt: self.is_hurt = False # elif not self.is_alive and self.frame == self.num_frames - 1: self.is_animate = False # self.to_drop_item() # self.play(self.eng.sound.death[self.npc_id]) def to_drop_item(self): if self.drop_item is not None:
self.level_map.item_map[self.tile_pos] = Item(
1
2023-10-22 08:41:55+00:00
4k
tomguluson92/cloth2tex
renderer/cloth_renderer.py
[ { "identifier": "PerspectiveCamera", "path": "renderer/landmark_renderer.py", "snippet": "class PerspectiveCamera(nn.Module):\n\n FOCAL_LENGTH = 50*128\n\n def __init__(self, rotation=None, translation=None,\n focal_length_x=None, focal_length_y=None,\n batch_size=1,\n center=None, dtype=torch.float32):\n super(PerspectiveCamera, self).__init__()\n self.batch_size = batch_size\n self.dtype = dtype\n # Make a buffer so that PyTorch does not complain when creating\n # the camera matrix\n self.register_buffer('zero',\n torch.zeros([batch_size], dtype=dtype))\n\n if focal_length_x is None or type(focal_length_x) == float:\n focal_length_x = torch.full(\n [batch_size],\n self.FOCAL_LENGTH if focal_length_x is None else\n focal_length_x,\n dtype=dtype)\n\n if focal_length_y is None or type(focal_length_y) == float:\n focal_length_y = torch.full(\n [batch_size],\n self.FOCAL_LENGTH if focal_length_y is None else\n focal_length_y,\n dtype=dtype)\n\n self.register_buffer('focal_length_x', focal_length_x)\n self.register_buffer('focal_length_y', focal_length_y)\n\n if center is None:\n center = torch.zeros([batch_size, 2], dtype=dtype)\n self.register_buffer('center', center)\n self.register_buffer('center_fix', center)\n\n if rotation is None:\n rotation = torch.eye(\n 3, dtype=dtype).unsqueeze(dim=0).repeat(batch_size, 1, 1)\n\n rotation = nn.Parameter(rotation, requires_grad=False)\n self.register_parameter('rotation', rotation)\n\n if translation is None:\n translation = torch.zeros([batch_size, 3], dtype=dtype)\n\n translation = nn.Parameter(translation,\n requires_grad=True)\n self.register_parameter('translation', translation)\n \n \n\n def forward(self, points):\n device = points.device\n \n with torch.no_grad():\n camera_mat = torch.zeros([self.batch_size, 2, 2],\n dtype=self.dtype, device=points.device)\n camera_mat[:, 0, 0] = self.focal_length_x\n camera_mat[:, 1, 1] = self.focal_length_y\n\n camera_transform = transform_mat(self.rotation,\n self.translation.unsqueeze(dim=-1))\n homog_coord = torch.ones(list(points.shape)[:-1] + [1],\n dtype=points.dtype,\n device=device)\n # Convert the points to homogeneous coordinates\n points_h = torch.cat([points, homog_coord], dim=-1)\n\n projected_points = torch.einsum('bki,bji->bjk',\n [camera_transform, points_h])\n\n img_points = torch.div(projected_points[:, :, :2],\n projected_points[:, :, 2].unsqueeze(dim=-1))\n img_points = torch.einsum('bki,bji->bjk', [camera_mat, img_points]) \\\n + self.center.unsqueeze(dim=1)\n return img_points" }, { "identifier": "OrthogonalCamera", "path": "renderer/landmark_renderer.py", "snippet": "class OrthogonalCamera(nn.Module):\n\n def __init__(self, \n rotation=None, \n translation=None,\n batch_size=1,\n center=None, \n dtype=torch.float32):\n super(OrthogonalCamera, self).__init__()\n self.batch_size = batch_size\n self.dtype = dtype\n # Make a buffer so that PyTorch does not complain when creating\n # the camera matrix\n self.register_buffer('zero',\n torch.zeros([batch_size], dtype=dtype))\n\n if center is None:\n center = torch.zeros([batch_size, 2], dtype=dtype)\n self.register_buffer('center', center)\n self.register_buffer('center_fix', center)\n\n if rotation is None:\n rotation = torch.eye(\n 3, dtype=dtype).unsqueeze(dim=0).repeat(batch_size, 1, 1)\n\n rotation = nn.Parameter(rotation, requires_grad=False)\n self.register_parameter('rotation', rotation)\n\n if translation is None:\n translation = torch.zeros([batch_size, 3], dtype=dtype)\n\n translation = nn.Parameter(translation,\n requires_grad=True)\n self.register_parameter('translation', translation)\n \n\n def forward(self, points):\n device = points.device\n \n with torch.no_grad():\n camera_mat = torch.zeros([self.batch_size, 2, 2],\n dtype=self.dtype, device=points.device)\n camera_mat[:, 0, 0] = 1\n camera_mat[:, 1, 1] = 1\n\n camera_transform = transform_mat(self.rotation,\n self.translation.unsqueeze(dim=-1))\n homog_coord = torch.ones(list(points.shape)[:-1] + [1],\n dtype=points.dtype,\n device=device)\n # Convert the points to homogeneous coordinates\n points_h = torch.cat([points, homog_coord], dim=-1)\n\n projected_points = torch.einsum('bki,bji->bjk',\n [camera_transform, points_h])\n img_points = projected_points[:, :, :2]\n img_points = torch.einsum('bki,bji->bjk', [camera_mat, img_points]) \\\n + self.center.unsqueeze(dim=1)\n\n return img_points" } ]
import datetime import os import cv2 import torch import numpy as np import matplotlib.pyplot as plt import pytorch3d import torchvision.transforms as transforms import random from PIL import Image from pytorch3d.structures import Meshes from pytorch3d.renderer.mesh import Textures from pytorch3d.renderer import ( look_at_view_transform, BlendParams, OrthographicCameras, FoVOrthographicCameras, FoVPerspectiveCameras, PointLights, AmbientLights, DirectionalLights, Materials, RasterizationSettings, MeshRenderer, MeshRendererWithFragments, MeshRasterizer, SoftPhongShader, HardPhongShader, SoftSilhouetteShader, SoftPhongShader, TexturesVertex, TexturesUV ) from pytorch3d.io import load_obj, load_objs_as_meshes, save_obj from pytorch3d.transforms import RotateAxisAngle, Rotate, axis_angle_to_matrix from renderer.landmark_renderer import PerspectiveCamera, OrthogonalCamera
1,758
# coding: UTF-8 """ clothrenderer """ # Data structures and functions for rendering DEG_TO_RAD = np.pi / 180 class ClothRenderer(object): def __init__(self, objfile, resolution=512, focal_distance=1.6, scale_factor=1): self.device = torch.device("cuda:0") self.img_size = resolution self.render_size = resolution self.renderer, self.renderer_silhouette = self.__get_renderer(self.render_size, focal_distance) print("[Cloth2Tex]", objfile) obj_filename = os.path.join(objfile) verts, faces, aux = load_obj( obj_filename, device=self.device, load_textures=True) self.faces = faces.verts_idx self.verts = verts self.aux = aux self.verts = self.normalize_vertex(verts.clone()) * scale_factor self.center = verts.mean(0) self.scale = max((verts - self.center).abs().max(0)[0])
# coding: UTF-8 """ clothrenderer """ # Data structures and functions for rendering DEG_TO_RAD = np.pi / 180 class ClothRenderer(object): def __init__(self, objfile, resolution=512, focal_distance=1.6, scale_factor=1): self.device = torch.device("cuda:0") self.img_size = resolution self.render_size = resolution self.renderer, self.renderer_silhouette = self.__get_renderer(self.render_size, focal_distance) print("[Cloth2Tex]", objfile) obj_filename = os.path.join(objfile) verts, faces, aux = load_obj( obj_filename, device=self.device, load_textures=True) self.faces = faces.verts_idx self.verts = verts self.aux = aux self.verts = self.normalize_vertex(verts.clone()) * scale_factor self.center = verts.mean(0) self.scale = max((verts - self.center).abs().max(0)[0])
self.landmark_cam = OrthogonalCamera(rotation=self.cameras.R.cuda(), translation=self.cameras.T.cuda()).to(self.device)
1
2023-10-17 11:30:53+00:00
4k
amazon-science/cceval
eval.py
[ { "identifier": "compute_metric_stmt", "path": "eval_metric.py", "snippet": "def compute_metric_stmt(args):\n with open(f\"{args.output_dir}/prediction.jsonl\", \"r\") as f_pred:\n samples = []\n for l in f_pred.readlines():\n samples.append(json.loads(l))\n\n examples = {}\n with open(args.prompt_file, \"r\") as f_in:\n for l in f_in.readlines():\n ex = json.loads(l)\n examples[ex[\"metadata\"][\"task_id\"]] = {\n \"prompt\": ex[\"prompt\"],\n \"groundtruth\": ex[\"groundtruth\"]\n }\n\n assert len(samples) == len(examples), f\"{len(samples)} != {len(examples)}\"\n\n global parser\n ts_lang = \"c_sharp\" if args.language == \"csharp\" else args.language\n language = Language(args.ts_lib, ts_lang)\n parser = Parser()\n parser.set_language(language)\n\n truncated_samples = []\n em_labels = []\n\n print(\"post-processing samples ...\")\n pool = mp.Pool(mp.cpu_count() - 1)\n worker = partial(process_examples, args.language)\n\n with tqdm(total=len(samples)) as pbar:\n for output in pool.imap_unordered(worker, zip(samples, [examples[s[\"task_id\"]] for s in samples])):\n trunc_s, em_label = output\n em_labels.append(em_label)\n truncated_samples.append(trunc_s)\n pbar.update()\n\n exact_match = 0\n with open(f\"{args.output_dir}/prediction_truncated.jsonl\", 'w', encoding=\"utf-8\") as pt, \\\n open(f\"{args.output_dir}/exact_match_idx.jsonl\", 'w') as em:\n for trunc_s, em_label in zip(truncated_samples, em_labels):\n pt.write(json.dumps(trunc_s) + \"\\n\")\n if em_label == 1:\n em.write(f'{trunc_s[\"task_id\"]}\\n')\n exact_match += 1\n\n ### Score calculation\n\n id_em = []\n edit_similarities = []\n detailed_results = []\n\n for idx, trunc_s in enumerate(truncated_samples):\n identifier_em = int(trunc_s[\"pred_ids\"] == trunc_s[\"target_ids\"])\n es = cal_edit_sim([trunc_s[\"target\"]], [trunc_s[\"pred\"]])\n id_tp, id_fp, id_fn = compute_id_match(trunc_s[\"pred_ids\"], trunc_s[\"target_ids\"])\n id_em.append(identifier_em)\n edit_similarities.append(es)\n\n detailed_results.append({\n \"task_id\": trunc_s[\"task_id\"],\n \"em\": em_labels[idx],\n \"es\": es,\n \"id_em\": identifier_em,\n \"id_precision\": id_tp / (id_tp + id_fp) if (id_tp + id_fp) != 0 else 0,\n \"id_recall\": id_tp / (id_tp + id_fn) if (id_tp + id_fn) != 0 else 0,\n \"id_f1\": 2 * id_tp / (2 * id_tp + id_fp + id_fn) if (2 * id_tp + id_fp + id_fn) != 0 else 0,\n })\n\n em_ratio = round(exact_match / len(samples) * 100, 2)\n edit_sim = round(sum(edit_similarities) / len(edit_similarities), 2)\n\n id_em_ratio = round(\n sum(detailed_results[idx]['id_em'] for idx in range(len(detailed_results))) / len(detailed_results) * 100, 2)\n id_precision = round(sum(detailed_results[idx]['id_precision'] for idx in range(len(detailed_results))) / len(\n detailed_results) * 100, 2)\n id_recall = round(\n sum(detailed_results[idx]['id_recall'] for idx in range(len(detailed_results))) / len(detailed_results) * 100,\n 2)\n id_f1 = round(\n sum(detailed_results[idx]['id_f1'] for idx in range(len(detailed_results))) / len(detailed_results) * 100, 2)\n\n print(\n f\"Code Matching: \"\n f\"EM {em_ratio:.2f}, \"\n f\"ES {edit_sim:.2f}\"\n )\n\n print(\n f\"ID matching: \"\n f\"EM {id_em_ratio}, \"\n f\"Precision {id_precision}, \"\n f\"Recall {id_recall}, \"\n f\"F1 {id_f1}\"\n )\n\n with open(f\"{args.output_dir}/detailed_results.json\", 'w') as f:\n for dr in detailed_results:\n f.write(json.dumps(dr) + \"\\n\")\n\n # write the results to a file\n with open(f\"{args.output_dir}/results.json\", 'w') as f:\n res = {\n \"em\": em_ratio,\n \"es\": edit_sim,\n \"id_em\": id_em_ratio,\n \"id_precision\": id_precision,\n \"id_recall\": id_recall,\n \"total\": len(truncated_samples)\n }\n f.write(json.dumps(res, indent=2))" }, { "identifier": "compute_mean_logp", "path": "eval_utils.py", "snippet": "def compute_mean_logp(scores, sequences, pad_token_id):\n assert scores.shape[0] == sequences.shape[0]\n assert scores.shape[1] == sequences.shape[1]\n with torch.no_grad():\n logp_vocab = torch.nn.functional.log_softmax(scores, dim=-1)\n indices = torch.unsqueeze(sequences, dim=-1)\n logp = torch.gather(logp_vocab, dim=-1, index=indices).squeeze(-1)\n sum_logp = torch.cumsum(logp, dim=1) # batch_size, seq_len\n denom = torch.arange(1, sum_logp.shape[1] + 1).reshape(1, -1).to(device=sum_logp.device) # 1, seq_len\n mean_logp = (sum_logp / denom).tolist() # batch_size, seq_len\n sequence_lengths = (sequences != pad_token_id).sum(1).tolist() # batch_size\n mean_logp = [mean_logp[idx][l - 1] for idx, l in enumerate(sequence_lengths)]\n return mean_logp" } ]
import argparse import json import logging import os import numpy as np import torch import custom_generate from accelerate import Accelerator from accelerate.utils import set_seed from datasets import load_dataset from torch.utils.data import DataLoader, SequentialSampler from tqdm import tqdm from transformers import ( AutoTokenizer, AutoModelForCausalLM ) from eval_metric import compute_metric_stmt from eval_utils import compute_mean_logp
3,574
crossfile_context, truncation=True, max_length=args.cfc_seq_length ) features = {"input_ids": [], "attention_mask": []} tokenizer.truncation_side = "left" for idx, prompt in enumerate(examples["prompt"]): allowed_prompt_length = max_prompt_length - len(crossfile_features["input_ids"][idx]) prompt_feats = tokenizer( [prompt], truncation=True, max_length=allowed_prompt_length ) for k, v in prompt_feats.items(): features[k].append(crossfile_features[k][idx] + prompt_feats[k][0]) # pad to max_seq_length tokenizer.padding_side = "left" features = tokenizer.pad(features, padding="max_length", max_length=args.max_seq_length - args.gen_length) features["index"] = examples["index"] return features if args.model_type in ["codelm", "seq2seqlm"]: tokenized_datasets = raw_datasets.map( prepare_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) elif args.model_type == "codelm_cfc": tokenized_datasets = raw_datasets.map( prepare_features_cfc, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) else: raise NotImplementedError("prepare feature functions not implemented for new model type") return tokenized_datasets, index2taskid def model_inference(tokenized_datasets, index2taskid, tokenizer): if args.dtype == 'fp16': dtype = torch.float16 elif args.dtype == 'fp32': dtype = torch.float32 elif args.dtype == 'bf16': dtype = torch.bfloat16 elif args.dtype == 'int8': dtype = torch.int8 else: assert False, f'{args.dtype=} not implemented' if args.model_type in ["codelm", "codelm_cfc"]: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, torch_dtype=dtype, trust_remote_code=True, revision="main" ) else: raise ValueError("Unknown model type") total_samples_cnt = len(tokenized_datasets) logger.info(f"total samples: {total_samples_cnt}") data_sampler = SequentialSampler(tokenized_datasets) dataloader = DataLoader( tokenized_datasets, sampler=data_sampler, collate_fn=custom_data_collator, batch_size=args.batch_size ) model = accelerator.prepare_model(model) dataloader = accelerator.prepare_data_loader(dataloader) if not os.path.isdir(args.output_dir): os.mkdir(args.output_dir) tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else tokenizer.bos_token prompt_length = args.max_seq_length - args.gen_length @torch.no_grad() def generate_completions(batch): output_dict = custom_generate.generate( accelerator.unwrap_model(model), input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_length=args.max_seq_length, temperature=args.temperature, top_k=args.top_k, top_p=args.top_p, do_sample=args.do_sample, num_beams=args.num_beams, num_return_sequences=1, pad_token_id=tokenizer.pad_token_id, return_dict_in_generate=True, output_scores=True ) batch_task_id = batch["index"] batch_pred = accelerator.pad_across_processes( output_dict.sequences, dim=1, pad_index=tokenizer.pad_token_id ) scores = torch.stack(output_dict.scores, dim=1) batch_scores = accelerator.pad_across_processes( scores, dim=1, pad_index=tokenizer.pad_token_id ) # batch_scores.shape = (batch_size x num_gpus x num_return_sequences, max_length) batch_task_id, batch_pred, batch_scores = accelerator.gather((batch_task_id, batch_pred, batch_scores)) batch_pred = batch_pred[:, prompt_length:] generated_texts = tokenizer.batch_decode(batch_pred, skip_special_tokens=True)
# Copyright Amazon.com, Inc. or its affiliates. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) COMMENT_SYMBOL = { "python": "#", "java": "//", "csharp": "//", "typescript": "//" } def custom_data_collator(features): first = features[0] batch = {} for k, v in first.items(): if v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([f[k] for f in features]) elif isinstance(v, np.ndarray): batch[k] = torch.tensor(np.stack([f[k] for f in features])) else: batch[k] = torch.tensor([f[k] for f in features]) if v is not None and isinstance(v, str): batch[k] = [f[k] for f in features] return batch def build_datasets(args, tokenizer): # Initialize the model and tokenizer # when generating, we will use the logits of right-most token to predict the next token # so the padding should be on the left tokenizer.padding_side = "left" tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else tokenizer.bos_token # load the files into Dataset raw_datasets = load_dataset("json", data_files=args.prompt_file, cache_dir=args.cache_dir) raw_datasets = raw_datasets["train"] raw_datasets = raw_datasets.map(lambda example, idx: {'index': idx, **example}, with_indices=True) index2taskid = {idx: md["task_id"] for idx, md in zip(raw_datasets["index"], raw_datasets["metadata"])} column_names = raw_datasets.column_names # Prompt composition def prepare_features(examples): tokenizer.truncation_side = "left" tokenized_inputs = tokenizer( examples["prompt"], padding="max_length", truncation=True, max_length=args.max_seq_length - args.gen_length ) features = {k: t for k, t in tokenized_inputs.items()} features["index"] = examples["index"] return features def prepare_features_cfc(examples): max_prompt_length = args.max_seq_length - args.gen_length use_key = "list" crossfile_context = [] if use_key == "text": crossfile_context = [ex["text"] for ex in examples["crossfile_context"]] else: ls_sym = COMMENT_SYMBOL[args.language] num_chunk_inc_prompt = [] augmented_prompt = 0 for cfc_chunks in examples["crossfile_context"]: cfc_chunks = cfc_chunks["list"] # a list of dict cfc_text = "" if cfc_chunks: # at least 1 relevant cfc_chunk found init_cfc_text = f"{ls_sym} Here are some relevant code fragments from other files of the repo:\n\n" cfc_length = len(tokenizer.tokenize(init_cfc_text)) num_chunk_inc = 0 for cfc_idx, cfc_chunk in enumerate(cfc_chunks): if cfc_chunk["score"] > args.min_cfc_score: add_text = f"{ls_sym} the below code fragment is found in {cfc_chunk['filename']}" + "\n" cfc_lines = cfc_chunk["retrieved_chunk"].split('\n') add_text += "\n".join([f"{ls_sym} {cl}" for cl in cfc_lines if cl]) + "\n\n" # check if adding chunk exceeds max length budget for CFC add_text_len = len(tokenizer.tokenize(add_text)) if cfc_length + add_text_len <= args.cfc_seq_length: cfc_text += add_text cfc_length += add_text_len num_chunk_inc += 1 else: break num_chunk_inc_prompt.append(num_chunk_inc) if num_chunk_inc > 0: cfc_text = init_cfc_text + cfc_text augmented_prompt += 1 crossfile_context.append(cfc_text) logger.info( f"{augmented_prompt} out of {len(examples['crossfile_context'])} prompts are augmented with cross-file context.") tokenizer.truncation_side = "right" crossfile_features = tokenizer( crossfile_context, truncation=True, max_length=args.cfc_seq_length ) features = {"input_ids": [], "attention_mask": []} tokenizer.truncation_side = "left" for idx, prompt in enumerate(examples["prompt"]): allowed_prompt_length = max_prompt_length - len(crossfile_features["input_ids"][idx]) prompt_feats = tokenizer( [prompt], truncation=True, max_length=allowed_prompt_length ) for k, v in prompt_feats.items(): features[k].append(crossfile_features[k][idx] + prompt_feats[k][0]) # pad to max_seq_length tokenizer.padding_side = "left" features = tokenizer.pad(features, padding="max_length", max_length=args.max_seq_length - args.gen_length) features["index"] = examples["index"] return features if args.model_type in ["codelm", "seq2seqlm"]: tokenized_datasets = raw_datasets.map( prepare_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) elif args.model_type == "codelm_cfc": tokenized_datasets = raw_datasets.map( prepare_features_cfc, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) else: raise NotImplementedError("prepare feature functions not implemented for new model type") return tokenized_datasets, index2taskid def model_inference(tokenized_datasets, index2taskid, tokenizer): if args.dtype == 'fp16': dtype = torch.float16 elif args.dtype == 'fp32': dtype = torch.float32 elif args.dtype == 'bf16': dtype = torch.bfloat16 elif args.dtype == 'int8': dtype = torch.int8 else: assert False, f'{args.dtype=} not implemented' if args.model_type in ["codelm", "codelm_cfc"]: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, torch_dtype=dtype, trust_remote_code=True, revision="main" ) else: raise ValueError("Unknown model type") total_samples_cnt = len(tokenized_datasets) logger.info(f"total samples: {total_samples_cnt}") data_sampler = SequentialSampler(tokenized_datasets) dataloader = DataLoader( tokenized_datasets, sampler=data_sampler, collate_fn=custom_data_collator, batch_size=args.batch_size ) model = accelerator.prepare_model(model) dataloader = accelerator.prepare_data_loader(dataloader) if not os.path.isdir(args.output_dir): os.mkdir(args.output_dir) tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else tokenizer.bos_token prompt_length = args.max_seq_length - args.gen_length @torch.no_grad() def generate_completions(batch): output_dict = custom_generate.generate( accelerator.unwrap_model(model), input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_length=args.max_seq_length, temperature=args.temperature, top_k=args.top_k, top_p=args.top_p, do_sample=args.do_sample, num_beams=args.num_beams, num_return_sequences=1, pad_token_id=tokenizer.pad_token_id, return_dict_in_generate=True, output_scores=True ) batch_task_id = batch["index"] batch_pred = accelerator.pad_across_processes( output_dict.sequences, dim=1, pad_index=tokenizer.pad_token_id ) scores = torch.stack(output_dict.scores, dim=1) batch_scores = accelerator.pad_across_processes( scores, dim=1, pad_index=tokenizer.pad_token_id ) # batch_scores.shape = (batch_size x num_gpus x num_return_sequences, max_length) batch_task_id, batch_pred, batch_scores = accelerator.gather((batch_task_id, batch_pred, batch_scores)) batch_pred = batch_pred[:, prompt_length:] generated_texts = tokenizer.batch_decode(batch_pred, skip_special_tokens=True)
mean_logp = compute_mean_logp(batch_scores, batch_pred, tokenizer.pad_token_id)
1
2023-10-16 04:23:03+00:00
4k
uukuguy/multi_loras
multi_loras/dare.py
[ { "identifier": "DeltaWeights", "path": "multi_loras/delta_weights.py", "snippet": "class DeltaWeights:\n \"\"\"\n Functions to compute the delta weights between two models \n \"\"\"\n\n def __init__(\n self,\n base_model: nn.Module = None,\n tuned_model: nn.Module = None,\n params_dict: dict = None,\n exclude_param_names_regex: list = None,\n ):\n \"\"\"\n Task vector. Initialize the task vector from a pretrained model and a tuned model, or\n directly passing the task_vector_param_dict dictionary.\n :param base_model: nn.Module, base model\n :param tuned_model: nn.Module, tuned model\n :param exclude_param_names_regex: list, regular expression of names of parameters that need to be excluded\n :param params_dict: dict, prams dict to initialize self.params_dict\n \"\"\"\n self.params_dict = {}\n\n if params_dict is not None:\n self.params_dict = params_dict\n else:\n base_params_dict = get_model_params(base_model)\n tuned_params_dict = get_model_params(tuned_model)\n for param_name in base_params_dict:\n if not is_exclude_param_name(param_name, exclude_param_names_regex):\n self.params_dict[param_name] = (tuned_params_dict[param_name] - base_params_dict[param_name])\n\n def __add__(self, other):\n \"\"\"\n add task vector\n :param other: DeltaWeights to add, at right side\n :return:\n \"\"\"\n assert isinstance(other, DeltaWeights), \"addition of DeltaWeights can only be done with another DeltaWeights!\"\n new_params_dict = {}\n for param_name in self.params_dict:\n assert (param_name in other.params_dict.keys()), f\"param_name {param_name} is not contained in both params!\"\n new_params_dict[param_name] = (self.params_dict[param_name] + other.param_dict[param_name])\n return DeltaWeights(params_dict=new_params_dict)\n\n def __radd__(self, other):\n \"\"\"\n other + self = self + other\n :param other: DeltaWeights to add, at left side\n :return:\n \"\"\"\n return self.__add__(other)\n\n def combine_with_pretrained_model(self, base_model: nn.Module, scaling_coefficient: float = 1.0):\n \"\"\"\n combine the delta weights with pretrained model\n :param base_model: nn.Module, base model\n :param scaling_coefficient: float, scaling coefficient to merge the delta weights\n :return:\n \"\"\"\n base_params_dict = get_model_params(base_model)\n\n with torch.no_grad():\n merged_params = {}\n for param_name in self.params_dict:\n merged_params[param_name] = (\n base_params_dict[param_name] + scaling_coefficient * self.params_dict[param_name]\n )\n\n return merged_params\n\n def save(self, save_path: str):\n \"\"\"\n save the delta weights to a file\n :param save_path: str, path to save the delta weights\n :return:\n \"\"\"\n os.makedirs(save_path, exist_ok=True)\n for key, param in tqdm(self.params_dict.items(), ncols=100, desc=\"Params\"):\n # conver tensor to numpy array\n layer_param = param.detach().cpu().numpy()\n np.save(f\"{save_path}/{key}.npy\", layer_param)" }, { "identifier": "copy_params_to_model", "path": "multi_loras/delta_weights.py", "snippet": "def copy_params_to_model(params_dict: dict, model: nn.Module):\n \"\"\"\n Copy params to model\n \"\"\"\n for param_name, param_value in model.named_parameters():\n if param_name in params_dict:\n param_value.data.copy_(params_dict[param_name])" } ]
from tqdm import tqdm from transformers import AutoModelForCausalLM, AutoTokenizer from .delta_weights import DeltaWeights, copy_params_to_model import torch import torch.nn as nn
2,118
# DARE (Drop And REscale) # Language Models are Super Mario: Absorbing Abilities from Homologous Models as a Free Lunch # https://arxiv.org/abs/2311.03099 def drop_and_rescale_tensor( input_tensor: torch.Tensor, mask_rate: float, use_rescale: bool, mask_strategy: str ): """ mask the input with mask rate :param input_tensor: Tensor, input tensor :param mask_rate: float, mask rate :param use_rescale: boolean, whether to rescale the input by 1 / (1 - mask_rate) :param mask_strategy: str, mask strategy, can be "random" and "magnitude" :return: """ assert ( 0.0 <= mask_rate <= 1.0 ), f"wrong range of mask_rate {mask_rate}, should be [0.0, 1.0]!" if mask_strategy == "random": mask = torch.bernoulli( torch.full_like(input=input_tensor.float(), fill_value=mask_rate) ).to(input_tensor.device) masked_input_tensor = input_tensor * (1 - mask) else: assert ( mask_strategy == "magnitude" ), f"wrong setting for mask_strategy {mask_strategy}!" original_shape = input_tensor.shape input_tensor = input_tensor.flatten() num_mask_params = int(len(input_tensor) * mask_rate) # Tensor, shape (1, ), # find the num_mask_params-th smallest magnitude element of all the parameters in the model kth_values, _ = input_tensor.abs().kthvalue( k=num_mask_params, dim=0, keepdim=True ) # Tensor, shape (num_total_params, ), # where True is for parameters that we want to perform mask mask = input_tensor.abs() <= kth_values masked_input_tensor = input_tensor * (~mask) masked_input_tensor = masked_input_tensor.reshape(original_shape) if use_rescale and mask_rate != 1.0: masked_input_tensor = torch.div(input=masked_input_tensor, other=1 - mask_rate) return masked_input_tensor def drop_and_rescale_model( tuned_model: nn.Module, base_model: nn.Module, exclude_param_names_regex: list = None, weight_mask_rate: float = 0.85, use_weight_rescale: bool = True, mask_strategy: str = "random", scaling_coefficient: float = 1.0, ): """ mask model weights :param tuned_model: nn.Module, the tuned model :param base_model: nn.Module, the base model :param exclude_param_names_regex: list, regular expression of names of parameters that need to be excluded :param weight_mask_rate: float, weight mask rate :param use_weight_rescale: boolean, whether to rescale the weight by 1 / (1 - weight_mask_rate) :param mask_strategy: str, mask strategy, can be "random" and "magnitude" :return: """ # get weights that need to be masked delta_weights = DeltaWeights( base_model=base_model, tuned_model=tuned_model, exclude_param_names_regex=exclude_param_names_regex, ) model_param_dict = delta_weights.params_dict with torch.no_grad(): dare_params_dict = {} for param_name, param_value in tqdm(model_param_dict.items(), ncols=0): dare_params_dict[param_name] = drop_and_rescale_tensor( input_tensor=param_value, mask_rate=weight_mask_rate, use_rescale=use_weight_rescale, mask_strategy=mask_strategy, ) new_delta_weights = DeltaWeights(params_dict=dare_params_dict) # combine with parameters of the merged model based on scaling coefficient dare_model_weights = new_delta_weights.combine_with_pretrained_model( base_model=base_model, scaling_coefficient=scaling_coefficient ) return dare_model_weights def do_dare(args): """ This function is used to do drop and rescale for the tuned model """ print(f"Loading base model from {args.base_model_name_or_path} ...") base_model = AutoModelForCausalLM.from_pretrained( args.base_model_name_or_path, device_map=args.device_map, trust_remote_code=True ).half() print(f"Loading tuned model from {args.tuned_model_name_or_path} ...") tuned_model = AutoModelForCausalLM.from_pretrained( args.tuned_model_name_or_path, device_map=args.device_map, trust_remote_code=True, ).half() tokenizer = AutoTokenizer.from_pretrained(args.tuned_model_name_or_path, trust_remote_code=True) dare_kwargs = { "weight_mask_rate": args.dare_weight_mask_rate, "use_weight_rescale": args.dare_use_weight_rescale, "mask_strategy": args.dare_mask_strategy, "scaling_coefficient": args.dare_scaling_coefficient, } print( f"Do drop and rescale with {dare_kwargs=} with {args.tuned_model_name_or_path} ..." ) model_weights = drop_and_rescale_model( tuned_model=tuned_model, base_model=base_model, **dare_kwargs, )
#!/usr/bon/env python """ This script is used to do drop and rescale for the tuned model """ default_dare_kwargs = { "weight_mask_rate": 0.85, "use_weight_rescale": True, "mask_strategy": "random", "scaling_coefficient": 1.0, } # DARE (Drop And REscale) # Language Models are Super Mario: Absorbing Abilities from Homologous Models as a Free Lunch # https://arxiv.org/abs/2311.03099 def drop_and_rescale_tensor( input_tensor: torch.Tensor, mask_rate: float, use_rescale: bool, mask_strategy: str ): """ mask the input with mask rate :param input_tensor: Tensor, input tensor :param mask_rate: float, mask rate :param use_rescale: boolean, whether to rescale the input by 1 / (1 - mask_rate) :param mask_strategy: str, mask strategy, can be "random" and "magnitude" :return: """ assert ( 0.0 <= mask_rate <= 1.0 ), f"wrong range of mask_rate {mask_rate}, should be [0.0, 1.0]!" if mask_strategy == "random": mask = torch.bernoulli( torch.full_like(input=input_tensor.float(), fill_value=mask_rate) ).to(input_tensor.device) masked_input_tensor = input_tensor * (1 - mask) else: assert ( mask_strategy == "magnitude" ), f"wrong setting for mask_strategy {mask_strategy}!" original_shape = input_tensor.shape input_tensor = input_tensor.flatten() num_mask_params = int(len(input_tensor) * mask_rate) # Tensor, shape (1, ), # find the num_mask_params-th smallest magnitude element of all the parameters in the model kth_values, _ = input_tensor.abs().kthvalue( k=num_mask_params, dim=0, keepdim=True ) # Tensor, shape (num_total_params, ), # where True is for parameters that we want to perform mask mask = input_tensor.abs() <= kth_values masked_input_tensor = input_tensor * (~mask) masked_input_tensor = masked_input_tensor.reshape(original_shape) if use_rescale and mask_rate != 1.0: masked_input_tensor = torch.div(input=masked_input_tensor, other=1 - mask_rate) return masked_input_tensor def drop_and_rescale_model( tuned_model: nn.Module, base_model: nn.Module, exclude_param_names_regex: list = None, weight_mask_rate: float = 0.85, use_weight_rescale: bool = True, mask_strategy: str = "random", scaling_coefficient: float = 1.0, ): """ mask model weights :param tuned_model: nn.Module, the tuned model :param base_model: nn.Module, the base model :param exclude_param_names_regex: list, regular expression of names of parameters that need to be excluded :param weight_mask_rate: float, weight mask rate :param use_weight_rescale: boolean, whether to rescale the weight by 1 / (1 - weight_mask_rate) :param mask_strategy: str, mask strategy, can be "random" and "magnitude" :return: """ # get weights that need to be masked delta_weights = DeltaWeights( base_model=base_model, tuned_model=tuned_model, exclude_param_names_regex=exclude_param_names_regex, ) model_param_dict = delta_weights.params_dict with torch.no_grad(): dare_params_dict = {} for param_name, param_value in tqdm(model_param_dict.items(), ncols=0): dare_params_dict[param_name] = drop_and_rescale_tensor( input_tensor=param_value, mask_rate=weight_mask_rate, use_rescale=use_weight_rescale, mask_strategy=mask_strategy, ) new_delta_weights = DeltaWeights(params_dict=dare_params_dict) # combine with parameters of the merged model based on scaling coefficient dare_model_weights = new_delta_weights.combine_with_pretrained_model( base_model=base_model, scaling_coefficient=scaling_coefficient ) return dare_model_weights def do_dare(args): """ This function is used to do drop and rescale for the tuned model """ print(f"Loading base model from {args.base_model_name_or_path} ...") base_model = AutoModelForCausalLM.from_pretrained( args.base_model_name_or_path, device_map=args.device_map, trust_remote_code=True ).half() print(f"Loading tuned model from {args.tuned_model_name_or_path} ...") tuned_model = AutoModelForCausalLM.from_pretrained( args.tuned_model_name_or_path, device_map=args.device_map, trust_remote_code=True, ).half() tokenizer = AutoTokenizer.from_pretrained(args.tuned_model_name_or_path, trust_remote_code=True) dare_kwargs = { "weight_mask_rate": args.dare_weight_mask_rate, "use_weight_rescale": args.dare_use_weight_rescale, "mask_strategy": args.dare_mask_strategy, "scaling_coefficient": args.dare_scaling_coefficient, } print( f"Do drop and rescale with {dare_kwargs=} with {args.tuned_model_name_or_path} ..." ) model_weights = drop_and_rescale_model( tuned_model=tuned_model, base_model=base_model, **dare_kwargs, )
copy_params_to_model(model_weights, base_model)
1
2023-10-16 02:39:47+00:00
4k
aws/res
tasks/build.py
[ { "identifier": "BuildTool", "path": "tasks/tools/build_tool.py", "snippet": "class BuildTool:\n \"\"\"\n IDEA Project Build Tool\n Handles building of individual projects under <PROJECT_ROOT>/source/idea/*\n\n Works based on standard idea directory structure:\n <PROJECT_ROOT>/\n + source/\n + idea/\n + <project-name>/\n + src/\n + <projectname>/\n + <projectname>_meta/\n + __init__.py\n + setup.py\n + resources/\n + config/\n + webapp?/\n + scripts/\n\n Build outputs will be available under:\n <PROJECT_ROOT>/\n + build/\n + <project-name>/\n \"\"\"\n\n def __init__(self, c: Context, app_name: str):\n self.c = c\n\n if app_name is None:\n raise idea.exceptions.invalid_params('app_name is required')\n\n app_dir = os.path.join(idea.props.project_source_dir, app_name)\n if not os.path.isdir(app_dir):\n raise idea.exceptions.invalid_params(f'project_dir: {app_dir} not found or does not exist')\n\n self.app_dir = app_dir\n self.release_version = idea.props.idea_release_version\n self._given_app_name = app_name\n self._app_name: Optional[str] = None\n\n @property\n def app_name(self) -> str:\n if self._app_name is not None:\n return self._app_name\n if self.has_src():\n self._app_name = idea.utils.get_package_meta(self.c, self.src_dir, 'name')\n return self._app_name\n else:\n return self._given_app_name\n\n @property\n def app_version(self) -> str:\n return idea.props.idea_release_version\n\n @property\n def output_dir(self) -> str:\n return os.path.join(idea.props.project_build_dir, self.output_archive_basename)\n\n @property\n def output_archive_basename(self) -> str:\n return self.app_name\n\n @property\n def output_archive_name(self) -> str:\n return f'{self.output_archive_basename}.tar.gz'\n\n @property\n def output_archive_file(self) -> str:\n return os.path.join(idea.props.project_build_dir, self.output_archive_name)\n\n @property\n def src_dir(self) -> str:\n return os.path.join(self.app_dir, 'src')\n\n def has_src(self) -> bool:\n return os.path.isdir(self.src_dir)\n\n @property\n def webapp_dir(self) -> str:\n return os.path.join(self.app_dir, 'webapp')\n\n @property\n def webapp_build_dir(self) -> str:\n return os.path.join(self.webapp_dir, 'build')\n\n def has_webapp(self) -> bool:\n return os.path.isdir(self.webapp_dir)\n\n @property\n def node_modules_dir(self) -> str:\n return os.path.join(self.webapp_dir, 'node_modules')\n\n def are_node_modules_installed(self) -> bool:\n return os.path.isdir(self.node_modules_dir)\n\n @property\n def resources_dir(self) -> str:\n return os.path.join(self.app_dir, 'resources')\n\n def has_resources(self) -> bool:\n return os.path.isdir(self.resources_dir)\n\n @property\n def install_dir(self) -> str:\n return os.path.join(self.app_dir, 'install')\n\n def has_install(self) -> bool:\n return os.path.isdir(self.install_dir)\n\n @property\n def config_dir(self) -> str:\n return os.path.join(self.app_dir, 'config')\n\n def has_config(self) -> bool:\n return os.path.isdir(self.config_dir)\n\n @property\n def bootstrap_dir(self) -> str:\n return os.path.join(idea.props.project_source_dir, 'idea-bootstrap')\n\n def find_app_meta_file(self) -> str:\n src_dir = self.src_dir\n files = os.listdir(src_dir)\n for file in files:\n if file.endswith('_meta'):\n return os.path.join(src_dir, file, '__init__.py')\n raise idea.exceptions.build_failed(f'could not find app meta file (__init__.py) in: {src_dir}')\n\n def clean(self):\n if self.has_src():\n src_dist = os.path.join(self.src_dir, 'dist')\n if os.path.isdir(src_dist):\n idea.console.print(f'deleting {src_dist} ...')\n shutil.rmtree(src_dist, ignore_errors=True)\n\n egg_name = self.app_name.replace('-', '_')\n egg_info_name = f'{egg_name}.egg-info'\n src_egg = os.path.join(self.src_dir, egg_info_name)\n if os.path.isdir(src_egg):\n idea.console.print(f'deleting {src_egg} ...')\n shutil.rmtree(os.path.join(self.src_dir, egg_info_name), ignore_errors=True)\n\n if self.has_webapp():\n skip_web = os.environ.get('IDEA_SKIP_WEB_BUILD', '0')\n if skip_web == '0':\n if os.path.isdir(self.webapp_build_dir):\n idea.console.print(f'deleting {self.webapp_build_dir} ...')\n shutil.rmtree(self.webapp_build_dir, ignore_errors=True)\n\n if os.path.isdir(self.output_dir):\n idea.console.print(f'deleting {self.output_dir} ...')\n shutil.rmtree(self.output_dir)\n\n if os.path.isfile(self.output_archive_file):\n idea.console.print(f'deleting {self.output_archive_file} ...')\n os.remove(self.output_archive_file)\n\n if self.app_name == 'idea-administrator':\n files = os.listdir(idea.props.deployment_administrator_dir)\n for file in files:\n if file == 'Dockerfile' or file == 'cfn_params_2_values.sh':\n continue\n file_path = os.path.join(idea.props.deployment_administrator_dir, file)\n if os.path.isfile(file_path):\n idea.console.print(f'deleting {file_path} ...')\n os.remove(os.path.join(idea.props.deployment_administrator_dir, file))\n elif os.path.isdir(file_path):\n idea.console.print(f'deleting {file_path} ...')\n shutil.rmtree(file_path)\n\n def pre_build_src(self):\n if not self.has_src():\n return\n PythonAppMetaFileUpdater(meta_file=self.find_app_meta_file()).update()\n\n def build_src(self):\n if not self.has_src():\n return\n with self.c.cd(self.src_dir):\n self.c.run(f'{idea.utils.idea_python} setup.py sdist')\n\n def pre_build_webapp(self):\n if not self.has_webapp():\n return\n webapp_dir = self.webapp_dir\n\n app_name = self.app_name\n app_version = self.app_version\n release_version = self.release_version\n\n NpmPackageJsonFileUpdater(\n package_json_file=os.path.join(webapp_dir, 'package.json'),\n app_name=app_name,\n app_version=app_version,\n release_version=release_version\n ).update()\n\n WebAppEnvFileUpdater(\n webapp_env_file=os.path.join(webapp_dir, '.env'),\n app_name=app_name,\n app_version=app_version,\n release_version=release_version\n ).update()\n\n def build_webapp(self):\n\n skip_web = os.environ.get('IDEA_SKIP_WEB_BUILD', '0')\n if skip_web == '1':\n return\n\n if not self.has_webapp():\n return\n\n with self.c.cd(self.webapp_dir):\n self.c.run('yarn install && yarn build')\n\n def copy_build_outputs(self):\n\n output_dir = self.output_dir\n shutil.rmtree(output_dir, ignore_errors=True)\n os.makedirs(output_dir, exist_ok=True)\n\n # src (sdist)\n if self.has_src():\n app_name = self.app_name\n # python does not accept server and does some funky normalization on the semver.\n # this is only applicable for pre-releases or dev branches. e.g. 3.0.0-dev.1 gets converted to 3.0.0.dev1\n normalized_python_app_version = idea.utils.get_package_meta(self.c, self.src_dir, 'version')\n sdist_name = f'{app_name}-{normalized_python_app_version}.tar.gz'\n sdist = os.path.join(self.src_dir, 'dist', sdist_name)\n shutil.copy(sdist, os.path.join(output_dir, f'{app_name}-lib.tar.gz'))\n\n # webapp\n if self.has_webapp():\n shutil.copytree(self.webapp_build_dir, os.path.join(output_dir, 'webapp'))\n\n # config\n if self.has_config():\n shutil.copytree(self.config_dir, os.path.join(output_dir, 'config'))\n\n # resources\n if self.has_resources():\n shutil.copytree(self.resources_dir, os.path.join(output_dir, 'resources'))\n shutil.copytree(self.bootstrap_dir, os.path.join(output_dir, 'resources', 'bootstrap'))\n\n def build(self):\n\n idea.console.print_header_block(f'build {self.app_name}')\n\n self.pre_build_src()\n self.build_src()\n self.pre_build_webapp()\n self.build_webapp()\n\n # copy build outputs to project build dir\n self.copy_build_outputs()" }, { "identifier": "cluster_manager", "path": "tasks/apispec.py", "snippet": "@task\ndef cluster_manager(_, output_file=None, server_url=None):\n # type: (Context, str, str) -> None\n \"\"\"\n cluster-manager api spec\n \"\"\"\n from ideadatamodel import (\n OPEN_API_SPEC_ENTRIES_AUTH,\n OPEN_API_SPEC_ENTRIES_EMAIL_TEMPLATES,\n OPEN_API_SPEC_ENTRIES_PROJECTS,\n OPEN_API_SPEC_ENTRIES_CLUSTER_SETTINGS,\n OPEN_API_SPEC_ENTRIES_FILE_BROWSER,\n OPEN_API_SPEC_ENTRIES_FILESYSTEM,\n OPEN_API_SPEC_ENTRIES_SNAPSHOTS\n )\n\n spec_entries = []\n spec_entries += OPEN_API_SPEC_ENTRIES_AUTH\n spec_entries += OPEN_API_SPEC_ENTRIES_EMAIL_TEMPLATES\n spec_entries += OPEN_API_SPEC_ENTRIES_PROJECTS\n spec_entries += OPEN_API_SPEC_ENTRIES_FILESYSTEM\n spec_entries += OPEN_API_SPEC_ENTRIES_CLUSTER_SETTINGS\n spec_entries += OPEN_API_SPEC_ENTRIES_FILE_BROWSER\n spec_entries += OPEN_API_SPEC_ENTRIES_SNAPSHOTS\n\n api_doc_file = os.path.join(idea.props.cluster_manager_project_dir, 'resources', 'api', 'api_doc.yml')\n\n _build_output(\n module=constants.MODULE_CLUSTER_MANAGER,\n api_doc_file=api_doc_file,\n spec_entries=spec_entries,\n server_url=server_url,\n output_file=output_file,\n enable_file_transfer_entries=True\n )" }, { "identifier": "virtual_desktop_controller", "path": "tasks/apispec.py", "snippet": "@task\ndef virtual_desktop_controller(_, output_file=None, server_url=None):\n # type: (Context, str, str) -> None\n \"\"\"\n virtual desktop controller api spec\n \"\"\"\n from ideadatamodel import OPEN_API_SPEC_ENTRIES_VIRTUAL_DESKTOP\n\n spec_entries = OPEN_API_SPEC_ENTRIES_VIRTUAL_DESKTOP\n\n api_doc_file = os.path.join(idea.props.virtual_desktop_project_dir, 'resources', 'api', 'api_doc.yml')\n\n _build_output(\n module=constants.MODULE_VIRTUAL_DESKTOP_CONTROLLER,\n api_doc_file=api_doc_file,\n spec_entries=spec_entries,\n server_url=server_url,\n output_file=output_file\n )" } ]
import tasks.idea as idea import os import shutil from tasks.tools.build_tool import BuildTool from tasks.apispec import ( cluster_manager as apispec_cluster_manager, virtual_desktop_controller as apispec_virtual_desktop_controller ) from invoke import task, Context
3,258
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # with the License. A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions # and limitations under the License. @task def data_model(c): """ build data-model """ BuildTool(c, 'idea-data-model').build() @task def sdk(c): # type: (Context) -> None """ build sdk """ BuildTool(c, 'idea-sdk').build() @task def administrator(c): # type: (Context) -> None """ build administrator """ BuildTool(c, 'idea-administrator').build() @task def cluster_manager(c): # type: (Context) -> None """ build cluster manager """ tool = BuildTool(c, 'idea-cluster-manager') tool.build() apispec_cluster_manager(c, output_file=os.path.join(tool.output_dir, 'resources', 'api', 'openapi.yml')) def dcv_connection_gateway(c): # type: (Context) -> None """ build dcv connection gateway """ tool = BuildTool(c, 'idea-dcv-connection-gateway') output_dir = tool.output_dir shutil.rmtree(output_dir, ignore_errors=True) os.makedirs(output_dir, exist_ok=True) shutil.copytree(idea.props.dcv_connection_gateway_dir, os.path.join(tool.output_dir, 'static_resources')) @task def virtual_desktop_controller(c): # type: (Context) -> None """ build virtual desktop controller """ tool = BuildTool(c, 'idea-virtual-desktop-controller') tool.build()
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # with the License. A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions # and limitations under the License. @task def data_model(c): """ build data-model """ BuildTool(c, 'idea-data-model').build() @task def sdk(c): # type: (Context) -> None """ build sdk """ BuildTool(c, 'idea-sdk').build() @task def administrator(c): # type: (Context) -> None """ build administrator """ BuildTool(c, 'idea-administrator').build() @task def cluster_manager(c): # type: (Context) -> None """ build cluster manager """ tool = BuildTool(c, 'idea-cluster-manager') tool.build() apispec_cluster_manager(c, output_file=os.path.join(tool.output_dir, 'resources', 'api', 'openapi.yml')) def dcv_connection_gateway(c): # type: (Context) -> None """ build dcv connection gateway """ tool = BuildTool(c, 'idea-dcv-connection-gateway') output_dir = tool.output_dir shutil.rmtree(output_dir, ignore_errors=True) os.makedirs(output_dir, exist_ok=True) shutil.copytree(idea.props.dcv_connection_gateway_dir, os.path.join(tool.output_dir, 'static_resources')) @task def virtual_desktop_controller(c): # type: (Context) -> None """ build virtual desktop controller """ tool = BuildTool(c, 'idea-virtual-desktop-controller') tool.build()
apispec_virtual_desktop_controller(c, output_file=os.path.join(tool.output_dir, 'resources', 'api', 'openapi.yml'))
0
2023-10-20 17:11:30+00:00
4k
cvlab-yonsei/ACLS
tools/train.py
[ { "identifier": "Trainer", "path": "calibrate/engine/trainer.py", "snippet": "class Trainer:\n def __init__(self, cfg: DictConfig) -> None:\n self.cfg = cfg\n self.work_dir = self.cfg.work_dir\n self.device = torch.device(self.cfg.device)\n self.build_data_loader()\n self.build_model()\n self.build_solver()\n self.build_meter()\n\n def build_data_loader(self) -> None:\n # data pipeline\n self.train_loader, self.val_loader = instantiate(self.cfg.data.object.trainval)\n logger.info(\"Data pipeline initialized\")\n\n def build_model(self) -> None:\n # network\n self.model = instantiate(self.cfg.model.object)\n self.model.to(self.device)\n self.loss_func = instantiate(self.cfg.loss.object)\n self.loss_func.to(self.device)\n logger.info(self.loss_func)\n logger.info(\"Model initialized\")\n\n def build_solver(self) -> None:\n # build solver\n self.optimizer = instantiate(\n self.cfg.optim.object, self.model.parameters()\n )\n self.scheduler = instantiate(\n self.cfg.scheduler.object, self.optimizer\n )\n logger.info(\"Solver initialized\")\n\n def start_or_resume(self):\n if self.cfg.train.resume:\n self.start_epoch, self.best_epoch, self.best_score = (\n load_train_checkpoint(\n self.work_dir, self.device, self.model,\n optimizer=self.optimizer,\n scheduler=self.scheduler\n )\n )\n else:\n self.start_epoch, self.best_epoch, self.best_score = 0, -1, None\n self.max_epoch = self.cfg.train.max_epoch\n\n def build_meter(self):\n self.batch_time_meter = AverageMeter()\n self.data_time_meter = AverageMeter()\n self.num_classes = self.cfg.model.num_classes\n if hasattr(self.loss_func, \"names\"):\n self.loss_meter = LossMeter(\n num_terms=len(self.loss_func.names),\n names=self.loss_func.names\n )\n else:\n self.loss_meter = LossMeter()\n self.evaluator = ClassificationEvaluator(self.num_classes)\n self.calibrate_evaluator = CalibrateEvaluator(\n self.num_classes,\n num_bins=self.cfg.calibrate.num_bins,\n device=self.device,\n )\n self.logits_evaluator = LogitsEvaluator()\n # self.probs_evaluator = ProbsEvaluator(self.num_classes)\n\n def reset_meter(self):\n self.batch_time_meter.reset()\n self.data_time_meter.reset()\n self.loss_meter.reset()\n self.evaluator.reset()\n self.calibrate_evaluator.reset()\n self.logits_evaluator.reset()\n\n def log_iter_info(self, iter, max_iter, epoch, phase=\"Train\"):\n log_dict = {}\n log_dict[\"data_time\"] = self.data_time_meter.val\n log_dict[\"batch_time\"] = self.batch_time_meter.val\n log_dict.update(self.loss_meter.get_vals())\n log_dict.update(self.evaluator.curr_score())\n log_dict.update(self.logits_evaluator.curr_score())\n # log_dict.update(self.probs_evaluator.curr_score())\n logger.info(\"{} Iter[{}/{}][{}]\\t{}\".format(\n phase, iter + 1, max_iter, epoch + 1,\n json.dumps(round_dict(log_dict))\n ))\n\n def log_epoch_info(self, epoch, phase=\"Train\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n log_dict[\"lr\"] = get_lr(self.optimizer)\n log_dict.update(self.loss_meter.get_avgs())\n metric, table_data = self.evaluator.mean_score(print=False)\n log_dict.update(metric)\n log_dict.update(self.logits_evaluator.mean_score())\n # log_dict.update(self.probs_evaluator.mean_score())\n logger.info(\"{} Epoch[{}]\\t{}\".format(\n phase, epoch + 1, json.dumps(round_dict(log_dict))\n ))\n\n def log_eval_epoch_info(self, epoch, phase=\"Val\"):\n log_dict = {}\n log_dict[\"samples\"] = self.evaluator.num_samples()\n log_dict.update(self.loss_meter.get_avgs())\n classify_metric, classify_table_data = self.evaluator.mean_score(print=False)\n log_dict.update(classify_metric)\n calibrate_metric, calibrate_table_data = self.calibrate_evaluator.mean_score(print=False)\n log_dict.update(calibrate_metric)\n log_dict.update(self.logits_evaluator.mean_score())\n # log_dict.update(self.probs_evaluator.mean_score())\n logger.info(\"{} Epoch[{}]\\t{}\".format(\n phase, epoch + 1, json.dumps(round_dict(log_dict))\n ))\n logger.info(\"\\n\" + AsciiTable(classify_table_data).table)\n logger.info(\"\\n\" + AsciiTable(calibrate_table_data).table)\n\n\n def train_epoch(self, epoch: int):\n self.reset_meter()\n self.model.train()\n\n max_iter = len(self.train_loader)\n\n end = time.time()\n for i, (inputs, labels) in enumerate(self.train_loader):\n # compute the time for data loading\n self.data_time_meter.update(time.time() - end)\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n outputs = self.model(inputs)\n loss = self.loss_func(outputs, labels)\n if isinstance(loss, tuple):\n loss_total = loss[0]\n else:\n loss_total = loss\n # backward\n self.optimizer.zero_grad()\n loss_total.backward()\n if self.cfg.train.clip_grad_norm:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 2)\n self.optimizer.step()\n # metric\n self.loss_meter.update(loss, inputs.size(0))\n predicts = F.softmax(outputs, dim=1)\n # pred_labels = torch.argmax(predicts, dim=1)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels)\n )\n self.logits_evaluator.update(to_numpy(outputs))\n # self.probs_evaluator.update(to_numpy(predicts))\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n if (i + 1) % self.cfg.log_period == 0:\n self.log_iter_info(i, max_iter, epoch)\n end = time.time()\n self.log_epoch_info(epoch)\n\n @torch.no_grad()\n def eval_epoch(\n self, data_loader, epoch,\n phase=\"Val\",\n temp=1.0,\n post_temp=False\n ):\n self.reset_meter()\n self.model.eval()\n\n max_iter = len(data_loader)\n end = time.time()\n for i, (inputs, labels) in enumerate(data_loader):\n inputs, labels = inputs.to(self.device), labels.to(self.device)\n # forward\n outputs = self.model(inputs)\n if post_temp:\n outputs = outputs / temp\n loss = self.loss_func(outputs, labels)\n # metric\n self.loss_meter.update(loss)\n self.calibrate_evaluator.update(outputs, labels)\n self.logits_evaluator.update(to_numpy(outputs))\n predicts = F.softmax(outputs, dim=1)\n self.evaluator.update(\n to_numpy(predicts), to_numpy(labels)\n )\n # measure elapsed time\n self.batch_time_meter.update(time.time() - end)\n # logging\n if (i + 1) % self.cfg.log_period == 0:\n self.log_iter_info(i, max_iter, epoch, phase)\n end = time.time()\n self.log_eval_epoch_info(epoch, phase)\n\n return self.loss_meter.avg(0), self.evaluator.mean_score(all_metric=False)[0]\n\n def train(self):\n self.start_or_resume()\n logger.info(\n \"Everything is perfect so far. Let's start training. Good luck!\"\n )\n for epoch in range(self.start_epoch, self.max_epoch):\n logger.info(\"=\" * 20)\n logger.info(\" Start epoch {}\".format(epoch + 1))\n logger.info(\"=\" * 20)\n self.train_epoch(epoch)\n val_loss, val_score = self.eval_epoch(self.val_loader, epoch, phase=\"Val\")\n # run lr scheduler\n self.scheduler.step()\n if self.best_score is None or val_score > self.best_score:\n self.best_score, self.best_epoch = val_score, epoch\n best_checkpoint = True\n else:\n best_checkpoint = False\n save_checkpoint(\n self.work_dir, self.model, self.optimizer, self.scheduler,\n epoch=epoch,\n best_checkpoint=best_checkpoint,\n val_score=val_score,\n keep_checkpoint_num=self.cfg.train.keep_checkpoint_num,\n keep_checkpoint_interval=self.cfg.train.keep_checkpoint_interval\n )\n # logging best performance on val so far\n logger.info(\n \"Epoch[{}]\\tBest {} on Val : {:.4f} at epoch {}\".format(\n epoch + 1, self.evaluator.main_metric(),\n self.best_score, self.best_epoch + 1\n )\n )\n\n def post_temperature(self):\n model_with_temp = ModelWithTemperature(self.model, device=self.device)\n model_with_temp.set_temperature(self.val_loader)\n temp = model_with_temp.get_temperature()\n return temp\n\n def test(self):\n logger.info(\"We are almost done : final testing ...\")\n self.test_loader = instantiate(self.cfg.data.object.test)\n # test best pth\n epoch = self.best_epoch\n logger.info(\"#################\")\n logger.info(\" Test at best epoch {}\".format(epoch + 1))\n logger.info(\"#################\")\n logger.info(\"Best epoch[{}] :\".format(epoch + 1))\n load_checkpoint(\n osp.join(self.work_dir, \"best.pth\"), self.model, self.device\n )\n self.eval_epoch(self.test_loader, epoch, phase=\"Test\")\n temp = self.post_temperature()\n self.eval_epoch(self.test_loader, epoch, phase=\"TestPT\", temp=temp, post_temp=True)\n\n def run(self):\n self.train()\n self.test()" }, { "identifier": "set_random_seed", "path": "calibrate/utils/misc.py", "snippet": "def set_random_seed(seed: int = None, deterministic: bool = False):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" } ]
import os import sys import logging import hydra from omegaconf import DictConfig, OmegaConf from omegaconf.omegaconf import open_dict from calibrate.engine import Trainer from calibrate.utils import set_random_seed
2,739
logger = logging.getLogger(__name__) TRAINERS = { "cv": Trainer } @hydra.main(config_path="../configs", config_name="defaults") def main(cfg: DictConfig): logger.info("Launch command : ") logger.info(" ".join(sys.argv)) with open_dict(cfg): cfg.work_dir = os.getcwd() logger.info("\n" + OmegaConf.to_yaml(cfg))
logger = logging.getLogger(__name__) TRAINERS = { "cv": Trainer } @hydra.main(config_path="../configs", config_name="defaults") def main(cfg: DictConfig): logger.info("Launch command : ") logger.info(" ".join(sys.argv)) with open_dict(cfg): cfg.work_dir = os.getcwd() logger.info("\n" + OmegaConf.to_yaml(cfg))
set_random_seed(
1
2023-10-23 09:55:13+00:00
4k
myshell-ai/AIlice
ailice/core/AProcessor.py
[ { "identifier": "config", "path": "ailice/common/AConfig.py", "snippet": "class AConfig():\n def __init__(self):\n def Initialize(self, needOpenaiGPTKey = False):\n def Load(self, configFile: str) -> dict:\n def Store(self, configFile: str):" }, { "identifier": "llmPool", "path": "ailice/core/llm/ALLMPool.py", "snippet": "class ALLMPool():\n def __init__(self):\n def ParseID(self, id):\n def Init(self, llmIDs: [str]):\n def GetModel(self, modelID: str):" }, { "identifier": "promptsManager", "path": "ailice/common/APrompts.py", "snippet": "class APromptsManager():\n def __init__(self):\n def RegisterPrompt(self, promptClass):\n def __getitem__(self, promptName: str):\n def __iter__(self):" }, { "identifier": "clientPool", "path": "ailice/common/ARemoteAccessors.py", "snippet": "class AClientPool():\n def __init__(self):\n def Init(self):\n def GetClient(self, moduleAddr: str):" }, { "identifier": "AConversations", "path": "ailice/core/AConversation.py", "snippet": "class AConversations():\n def __init__(self):\n self.conversations: list[dict[str,str]] = []\n return\n \n def Add(self, role: str, msg: str):\n self.conversations.append({\"role\": role, \"msg\": msg})\n return\n \n def GetConversations(self, frm=0):\n s = (2*frm) if (frm >= 0) or ('ASSISTANT' == self.conversations[-1]['role']) else (2*frm+1)\n return self.conversations[s:]\n \n def __len__(self):\n return (len(self.conversations)+1) // 2\n \n def ToJson(self) -> str:\n return self.conversations" }, { "identifier": "AInterpreter", "path": "ailice/core/AInterpreter.py", "snippet": "class AInterpreter():\n def __init__(self):\n self.actions = {}#nodeType: {\"func\": func}\n self.patterns = {}#nodeType: [(pattern,isEntry)]\n self.TypeMap = {\"str\": str,\n \"int\": int,\n \"float\": float,\n \"bool\": bool}\n return\n \n def RegisterAction(self, nodeType: str, action: dict):\n if \"signatureExpr\" in action:\n funcName, argPairs, retType = ParseSignatureExpr(action['signatureExpr'])\n parameters = [Parameter(name=argName,\n kind=Parameter.POSITIONAL_OR_KEYWORD,\n annotation=self.TypeMap[argType]) for argName, argType in argPairs]\n retAnnotation = inspect.Signature.empty if \"None\" == retType else self.TypeMap[retType]\n signature = Signature(parameters=parameters, return_annotation=retAnnotation)\n self.actions[nodeType] = {k:v for k,v in action.items()}\n self.actions[nodeType][\"signature\"] = signature\n else:\n signature = inspect.signature(action[\"func\"])\n if not all([param.annotation != inspect.Parameter.empty for param in signature.parameters.values()]):\n print(\"Need annotations in registered function. node type: \", nodeType)\n exit()\n self.actions[nodeType] = {k:v for k,v in action.items()}\n self.actions[nodeType][\"signature\"] = signature\n return\n \n def RegisterPattern(self, nodeType: str, pattern: str, isEntry: bool):\n if nodeType not in self.patterns:\n self.patterns[nodeType] = []\n self.patterns[nodeType].append({\"re\": pattern, \"isEntry\": isEntry})\n return\n \n def EndChecker(self, txt: str) -> bool:\n endPatterns = [p['re'] for nodeType,patterns in self.patterns.items() for p in patterns if HasReturnValue(self.actions[nodeType])]\n return any([bool(re.findall(pattern, txt, re.DOTALL)) for pattern in endPatterns])\n \n def GetEntryPatterns(self) -> dict[str,str]:\n return [(nodeType, p['re']) for nodeType,patterns in self.patterns.items() for p in patterns if p[\"isEntry\"]]\n \n def Parse(self, txt: str) -> tuple[str,dict[str,str]]:\n for nodeType, patterns in self.patterns.items():\n for p in patterns:\n m = re.fullmatch(p['re'], txt, re.DOTALL)\n if m:\n return (nodeType, m.groupdict())\n return (None, None)\n\n def CallWithTextArgs(self, action, txtArgs):\n #print(f\"action: {action}, {txtArgs}\")\n signature = action[\"signature\"]\n if set(txtArgs.keys()) != set(signature.parameters.keys()):\n return \"The function call failed because the arguments did not match. txtArgs.keys(): \" + str(txtArgs.keys()) + \". func params: \" + str(signature.parameters.keys())\n paras = dict()\n for k,v in txtArgs.items():\n v = self.Eval(v)\n if str == signature.parameters[k].annotation:\n paras[k] = str(v.strip('\"\\'')) if (len(v) > 0) and (v[0] == v[-1]) and (v[0] in [\"'\",'\"']) else str(v)\n else:\n paras[k] = signature.parameters[k].annotation(v)\n try:\n ret = action['func'](**paras)\n except Exception as e:\n ret = str(e)\n return ret\n \n def Eval(self, txt: str) -> str:\n nodeType, paras = self.Parse(txt)\n if None == nodeType:\n return txt\n else:\n r = self.CallWithTextArgs(self.actions[nodeType], paras)\n return r if r is not None else \"\"\n \n\n def ParseEntries(self, txt_input: str) -> list[str]:\n matches = []\n for nodeType, pattern in self.GetEntryPatterns():\n for match in re.finditer(pattern, txt_input, re.DOTALL):\n matches.append(match)\n \n ret = []\n #Here we assume that a match will not appear multiple times in matches. This is reasonable.\n for match in matches:\n isSubstring = any(\n (m.start() <= match.start()) and (m.end() >= match.end()) and (m is not match)\n for m in matches\n )\n if not isSubstring:\n ret.append(match.group(0))\n return ret\n\n def EvalEntries(self, txt: str) -> str:\n scripts = self.ParseEntries(txt)\n resp = \"\"\n for script in scripts:\n r = self.Eval(script)\n if \"\" != r:\n resp += (r + \"\\n\")\n return resp" } ]
import time from functools import partial from ailice.common.AConfig import config from ailice.core.llm.ALLMPool import llmPool from ailice.common.APrompts import promptsManager from ailice.common.ARemoteAccessors import clientPool from ailice.core.AConversation import AConversations from ailice.core.AInterpreter import AInterpreter
1,666
class AProcessor(): def __init__(self, name, modelID, promptName, outputCB, collection = None): self.name = name self.modelID = modelID
class AProcessor(): def __init__(self, name, modelID, promptName, outputCB, collection = None): self.name = name self.modelID = modelID
self.llm = llmPool.GetModel(modelID)
1
2023-10-16 01:51:14+00:00
4k
Agora-X/Bing-Chat-API
src/bing_chat/chathub.py
[ { "identifier": "DELIMITER", "path": "src/bing_chat/constants.py", "snippet": "DELIMITER = \"\\x1e\"" }, { "identifier": "HEADERS", "path": "src/bing_chat/constants.py", "snippet": "HEADERS = {\n \"accept\": \"application/json\",\n \"accept-language\": \"en-US;q=0.9\",\n \"accept-encoding\": \"gzip, deflate, br, zsdch\",\n \"content-type\": \"application/json\",\n \"sec-ch-ua\": '\"Microsoft Edge\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"',\n \"sec-ch-ua-arch\": '\"x86\"',\n \"sec-ch-ua-bitness\": '\"64\"',\n \"sec-ch-ua-full-version\": '\"117.0.2045.47\"',\n \"sec-ch-ua-full-version-list\": '\"Microsoft Edge\";v=\"117.0.2045.47\", \"Not;A=Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"117.0.5938.132\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-model\": \"\",\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-ch-ua-platform-version\": '\"15.0.0\"',\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-ms-gec-version\": \"1-117.0.2045.47\",\n \"x-ms-client-request-id\": str(uuid.uuid4()),\n \"x-ms-useragent\": \"azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.3 OS/Windows\",\n \"Referer\": \"https://www.bing.com/search?\",\n \"Referrer-Policy\": \"origin-when-cross-origin\",\n \"x-forwarded-for\": FORWARDED_IP,\n}" }, { "identifier": "HEADERS_INIT_CONVER", "path": "src/bing_chat/constants.py", "snippet": "HEADERS_INIT_CONVER = {\n \"authority\": \"www.bing.com\",\n \"accept\": \"application/json\",\n \"accept-language\": \"en-US;q=0.9\",\n \"cache-control\": \"max-age=0\",\n \"sec-ch-ua\": '\"Microsoft Edge\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"',\n \"sec-ch-ua-arch\": '\"x86\"',\n \"sec-ch-ua-bitness\": '\"64\"',\n \"sec-ch-ua-full-version\": '\"117.0.2045.47\"',\n \"sec-ch-ua-full-version-list\": '\"Microsoft Edge\";v=\"117.0.2045.47\", \"Not;A=Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"117.0.5938.132\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-model\": '\"\"',\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-ch-ua-platform-version\": '\"15.0.0\"',\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47\",\n \"x-edge-shopping-flag\": \"1\",\n \"x-forwarded-for\": FORWARDED_IP,\n}" }, { "identifier": "Conversation", "path": "src/bing_chat/conversation.py", "snippet": "class Conversation:\n def __init__(\n self,\n proxy: Union[str, None] = None,\n async_mode: bool = False,\n cookies: Union[List[dict], None] = None,\n ) -> None:\n if async_mode:\n return\n self.struct: dict = {\n \"conversationId\": None,\n \"clientId\": None,\n \"conversationSignature\": None,\n \"result\": {\"value\": \"Success\", \"message\": None},\n }\n self.proxy = proxy\n proxy = (\n proxy\n or os.environ.get(\"all_proxy\")\n or os.environ.get(\"ALL_PROXY\")\n or os.environ.get(\"https_proxy\")\n or os.environ.get(\"HTTPS_PROXY\")\n or None\n )\n if proxy is not None and proxy.startswith(\"socks5h://\"):\n proxy = \"socks5://\" + proxy[len(\"socks5h://\") :]\n self.session = httpx.Client(\n proxies=proxy,\n timeout=900,\n headers=HEADERS_INIT_CONVER,\n )\n if cookies:\n for cookie in cookies:\n self.session.cookies.set(cookie[\"name\"], cookie[\"value\"])\n # Send GET request\n response = self.session.get(\n url=os.environ.get(\"BING_PROXY_URL\")\n or \"https://edgeservices.bing.com/edgesvc/turing/conversation/create\",\n )\n if response.status_code != 200:\n print(f\"Status code: {response.status_code}\")\n print(response.text)\n print(response.url)\n raise Exception(\"Authentication failed\")\n try:\n self.struct = response.json()\n if self.struct.get(\"conversationSignature\") is None:\n self.struct[\"conversationSignature\"] = response.headers[\"X-Sydney-Conversationsignature\"]\n self.struct[\"encryptedConversationSignature\"] = response.headers[\"X-Sydney-Encryptedconversationsignature\"]\n except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:\n raise Exception(\n \"Authentication failed. You have not been accepted into the beta.\",\n ) from exc\n if self.struct[\"result\"][\"value\"] == \"UnauthorizedRequest\":\n raise NotAllowedToAccess(self.struct[\"result\"][\"message\"])\n\n @staticmethod\n async def create(\n proxy: Union[str, None] = None,\n cookies: Union[List[dict], None] = None,\n ) -> \"Conversation\":\n self = Conversation(async_mode=True)\n self.struct = {\n \"conversationId\": None,\n \"clientId\": None,\n \"conversationSignature\": None,\n \"result\": {\"value\": \"Success\", \"message\": None},\n }\n self.proxy = proxy\n proxy = (\n proxy\n or os.environ.get(\"all_proxy\")\n or os.environ.get(\"ALL_PROXY\")\n or os.environ.get(\"https_proxy\")\n or os.environ.get(\"HTTPS_PROXY\")\n or None\n )\n if proxy is not None and proxy.startswith(\"socks5h://\"):\n proxy = \"socks5://\" + proxy[len(\"socks5h://\") :]\n transport = httpx.AsyncHTTPTransport(retries=900)\n # Convert cookie format to httpx format\n formatted_cookies = None\n if cookies:\n formatted_cookies = httpx.Cookies()\n for cookie in cookies:\n formatted_cookies.set(cookie[\"name\"], cookie[\"value\"])\n async with httpx.AsyncClient(\n proxies=proxy,\n timeout=30,\n headers=HEADERS_INIT_CONVER,\n transport=transport,\n cookies=formatted_cookies,\n ) as client:\n # Send GET request\n response = await client.get(\n url=os.environ.get(\"BING_PROXY_URL\")\n or \"https://www.bing.com/turing/conversation/create\",\n follow_redirects=True,\n )\n if response.status_code != 200:\n print(f\"Status code: {response.status_code}\")\n print(response.text)\n print(response.url)\n raise Exception(\"Authentication failed\")\n try:\n self.struct = response.json()\n if self.struct.get(\"conversationSignature\") is None:\n self.struct[\"conversationSignature\"] = response.headers[\"X-Sydney-Conversationsignature\"]\n self.struct[\"encryptedConversationSignature\"] = response.headers[\"X-Sydney-Encryptedconversationsignature\"]\n except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:\n print(response.text)\n raise Exception(\n \"Authentication failed. You have not been accepted into the beta.\",\n ) from exc\n if self.struct[\"result\"][\"value\"] == \"UnauthorizedRequest\":\n raise NotAllowedToAccess(self.struct[\"result\"][\"message\"])\n return self" }, { "identifier": "CONVERSATION_STYLE_TYPE", "path": "src/bing_chat/conversation_style.py", "snippet": "CONVERSATION_STYLE_TYPE = Optional[\n Union[ConversationStyle, Literal[\"creative\", \"balanced\", \"precise\"]]\n]" }, { "identifier": "ChatHubRequest", "path": "src/bing_chat/request.py", "snippet": "class ChatHubRequest:\n def __init__(\n self,\n conversation_signature: str,\n encrypted_conversation_signature: str,\n client_id: str,\n conversation_id: str,\n invocation_id: int = 3,\n ) -> None:\n self.struct: dict = {}\n\n self.client_id: str = client_id\n self.conversation_id: str = conversation_id\n self.conversation_signature: str = conversation_signature\n self.encrypted_conversation_signature: str = encrypted_conversation_signature\n self.invocation_id: int = invocation_id\n\n def update(\n self,\n prompt: str,\n conversation_style: CONVERSATION_STYLE_TYPE,\n webpage_context: Union[str, None] = None,\n search_result: bool = False,\n locale: str = guess_locale(),\n ) -> None:\n options = [\n \"deepleo\",\n \"enable_debug_commands\",\n \"disable_emoji_spoken_text\",\n \"enablemm\",\n ]\n if conversation_style:\n if not isinstance(conversation_style, ConversationStyle):\n conversation_style = getattr(ConversationStyle, conversation_style)\n options = conversation_style.value\n message_id = str(uuid.uuid4())\n # Get the current local time\n now_local = datetime.now()\n\n # Get the current UTC time\n now_utc = datetime.utcnow()\n\n # Calculate the time difference between local and UTC time\n timezone_offset = now_local - now_utc\n\n # Get the offset in hours and minutes\n offset_hours = int(timezone_offset.total_seconds() // 3600)\n offset_minutes = int((timezone_offset.total_seconds() % 3600) // 60)\n\n # Format the offset as a string\n offset_string = f\"{offset_hours:+03d}:{offset_minutes:02d}\"\n\n # Get current time\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\") + offset_string\n self.struct = {\n \"arguments\": [\n {\n \"source\": \"cib\",\n \"optionsSets\": options,\n \"allowedMessageTypes\": [\n \"ActionRequest\",\n \"Chat\",\n \"Context\",\n \"InternalSearchQuery\",\n \"InternalSearchResult\",\n \"Disengaged\",\n \"InternalLoaderMessage\",\n \"Progress\",\n \"RenderCardRequest\",\n \"AdsQuery\",\n \"SemanticSerp\",\n \"GenerateContentQuery\",\n \"SearchQuery\",\n ],\n \"sliceIds\": [\n \"winmuid1tf\",\n \"styleoff\",\n \"ccadesk\",\n \"smsrpsuppv4cf\",\n \"ssrrcache\",\n \"contansperf\",\n \"crchatrev\",\n \"winstmsg2tf\",\n \"creatgoglt\",\n \"creatorv2t\",\n \"sydconfigoptt\",\n \"adssqovroff\",\n \"530pstho\",\n \"517opinion\",\n \"418dhlth\",\n \"512sprtic1s0\",\n \"emsgpr\",\n \"525ptrcps0\",\n \"529rweas0\",\n \"515oscfing2s0\",\n \"524vidansgs0\",\n ],\n \"verbosity\": \"verbose\",\n \"traceId\": get_ran_hex(32),\n \"isStartOfSession\": self.invocation_id == 3,\n \"message\": {\n \"locale\": locale,\n \"market\": locale,\n \"region\": locale[-2:], # en-US -> US\n \"locationHints\": get_location_hint_from_locale(locale),\n \"timestamp\": timestamp,\n \"author\": \"user\",\n \"inputMethod\": \"Keyboard\",\n \"text\": prompt,\n \"messageType\": \"Chat\",\n \"messageId\": message_id,\n \"requestId\": message_id,\n },\n \"tone\": conversation_style.name.capitalize(), # Make first letter uppercase\n \"requestId\": message_id,\n \"conversationSignature\": self.conversation_signature,\n \"encryptedConversationSignature\": self.encrypted_conversation_signature,\n \"participant\": {\n \"id\": self.client_id,\n },\n \"conversationId\": self.conversation_id,\n },\n ],\n \"invocationId\": str(self.invocation_id),\n \"target\": \"chat\",\n \"type\": 4,\n }\n if search_result:\n have_search_result = [\n \"InternalSearchQuery\",\n \"InternalSearchResult\",\n \"InternalLoaderMessage\",\n \"RenderCardRequest\",\n ]\n self.struct[\"arguments\"][0][\"allowedMessageTypes\"] += have_search_result\n if webpage_context:\n self.struct[\"arguments\"][0][\"previousMessages\"] = [\n {\n \"author\": \"user\",\n \"description\": webpage_context,\n \"contextType\": \"WebPage\",\n \"messageType\": \"Context\",\n \"messageId\": \"discover-web--page-ping-mriduna-----\",\n },\n ]\n self.invocation_id += 1\n\n # print(timestamp)" }, { "identifier": "append_identifier", "path": "src/bing_chat/utilities.py", "snippet": "def append_identifier(msg: dict) -> str:\n # Convert dict to json string\n return json.dumps(msg, ensure_ascii=False) + DELIMITER" }, { "identifier": "get_ran_hex", "path": "src/bing_chat/utilities.py", "snippet": "def get_ran_hex(length: int = 32) -> str:\n return \"\".join(random.choice(\"0123456789abcdef\") for _ in range(length))" }, { "identifier": "guess_locale", "path": "src/bing_chat/utilities.py", "snippet": "def guess_locale() -> str:\n if sys.platform.startswith(\"win\"):\n return \"en-us\"\n loc, _ = locale.getlocale()\n return loc.replace(\"_\", \"-\") if loc else \"en-us\"" } ]
import asyncio import json import os import ssl import sys import aiohttp import certifi import httpx import urllib.parse from time import time from typing import Generator from typing import List from typing import Union from BingImageCreator import ImageGenAsync from .constants import DELIMITER from .constants import HEADERS from .constants import HEADERS_INIT_CONVER from .conversation import Conversation from .conversation_style import CONVERSATION_STYLE_TYPE from .request import ChatHubRequest from .utilities import append_identifier from .utilities import get_ran_hex from .utilities import guess_locale
3,459
ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) class ChatHub: def __init__( self,
ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) class ChatHub: def __init__( self,
conversation: Conversation,
3
2023-10-19 19:17:05+00:00
4k
city96/ComfyUI_ExtraModels
PixArt/models/PixArt.py
[ { "identifier": "auto_grad_checkpoint", "path": "PixArt/models/utils.py", "snippet": "def _ntuple(n):\n def parse(x):\ndef set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1):\n def set_attr(module):\ndef auto_grad_checkpoint(module, *args, **kwargs):\ndef checkpoint_sequential(functions, step, input, *args, **kwargs):\n def run_function(start, end, functions):\n def forward(input):\ndef get_rel_pos(q_size, k_size, rel_pos):\ndef add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):" }, { "identifier": "t2i_modulate", "path": "PixArt/models/PixArt_blocks.py", "snippet": "def t2i_modulate(x, shift, scale):\n return x * (1 + scale) + shift" }, { "identifier": "CaptionEmbedder", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class CaptionEmbedder(nn.Module):\n \"\"\"\n Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.\n \"\"\"\n\n def __init__(self, in_channels, hidden_size, uncond_prob, act_layer=nn.GELU(approximate='tanh'), token_num=120):\n super().__init__()\n self.y_proj = Mlp(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size, act_layer=act_layer, drop=0)\n self.register_buffer(\"y_embedding\", nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5))\n self.uncond_prob = uncond_prob\n\n def token_drop(self, caption, force_drop_ids=None):\n \"\"\"\n Drops labels to enable classifier-free guidance.\n \"\"\"\n if force_drop_ids is None:\n drop_ids = torch.rand(caption.shape[0]).cuda() < self.uncond_prob\n else:\n drop_ids = force_drop_ids == 1\n caption = torch.where(drop_ids[:, None, None, None], self.y_embedding, caption)\n return caption\n\n def forward(self, caption, train, force_drop_ids=None):\n if train:\n assert caption.shape[2:] == self.y_embedding.shape\n use_dropout = self.uncond_prob > 0\n if (train and use_dropout) or (force_drop_ids is not None):\n caption = self.token_drop(caption, force_drop_ids)\n caption = self.y_proj(caption)\n return caption" }, { "identifier": "WindowAttention", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class WindowAttention(Attention_):\n \"\"\"Multi-head Attention block with relative position embeddings.\"\"\"\n\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=True,\n use_rel_pos=False,\n rel_pos_zero_init=True,\n input_size=None,\n **block_kwargs,\n ):\n \"\"\"\n Args:\n dim (int): Number of input channels.\n num_heads (int): Number of attention heads.\n qkv_bias (bool: If True, add a learnable bias to query, key, value.\n rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n input_size (int or None): Input resolution for calculating the relative positional\n parameter size.\n \"\"\"\n super().__init__(dim, num_heads=num_heads, qkv_bias=qkv_bias, **block_kwargs)\n\n self.use_rel_pos = use_rel_pos\n if self.use_rel_pos:\n # initialize relative positional embeddings\n self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, self.head_dim))\n self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, self.head_dim))\n\n if not rel_pos_zero_init:\n nn.init.trunc_normal_(self.rel_pos_h, std=0.02)\n nn.init.trunc_normal_(self.rel_pos_w, std=0.02)\n\n def forward(self, x, mask=None):\n B, N, C = x.shape # 2 4096 1152\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)\n\n if model_management.xformers_enabled():\n q, k, v = qkv.unbind(2)\n\n if getattr(self, 'fp32_attention', False):\n q, k, v = q.float(), k.float(), v.float()\n\n attn_bias = None\n if mask is not None:\n attn_bias = torch.zeros([B * self.num_heads, q.shape[1], k.shape[1]], dtype=q.dtype, device=q.device)\n attn_bias.masked_fill_(mask.squeeze(1).repeat(self.num_heads, 1, 1) == 0, float('-inf'))\n # Switch between torch / xformers attention\n x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)\n x = x.view(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n else:\n q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)\n\n q = q * self.scale\n attn = q @ k.transpose(-2, -1)\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n x = attn @ v\n\n x = x.transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x" }, { "identifier": "MultiHeadCrossAttention", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class MultiHeadCrossAttention(nn.Module):\n def __init__(self, d_model, num_heads, attn_drop=0., proj_drop=0., **block_kwargs):\n super(MultiHeadCrossAttention, self).__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n\n self.d_model = d_model\n self.num_heads = num_heads\n self.head_dim = d_model // num_heads\n\n self.q_linear = nn.Linear(d_model, d_model)\n self.kv_linear = nn.Linear(d_model, d_model*2)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(d_model, d_model)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, cond, mask=None):\n # query/value: img tokens; key: condition; mask: if padding tokens\n B, N, C = x.shape\n\n if model_management.xformers_enabled():\n q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)\n kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)\n k, v = kv.unbind(2)\n attn_bias = None\n if mask is not None:\n attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens([N] * B, mask)\n x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)\n x = x.view(B, -1, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n else:\n global competent_attention_implementation\n if not competent_attention_implementation:\n print(\"\"\"\\nYou should REALLY consider installing/enabling xformers.\\nAlternatively, open up ExtraModels/PixArt/models/PixArt_blocks.py and\\n- Fix the attention map on line 77 if you know how to\\n- Add scaled_dot_product_attention on line 150\\n- Send a PR and remove this message on line 32/66-69\\n\"\"\")\n competent_attention_implementation = True\n\n q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)\n kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)\n k, v = kv.unbind(2)\n q, k, v = map(lambda t: t.permute(0, 2, 1, 3),(q, k, v),)\n \n attn_mask = None\n if mask is not None and len(mask) > 1:\n # This is probably wrong\n attn_mask = torch.zeros(\n [1, q.shape[1], q.shape[2], v.shape[2]],\n dtype=q.dtype,\n device=q.device\n )\n attn_mask[:, :, (q.shape[2]//2):, mask[0]:] = True\n attn_mask[:, :, :(q.shape[2]//2), :mask[1]] = True\n\n x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p)\n x = x.permute(0, 2, 1, 3).contiguous()\n x = x.view(B, -1, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x" }, { "identifier": "T2IFinalLayer", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class T2IFinalLayer(nn.Module):\n \"\"\"\n The final layer of PixArt.\n \"\"\"\n\n def __init__(self, hidden_size, patch_size, out_channels):\n super().__init__()\n self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)\n self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)\n self.scale_shift_table = nn.Parameter(torch.randn(2, hidden_size) / hidden_size ** 0.5)\n self.out_channels = out_channels\n\n def forward(self, x, t):\n shift, scale = (self.scale_shift_table[None] + t[:, None]).chunk(2, dim=1)\n x = t2i_modulate(self.norm_final(x), shift, scale)\n x = self.linear(x)\n return x" }, { "identifier": "TimestepEmbedder", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class TimestepEmbedder(nn.Module):\n \"\"\"\n Embeds scalar timesteps into vector representations.\n \"\"\"\n\n def __init__(self, hidden_size, frequency_embedding_size=256):\n super().__init__()\n self.mlp = nn.Sequential(\n nn.Linear(frequency_embedding_size, hidden_size, bias=True),\n nn.SiLU(),\n nn.Linear(hidden_size, hidden_size, bias=True),\n )\n self.frequency_embedding_size = frequency_embedding_size\n\n @staticmethod\n def timestep_embedding(t, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param t: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an (N, D) Tensor of positional embeddings.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)\n args = t[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n return embedding\n\n def forward(self, t):\n t_freq = self.timestep_embedding(t, self.frequency_embedding_size)\n t_emb = self.mlp(t_freq.to(t.dtype))\n return t_emb" }, { "identifier": "LabelEmbedder", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class LabelEmbedder(nn.Module):\n \"\"\"\n Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.\n \"\"\"\n\n def __init__(self, num_classes, hidden_size, dropout_prob):\n super().__init__()\n use_cfg_embedding = dropout_prob > 0\n self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)\n self.num_classes = num_classes\n self.dropout_prob = dropout_prob\n\n def token_drop(self, labels, force_drop_ids=None):\n \"\"\"\n Drops labels to enable classifier-free guidance.\n \"\"\"\n if force_drop_ids is None:\n drop_ids = torch.rand(labels.shape[0]).cuda() < self.dropout_prob\n else:\n drop_ids = force_drop_ids == 1\n labels = torch.where(drop_ids, self.num_classes, labels)\n return labels\n\n def forward(self, labels, train, force_drop_ids=None):\n use_dropout = self.dropout_prob > 0\n if (train and use_dropout) or (force_drop_ids is not None):\n labels = self.token_drop(labels, force_drop_ids)\n embeddings = self.embedding_table(labels)\n return embeddings" }, { "identifier": "FinalLayer", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class FinalLayer(nn.Module):\n \"\"\"\n The final layer of PixArt.\n \"\"\"\n\n def __init__(self, hidden_size, patch_size, out_channels):\n super().__init__()\n self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)\n self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)\n self.adaLN_modulation = nn.Sequential(\n nn.SiLU(),\n nn.Linear(hidden_size, 2 * hidden_size, bias=True)\n )\n\n def forward(self, x, c):\n shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)\n x = modulate(self.norm_final(x), shift, scale)\n x = self.linear(x)\n return x" } ]
import math import torch import torch.nn as nn import os import numpy as np from timm.models.layers import DropPath from timm.models.vision_transformer import PatchEmbed, Mlp from .utils import auto_grad_checkpoint, to_2tuple from .PixArt_blocks import t2i_modulate, CaptionEmbedder, WindowAttention, MultiHeadCrossAttention, T2IFinalLayer, TimestepEmbedder, LabelEmbedder, FinalLayer
3,572
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # References: # GLIDE: https://github.com/openai/glide-text2im # MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py # -------------------------------------------------------- class PixArtBlock(nn.Module): """ A PixArt block with adaptive layer norm (adaLN-single) conditioning. """ def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs): super().__init__() self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # References: # GLIDE: https://github.com/openai/glide-text2im # MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py # -------------------------------------------------------- class PixArtBlock(nn.Module): """ A PixArt block with adaptive layer norm (adaLN-single) conditioning. """ def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs): super().__init__() self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True,
3
2023-10-20 21:19:44+00:00
4k
aszc-dev/ComfyUI-CoreMLSuite
coreml_suite/models.py
[ { "identifier": "get_model_config", "path": "coreml_suite/config.py", "snippet": "def get_model_config(model_version: ModelVersion):\n unet_config = convert_config(config_map[model_version])\n config = supported_models_base.BASE(unet_config)\n config.latent_format = latent_format_map[model_version]()\n return config" }, { "identifier": "ModelVersion", "path": "coreml_suite/config.py", "snippet": "class ModelVersion(Enum):\n SD15 = \"sd15\"\n SDXL = \"sdxl\"\n SDXL_REFINER = \"sdxl_refiner\"\n LCM = \"lcm\"" }, { "identifier": "extract_residual_kwargs", "path": "coreml_suite/controlnet.py", "snippet": "def extract_residual_kwargs(expected_inputs, control):\n if \"additional_residual_0\" not in expected_inputs.keys():\n return {}\n if control is None:\n return no_control(expected_inputs)\n\n residual_kwargs = {\n \"additional_residual_{}\".format(i): r.cpu().numpy().astype(np.float16)\n for i, r in enumerate(chain(control[\"output\"], control[\"middle\"]))\n }\n return residual_kwargs" }, { "identifier": "chunk_control", "path": "coreml_suite/controlnet.py", "snippet": "def chunk_control(cn, target_size):\n if cn is None:\n return [None] * target_size\n\n num_chunks = ceil(cn[\"output\"][0].shape[0] / target_size)\n\n out = [{\"output\": [], \"middle\": []} for _ in range(num_chunks)]\n\n for k, v in cn.items():\n for i, x in enumerate(v):\n chunks = chunk_batch(x, (target_size, *x.shape[1:]))\n for j, chunk in enumerate(chunks):\n out[j][k].append(chunk)\n\n return out" }, { "identifier": "chunk_batch", "path": "coreml_suite/latents.py", "snippet": "def chunk_batch(input_tensor, target_shape):\n if input_tensor.shape == target_shape:\n return [input_tensor]\n\n batch_size = input_tensor.shape[0]\n target_batch_size = target_shape[0]\n\n num_chunks = batch_size // target_batch_size\n if num_chunks == 0:\n padding = torch.zeros(target_batch_size - batch_size, *target_shape[1:]).to(\n input_tensor.device\n )\n return [torch.cat((input_tensor, padding), dim=0)]\n\n mod = batch_size % target_batch_size\n if mod != 0:\n chunks = list(torch.chunk(input_tensor[:-mod], num_chunks))\n padding = torch.zeros(target_batch_size - mod, *target_shape[1:]).to(\n input_tensor.device\n )\n padded = torch.cat((input_tensor[-mod:], padding), dim=0)\n chunks.append(padded)\n return chunks\n\n chunks = list(torch.chunk(input_tensor, num_chunks))\n return chunks" }, { "identifier": "merge_chunks", "path": "coreml_suite/latents.py", "snippet": "def merge_chunks(chunks, orig_shape):\n merged = torch.cat(chunks, dim=0)\n if merged.shape == orig_shape:\n return merged\n return merged[: orig_shape[0]]" }, { "identifier": "is_lcm", "path": "coreml_suite/lcm/utils.py", "snippet": "def is_lcm(coreml_model):\n return \"timestep_cond\" in coreml_model.expected_inputs" }, { "identifier": "logger", "path": "coreml_suite/logger.py", "snippet": "" } ]
import numpy as np import torch from comfy import model_base from comfy.model_management import get_torch_device from comfy.model_patcher import ModelPatcher from coreml_suite.config import get_model_config, ModelVersion from coreml_suite.controlnet import extract_residual_kwargs, chunk_control from coreml_suite.latents import chunk_batch, merge_chunks from coreml_suite.lcm.utils import is_lcm from coreml_suite.logger import logger
1,666
class CoreMLModelWrapper: def __init__(self, coreml_model): self.coreml_model = coreml_model self.dtype = torch.float16 def __call__(self, x, t, context, control, transformer_options=None, **kwargs): inputs = CoreMLInputs(x, t, context, control, **kwargs) input_list = inputs.chunks(self.expected_inputs) chunked_out = [ self.get_torch_outputs( self.coreml_model(**input_kwargs.coreml_kwargs(self.expected_inputs)), x.device, ) for input_kwargs in input_list ] merged_out = merge_chunks(chunked_out, x.shape) return merged_out @staticmethod def get_torch_outputs(model_output, device): return torch.from_numpy(model_output["noise_pred"]).to(device) @property def expected_inputs(self): return self.coreml_model.expected_inputs @property def is_lcm(self): return is_lcm(self.coreml_model) @property def is_sdxl_base(self): return is_sdxl_base(self.coreml_model) @property def is_sdxl_refiner(self): return is_sdxl_refiner(self.coreml_model) @property def config(self): if self.is_sdxl_base: return get_model_config(ModelVersion.SDXL) if self.is_sdxl_refiner: return get_model_config(ModelVersion.SDXL_REFINER) return get_model_config(ModelVersion.SD15) class CoreMLModelWrapperLCM(CoreMLModelWrapper): def __init__(self, coreml_model): super().__init__(coreml_model) self.config = None class CoreMLInputs: def __init__(self, x, t, context, control, **kwargs): self.x = x self.t = t self.context = context self.control = control self.time_ids = kwargs.get("time_ids") self.text_embeds = kwargs.get("text_embeds") self.ts_cond = kwargs.get("timestep_cond") def coreml_kwargs(self, expected_inputs): sample = self.x.cpu().numpy().astype(np.float16) context = self.context.cpu().numpy().astype(np.float16) context = context.transpose(0, 2, 1)[:, :, None, :] t = self.t.cpu().numpy().astype(np.float16) model_input_kwargs = { "sample": sample, "encoder_hidden_states": context, "timestep": t, } residual_kwargs = extract_residual_kwargs(expected_inputs, self.control) model_input_kwargs |= residual_kwargs # LCM if self.ts_cond is not None: model_input_kwargs["timestep_cond"] = ( self.ts_cond.cpu().numpy().astype(np.float16) ) # SDXL if "text_embeds" in expected_inputs: model_input_kwargs["text_embeds"] = ( self.text_embeds.cpu().numpy().astype(np.float16) ) if "time_ids" in expected_inputs: model_input_kwargs["time_ids"] = ( self.time_ids.cpu().numpy().astype(np.float16) ) return model_input_kwargs def chunks(self, expected_inputs): sample_shape = expected_inputs["sample"]["shape"] timestep_shape = expected_inputs["timestep"]["shape"] hidden_shape = expected_inputs["encoder_hidden_states"]["shape"] context_shape = (hidden_shape[0], hidden_shape[3], hidden_shape[1]) chunked_x = chunk_batch(self.x, sample_shape) ts = list(torch.full((len(chunked_x), timestep_shape[0]), self.t[0])) chunked_context = chunk_batch(self.context, context_shape) chunked_control = [None] * len(chunked_x) if self.control is not None:
class CoreMLModelWrapper: def __init__(self, coreml_model): self.coreml_model = coreml_model self.dtype = torch.float16 def __call__(self, x, t, context, control, transformer_options=None, **kwargs): inputs = CoreMLInputs(x, t, context, control, **kwargs) input_list = inputs.chunks(self.expected_inputs) chunked_out = [ self.get_torch_outputs( self.coreml_model(**input_kwargs.coreml_kwargs(self.expected_inputs)), x.device, ) for input_kwargs in input_list ] merged_out = merge_chunks(chunked_out, x.shape) return merged_out @staticmethod def get_torch_outputs(model_output, device): return torch.from_numpy(model_output["noise_pred"]).to(device) @property def expected_inputs(self): return self.coreml_model.expected_inputs @property def is_lcm(self): return is_lcm(self.coreml_model) @property def is_sdxl_base(self): return is_sdxl_base(self.coreml_model) @property def is_sdxl_refiner(self): return is_sdxl_refiner(self.coreml_model) @property def config(self): if self.is_sdxl_base: return get_model_config(ModelVersion.SDXL) if self.is_sdxl_refiner: return get_model_config(ModelVersion.SDXL_REFINER) return get_model_config(ModelVersion.SD15) class CoreMLModelWrapperLCM(CoreMLModelWrapper): def __init__(self, coreml_model): super().__init__(coreml_model) self.config = None class CoreMLInputs: def __init__(self, x, t, context, control, **kwargs): self.x = x self.t = t self.context = context self.control = control self.time_ids = kwargs.get("time_ids") self.text_embeds = kwargs.get("text_embeds") self.ts_cond = kwargs.get("timestep_cond") def coreml_kwargs(self, expected_inputs): sample = self.x.cpu().numpy().astype(np.float16) context = self.context.cpu().numpy().astype(np.float16) context = context.transpose(0, 2, 1)[:, :, None, :] t = self.t.cpu().numpy().astype(np.float16) model_input_kwargs = { "sample": sample, "encoder_hidden_states": context, "timestep": t, } residual_kwargs = extract_residual_kwargs(expected_inputs, self.control) model_input_kwargs |= residual_kwargs # LCM if self.ts_cond is not None: model_input_kwargs["timestep_cond"] = ( self.ts_cond.cpu().numpy().astype(np.float16) ) # SDXL if "text_embeds" in expected_inputs: model_input_kwargs["text_embeds"] = ( self.text_embeds.cpu().numpy().astype(np.float16) ) if "time_ids" in expected_inputs: model_input_kwargs["time_ids"] = ( self.time_ids.cpu().numpy().astype(np.float16) ) return model_input_kwargs def chunks(self, expected_inputs): sample_shape = expected_inputs["sample"]["shape"] timestep_shape = expected_inputs["timestep"]["shape"] hidden_shape = expected_inputs["encoder_hidden_states"]["shape"] context_shape = (hidden_shape[0], hidden_shape[3], hidden_shape[1]) chunked_x = chunk_batch(self.x, sample_shape) ts = list(torch.full((len(chunked_x), timestep_shape[0]), self.t[0])) chunked_context = chunk_batch(self.context, context_shape) chunked_control = [None] * len(chunked_x) if self.control is not None:
chunked_control = chunk_control(self.control, sample_shape[0])
3
2023-10-23 13:08:00+00:00
4k
aikunyi/FreTS
exp/exp_main.py
[ { "identifier": "data_provider", "path": "data_provider/data_factory.py", "snippet": "def data_provider(args, flag):\n Data = data_dict[args.data]\n timeenc = 0 if args.embed != 'timeF' else 1\n train_only = args.train_only\n\n if flag == 'test':\n shuffle_flag = False\n drop_last = True\n batch_size = args.batch_size\n freq = args.freq\n elif flag == 'pred':\n shuffle_flag = False\n drop_last = False\n batch_size = 1\n freq = args.freq\n Data = Dataset_Pred\n else:\n shuffle_flag = True\n drop_last = True\n batch_size = args.batch_size\n freq = args.freq\n\n data_set = Data(\n root_path=args.root_path,\n data_path=args.data_path,\n flag=flag,\n size=[args.seq_len, args.label_len, args.pred_len],\n features=args.features,\n target=args.target,\n timeenc=timeenc,\n freq=freq,\n train_only=train_only\n )\n print(flag, len(data_set))\n data_loader = DataLoader(\n data_set,\n batch_size=batch_size,\n shuffle=shuffle_flag,\n num_workers=args.num_workers,\n drop_last=drop_last)\n return data_set, data_loader" }, { "identifier": "Exp_Basic", "path": "exp/exp_basic.py", "snippet": "class Exp_Basic(object):\n def __init__(self, args):\n self.args = args\n self.device = self._acquire_device()\n self.model = self._build_model().to(self.device)\n\n def _build_model(self):\n raise NotImplementedError\n return None\n\n def _acquire_device(self):\n if self.args.use_gpu:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(\n self.args.gpu) if not self.args.use_multi_gpu else self.args.devices\n device = torch.device('cuda:{}'.format(self.args.gpu))\n print('Use GPU: cuda:{}'.format(self.args.gpu))\n else:\n device = torch.device('cpu')\n print('Use CPU')\n return device\n\n def _get_data(self):\n pass\n\n def vali(self):\n pass\n\n def train(self):\n pass\n\n def test(self):\n pass" }, { "identifier": "DLinear", "path": "models/DLinear.py", "snippet": "class moving_avg(nn.Module):\nclass series_decomp(nn.Module):\nclass Model(nn.Module):\n def __init__(self, kernel_size, stride):\n def forward(self, x):\n def __init__(self, kernel_size):\n def forward(self, x):\n def __init__(self, configs):\n def forward(self, x):" }, { "identifier": "NLinear", "path": "models/NLinear.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def forward(self, x):" }, { "identifier": "FreTS", "path": "models/FreTS.py", "snippet": "class Model(nn.Module):\n def __init__(self, configs):\n def tokenEmb(self, x):\n def MLP_temporal(self, x, B, N, L):\n def MLP_channel(self, x, B, N, L):\n def FreMLP(self, B, nd, dimension, x, r, i, rb, ib):\n def forward(self, x):\n B, T, N = x.shape" }, { "identifier": "EarlyStopping", "path": "utils/tools.py", "snippet": "class EarlyStopping:\n def __init__(self, patience=7, verbose=False, delta=0):\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_loss_min = np.Inf\n self.delta = delta\n\n def __call__(self, val_loss, model, path):\n score = -val_loss\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_loss, model, path)\n elif score < self.best_score + self.delta:\n self.counter += 1\n print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n self.best_score = score\n self.save_checkpoint(val_loss, model, path)\n self.counter = 0\n\n def save_checkpoint(self, val_loss, model, path):\n if self.verbose:\n print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')\n torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')\n self.val_loss_min = val_loss" }, { "identifier": "adjust_learning_rate", "path": "utils/tools.py", "snippet": "def adjust_learning_rate(optimizer, epoch, args):\n # lr = args.learning_rate * (0.2 ** (epoch // 2))\n if args.lradj == 'type1':\n lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}\n elif args.lradj == 'type2':\n lr_adjust = {\n 2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,\n 10: 5e-7, 15: 1e-7, 20: 5e-8\n }\n elif args.lradj == '3':\n lr_adjust = {epoch: args.learning_rate if epoch < 10 else args.learning_rate*0.1}\n elif args.lradj == '4':\n lr_adjust = {epoch: args.learning_rate if epoch < 15 else args.learning_rate*0.1}\n elif args.lradj == '5':\n lr_adjust = {epoch: args.learning_rate if epoch < 25 else args.learning_rate*0.1}\n elif args.lradj == '6':\n lr_adjust = {epoch: args.learning_rate if epoch < 5 else args.learning_rate*0.1} \n if epoch in lr_adjust.keys():\n lr = lr_adjust[epoch]\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print('Updating learning rate to {}'.format(lr))" }, { "identifier": "visual", "path": "utils/tools.py", "snippet": "def visual(true, preds=None, name='./pic/test.pdf'):\n \"\"\"\n Results visualization\n \"\"\"\n plt.figure()\n plt.plot(true, label='GroundTruth', linewidth=2)\n if preds is not None:\n plt.plot(preds, label='Prediction', linewidth=2)\n plt.legend()\n plt.savefig(name, bbox_inches='tight')" }, { "identifier": "test_params_flop", "path": "utils/tools.py", "snippet": "def test_params_flop(model,x_shape):\n \"\"\"\n If you want to thest former's flop, you need to give default value to inputs in model.forward(), the following code can only pass one argument to forward()\n \"\"\"\n model_params = 0\n for parameter in model.parameters():\n model_params += parameter.numel()\n print('INFO: Trainable parameter count: {:.2f}M'.format(model_params / 1000000.0))\n from ptflops import get_model_complexity_info \n with torch.cuda.device(0):\n macs, params = get_model_complexity_info(model.cuda(), x_shape, as_strings=True, print_per_layer_stat=True)\n # print('Flops:' + flops)\n # print('Params:' + params)\n print('{:<30} {:<8}'.format('Computational complexity: ', macs))\n print('{:<30} {:<8}'.format('Number of parameters: ', params))" }, { "identifier": "metric", "path": "utils/metrics.py", "snippet": "def metric(pred, true):\n mae = MAE(pred, true)\n mse = MSE(pred, true)\n rmse = RMSE(pred, true)\n mape = MAPE(pred, true)\n mspe = MSPE(pred, true)\n rse = RSE(pred, true)\n corr = CORR(pred, true)\n\n return mae, mse, rmse, mape, mspe, rse, corr" } ]
from data_provider.data_factory import data_provider from exp.exp_basic import Exp_Basic from models import DLinear, NLinear, FreTS from utils.tools import EarlyStopping, adjust_learning_rate, visual, test_params_flop from utils.metrics import metric from torch import optim import numpy as np import pandas as pd import torch import torch.nn as nn import os import time import warnings import matplotlib.pyplot as plt import numpy as np
2,036
warnings.filterwarnings('ignore') class Exp_Main(Exp_Basic): def __init__(self, args): super(Exp_Main, self).__init__(args) def _build_model(self): model_dict = { 'DLinear': DLinear,
warnings.filterwarnings('ignore') class Exp_Main(Exp_Basic): def __init__(self, args): super(Exp_Main, self).__init__(args) def _build_model(self): model_dict = { 'DLinear': DLinear,
'NLinear': NLinear,
3
2023-10-23 13:15:14+00:00
4k
amitfin/oref_alert
custom_components/oref_alert/binary_sensor.py
[ { "identifier": "expand_areas_and_groups", "path": "custom_components/oref_alert/area_utils.py", "snippet": "def expand_areas_and_groups(areas_and_groups: list[str]) -> list[str]:\n \"\"\"Expand groups (if exists) to areas.\"\"\"\n areas = []\n for area_or_group in areas_and_groups:\n if area_or_group in CITY_ALL_AREAS:\n areas.extend(CITY_ALL_AREAS[area_or_group])\n elif area_or_group in DISTRICT_AREAS:\n areas.extend(DISTRICT_AREAS[area_or_group])\n else:\n areas.append(area_or_group)\n areas.sort()\n return areas" }, { "identifier": "DOMAIN", "path": "custom_components/oref_alert/const.py", "snippet": "DOMAIN: Final = \"oref_alert\"" }, { "identifier": "DATA_COORDINATOR", "path": "custom_components/oref_alert/const.py", "snippet": "DATA_COORDINATOR: Final = \"coordinator\"" }, { "identifier": "TITLE", "path": "custom_components/oref_alert/const.py", "snippet": "TITLE: Final = \"Oref Alert\"" }, { "identifier": "CONF_AREAS", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_AREAS: Final = \"areas\"" }, { "identifier": "CONF_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ALERT_MAX_AGE: Final = \"alert_max_age\"" }, { "identifier": "CONF_OFF_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_OFF_ICON: Final = \"off_icon\"" }, { "identifier": "CONF_ON_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ON_ICON: Final = \"on_icon\"" }, { "identifier": "CONF_SENSORS", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_SENSORS: Final = \"sensors\"" }, { "identifier": "ALL_AREAS_ID_SUFFIX", "path": "custom_components/oref_alert/const.py", "snippet": "ALL_AREAS_ID_SUFFIX: Final = \"all_areas\"" }, { "identifier": "ALL_AREAS_NAME_SUFFIX", "path": "custom_components/oref_alert/const.py", "snippet": "ALL_AREAS_NAME_SUFFIX: Final = \"All Areas\"" }, { "identifier": "ATTR_COUNTRY_ACTIVE_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_COUNTRY_ACTIVE_ALERTS: Final = \"country_active_alerts\"" }, { "identifier": "ATTR_COUNTRY_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_COUNTRY_ALERTS: Final = \"country_alerts\"" }, { "identifier": "ATTR_SELECTED_AREAS_ACTIVE_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_SELECTED_AREAS_ACTIVE_ALERTS: Final = \"selected_areas_active_alerts\"" }, { "identifier": "ATTR_SELECTED_AREAS_ALERTS", "path": "custom_components/oref_alert/const.py", "snippet": "ATTR_SELECTED_AREAS_ALERTS: Final = \"selected_areas_alerts\"" }, { "identifier": "DEFAULT_OFF_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_OFF_ICON: Final = \"mdi:home-outline\"" }, { "identifier": "DEFAULT_ON_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_ON_ICON: Final = \"mdi:home-alert-outline\"" }, { "identifier": "OREF_ALERT_UNIQUE_ID", "path": "custom_components/oref_alert/const.py", "snippet": "OREF_ALERT_UNIQUE_ID: Final = \"oref_alert\"" }, { "identifier": "OrefAlertCoordinatorData", "path": "custom_components/oref_alert/coordinator.py", "snippet": "class OrefAlertCoordinatorData:\n \"\"\"Class for holding coordinator data.\"\"\"\n\n alerts: list[Any]\n active_alerts: list[Any]" }, { "identifier": "OrefAlertDataUpdateCoordinator", "path": "custom_components/oref_alert/coordinator.py", "snippet": "class OrefAlertDataUpdateCoordinator(DataUpdateCoordinator[OrefAlertCoordinatorData]):\n \"\"\"Class to manage fetching Oref Alert data.\"\"\"\n\n def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry):\n \"\"\"Initialize global data updater.\"\"\"\n super().__init__(\n hass,\n LOGGER,\n name=DOMAIN,\n update_interval=timedelta(\n seconds=config_entry.options.get(\n CONF_POLL_INTERVAL, DEFAULT_POLL_INTERVAL\n )\n ),\n )\n self._config_entry: ConfigEntry = config_entry\n self._http_client = async_get_clientsession(hass)\n self._synthetic_alerts: dict[int, dict[Any]] = {}\n\n async def _async_update_data(self) -> OrefAlertCoordinatorData:\n \"\"\"Request the data from Oref servers..\"\"\"\n current, history = await asyncio.gather(\n *[self._async_fetch_url(url) for url in (OREF_ALERTS_URL, OREF_HISTORY_URL)]\n )\n history = history or []\n alerts = self._current_to_history_format(current, history) if current else []\n alerts.extend(history)\n alerts.extend(self._get_synthetic_alerts())\n alerts.sort(key=cmp_to_key(_sort_alerts))\n for unrecognized_area in {alert[\"data\"] for alert in alerts}.difference(AREAS):\n LOGGER.error(\"Alert has an unrecognized area: %s\", unrecognized_area)\n return OrefAlertCoordinatorData(alerts, self._active_alerts(alerts))\n\n async def _async_fetch_url(self, url: str) -> Any:\n \"\"\"Fetch data from Oref servers.\"\"\"\n exc_info = None\n for _ in range(REQUEST_RETRIES):\n try:\n async with self._http_client.get(url, headers=OREF_HEADERS) as response:\n try:\n return await response.json(encoding=\"utf-8-sig\")\n except (JSONDecodeError, ContentTypeError):\n # Empty file is a valid return but not a valid JSON file\n return None\n except Exception as ex: # pylint: disable=broad-except\n exc_info = ex\n raise exc_info\n\n def _current_to_history_format(\n self, current: dict[str, Any], history: list[dict[str, Any]]\n ) -> list[dict[str, str]]:\n \"\"\"Convert current alerts payload to history format.\"\"\"\n now = dt_util.now(IST).strftime(\"%Y-%m-%d %H:%M:%S\")\n category = int(current[\"cat\"])\n history_last_minute_alerts = self._recent_alerts(\n history, REAL_TIME_ALERT_LOGIC_WINDOW\n )\n previous_last_minute_alerts = (\n self._recent_alerts(self.data.active_alerts, REAL_TIME_ALERT_LOGIC_WINDOW)\n if self.data\n else []\n )\n alerts = []\n for area in current[\"data\"]:\n for history_recent_alert in history_last_minute_alerts:\n if _compare_fields(history_recent_alert, area, category):\n # The alert is already in the history list. No need to add it twice.\n break\n else:\n for previous_recent_alert in previous_last_minute_alerts:\n if _compare_fields(previous_recent_alert, area, category):\n # The alert was already added, so take the original timestamp.\n alerts.append(previous_recent_alert)\n break\n else:\n alerts.append(\n {\n \"alertDate\": now,\n \"title\": current[\"title\"],\n \"data\": area,\n \"category\": category,\n }\n )\n return alerts\n\n def _active_alerts(self, alerts: list[Any]) -> list[Any]:\n \"\"\"Return the list of active alerts.\"\"\"\n return self._recent_alerts(\n alerts, self._config_entry.options[CONF_ALERT_MAX_AGE]\n )\n\n def _recent_alerts(self, alerts: list[Any], max_age: int) -> list[Any]:\n \"\"\"Return the list of recent alerts, assuming the input is sorted.\"\"\"\n earliest_alert = dt_util.now().timestamp() - max_age * 60\n recent_alerts = []\n for alert in alerts:\n if (\n dt_util.parse_datetime(alert[\"alertDate\"])\n .replace(tzinfo=IST)\n .timestamp()\n < earliest_alert\n ):\n break\n recent_alerts.append(alert)\n return recent_alerts\n\n def add_synthetic_alert(self, area: str, duration: int) -> None:\n \"\"\"Add a synthetic alert for testing purposes.\"\"\"\n now = dt_util.now(IST)\n self._synthetic_alerts[int(now.timestamp()) + duration] = {\n \"alertDate\": now.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"title\": \"התרעה סינטטית לצורכי בדיקות\",\n \"data\": area,\n \"category\": 1,\n }\n\n def _get_synthetic_alerts(self) -> list[dict[Any]]:\n \"\"\"Return the list of synthetic alerts.\"\"\"\n now = dt_util.now().timestamp()\n for expired in [\n timestamp for timestamp in self._synthetic_alerts if timestamp < now\n ]:\n del self._synthetic_alerts[expired]\n return self._synthetic_alerts.values()" } ]
from typing import Any from collections.abc import Mapping from homeassistant.components import binary_sensor from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.update_coordinator import CoordinatorEntity from .area_utils import expand_areas_and_groups from .const import ( DOMAIN, DATA_COORDINATOR, TITLE, CONF_AREAS, CONF_ALERT_MAX_AGE, CONF_OFF_ICON, CONF_ON_ICON, CONF_SENSORS, ALL_AREAS_ID_SUFFIX, ALL_AREAS_NAME_SUFFIX, ATTR_COUNTRY_ACTIVE_ALERTS, ATTR_COUNTRY_ALERTS, ATTR_SELECTED_AREAS_ACTIVE_ALERTS, ATTR_SELECTED_AREAS_ALERTS, DEFAULT_OFF_ICON, DEFAULT_ON_ICON, OREF_ALERT_UNIQUE_ID, ) from .coordinator import OrefAlertCoordinatorData, OrefAlertDataUpdateCoordinator
2,580
"""Support for representing daily schedule as binary sensors.""" from __future__ import annotations async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Initialize config entry.""" coordinator = hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR] async_add_entities( [ AlertSensor(name, config_entry, coordinator) for name in [None] + list(config_entry.options.get(CONF_SENSORS, {}).keys()) ] + [AlertSensorAllAreas(config_entry, coordinator)] ) class AlertSensorBase( CoordinatorEntity[OrefAlertDataUpdateCoordinator], binary_sensor.BinarySensorEntity ): """Representation of the alert sensor base.""" _attr_has_entity_name = True _attr_device_class = binary_sensor.BinarySensorDeviceClass.SAFETY _entity_component_unrecorded_attributes = frozenset( { ATTR_COUNTRY_ACTIVE_ALERTS, ATTR_COUNTRY_ALERTS, ATTR_SELECTED_AREAS_ACTIVE_ALERTS, ATTR_SELECTED_AREAS_ALERTS, CONF_AREAS, CONF_ALERT_MAX_AGE, } ) def __init__( self, config_entry: ConfigEntry, coordinator: OrefAlertDataUpdateCoordinator, ) -> None: """Initialize object with defaults.""" super().__init__(coordinator) self._config_entry = config_entry self._on_icon = self._config_entry.options.get(CONF_ON_ICON, DEFAULT_ON_ICON) self._off_icon = self._config_entry.options.get(CONF_OFF_ICON, DEFAULT_OFF_ICON) self._data: OrefAlertCoordinatorData = coordinator.data @callback def _handle_coordinator_update(self) -> None: """Take the data from the coordinator.""" self._data = self.coordinator.data super()._handle_coordinator_update() @property def icon(self): """Return the sensor icon.""" return self._on_icon if self.is_on else self._off_icon class AlertSensor(AlertSensorBase): """Representation of the alert sensor.""" def __init__( self, name: str | None, config_entry: ConfigEntry, coordinator: OrefAlertDataUpdateCoordinator, ) -> None: """Initialize object with defaults.""" super().__init__(config_entry, coordinator) if not name:
"""Support for representing daily schedule as binary sensors.""" from __future__ import annotations async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Initialize config entry.""" coordinator = hass.data[DOMAIN][config_entry.entry_id][DATA_COORDINATOR] async_add_entities( [ AlertSensor(name, config_entry, coordinator) for name in [None] + list(config_entry.options.get(CONF_SENSORS, {}).keys()) ] + [AlertSensorAllAreas(config_entry, coordinator)] ) class AlertSensorBase( CoordinatorEntity[OrefAlertDataUpdateCoordinator], binary_sensor.BinarySensorEntity ): """Representation of the alert sensor base.""" _attr_has_entity_name = True _attr_device_class = binary_sensor.BinarySensorDeviceClass.SAFETY _entity_component_unrecorded_attributes = frozenset( { ATTR_COUNTRY_ACTIVE_ALERTS, ATTR_COUNTRY_ALERTS, ATTR_SELECTED_AREAS_ACTIVE_ALERTS, ATTR_SELECTED_AREAS_ALERTS, CONF_AREAS, CONF_ALERT_MAX_AGE, } ) def __init__( self, config_entry: ConfigEntry, coordinator: OrefAlertDataUpdateCoordinator, ) -> None: """Initialize object with defaults.""" super().__init__(coordinator) self._config_entry = config_entry self._on_icon = self._config_entry.options.get(CONF_ON_ICON, DEFAULT_ON_ICON) self._off_icon = self._config_entry.options.get(CONF_OFF_ICON, DEFAULT_OFF_ICON) self._data: OrefAlertCoordinatorData = coordinator.data @callback def _handle_coordinator_update(self) -> None: """Take the data from the coordinator.""" self._data = self.coordinator.data super()._handle_coordinator_update() @property def icon(self): """Return the sensor icon.""" return self._on_icon if self.is_on else self._off_icon class AlertSensor(AlertSensorBase): """Representation of the alert sensor.""" def __init__( self, name: str | None, config_entry: ConfigEntry, coordinator: OrefAlertDataUpdateCoordinator, ) -> None: """Initialize object with defaults.""" super().__init__(config_entry, coordinator) if not name:
self._attr_name = TITLE
3
2023-10-18 11:16:41+00:00
4k
apple/ml-nvas3d
demo/generate_demo_data.py
[ { "identifier": "render_ir_parallel_room_idx", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def render_ir_parallel_room_idx(room: str,\n source_idx_list: T.List[int],\n receiver_idx_list: T.List[int],\n filename: str = None,\n grid_distance=1.0,\n batch_size: int = 64,\n sample_rate: float = 48000,\n use_default_material: bool = False,\n channel_type='Ambisonics' # Binaural\n ) -> T.List[torch.Tensor]:\n \"\"\"\n Run render_ir parallely for all elements of all_pair(source_idx_list, receiver_idx_list)\n \"\"\"\n\n grid_points = load_room_grid(room, grid_distance=grid_distance)['grid_points']\n\n source_idx_pair_list, receiver_idx_pair_list = all_pairs(source_idx_list, receiver_idx_list) # only for filename\n receiver_points = grid_points[receiver_idx_list]\n source_points = grid_points[source_idx_list]\n\n source_points_pair, receiver_points_pair = all_pairs(source_points, receiver_points)\n\n room_list = [room] * len(source_points_pair)\n if filename is not None:\n filename_list = [f'{filename}_{room}_{source_idx}_{receiver_idx}.wav'\n for source_idx, receiver_idx in zip(source_idx_pair_list, receiver_idx_pair_list)]\n else:\n filename_list = None\n\n # Render IR for grid points\n ir_list = render_rir_parallel(room_list,\n source_points_pair,\n receiver_points_pair,\n filename_list,\n batch_size=batch_size,\n sample_rate=sample_rate,\n use_default_material=use_default_material,\n channel_type=channel_type)\n\n return ir_list, source_idx_pair_list, receiver_idx_pair_list" }, { "identifier": "create_scene", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def create_scene(room: str,\n receiver_position: T.Tuple[float, float, float] = [0.0, 0.0, 0.0],\n sample_rate: float = 48000,\n image_size: T.Tuple[int, int] = (512, 256),\n include_visual_sensor: bool = True,\n hfov: float = 90.0\n ) -> Scene:\n \"\"\"\n Create a soundspaces scene to render IR.\n \"\"\"\n\n # Note: Make sure mp3d room is downloaded\n with suppress_stdout_and_stderr():\n # Create a receiver\n receiver = Receiver(\n position=receiver_position,\n rotation=0,\n sample_rate=sample_rate\n )\n\n scene = Scene(\n room,\n [None], # placeholder for source class\n receiver=receiver,\n include_visual_sensor=include_visual_sensor,\n add_source_mesh=False,\n device=torch.device('cpu'),\n add_source=False,\n image_size=image_size,\n hfov=hfov\n )\n\n return scene" }, { "identifier": "load_room_grid", "path": "soundspaces_nvas3d/utils/aihabitat_utils.py", "snippet": "def load_room_grid(\n room: str,\n grid_distance: float\n) -> T.Dict:\n \"\"\"\n Load grid data for a specified room. If the grid data does not exist, it generates one.\n\n Args:\n - room: Name of the room.\n - grid_distance: The spacing between grid points.\n\n Returns:\n - A dictionary containing grid information for the specified room.\n \"\"\"\n\n grid_distance_str = str(grid_distance).replace(\".\", \"_\")\n dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'\n filename_grid = f'{dirname_grid}/grid_{room}.npy'\n if not os.path.exists(filename_grid):\n os.makedirs(dirname_grid, exist_ok=True)\n print(f'Computing grid_{room}...')\n from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points\n grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)\n\n # load grid\n grid_info = np.load(filename_grid, allow_pickle=True).item()\n\n return grid_info" }, { "identifier": "wiener_deconv_list", "path": "soundspaces_nvas3d/utils/audio_utils.py", "snippet": "def wiener_deconv_list(\n signal: T.List[torch.Tensor],\n kernel: T.List[torch.Tensor],\n snr: float,\n is_cpu: bool = False\n) -> torch.Tensor:\n \"\"\"\n wiener_deconv for list input.\n\n Args:\n - signal (torch.Tensor): List of signals.\n - kernel (torch.Tensor): List of kernels.\n - snr (float): Signal-to-noise ratio.\n - is_cpu (bool, optional): Flag to determine if the operation should be on the CPU.\n\n Returns:\n - torch.Tensor: Deconvolved signal.\n \"\"\"\n\n M = len(signal)\n if isinstance(signal, list):\n signal = torch.stack(signal).reshape(M, -1)\n assert signal.shape[0] == M\n kernel = torch.stack(kernel).reshape(M, -1)\n snr /= abs(kernel).max()\n\n if is_cpu:\n signal = signal.detach().cpu()\n kernel = kernel.detach().cpu()\n\n n_batch, n_samples = signal.shape\n\n # Pad the signals and kernels to avoid circular convolution\n padded_signal = F.pad(signal, (0, kernel.shape[-1] - 1))\n padded_kernel = F.pad(kernel, (0, signal.shape[-1] - 1))\n\n # Compute the Fourier transforms\n signal_fr = torch.fft.rfft(padded_signal, dim=-1)\n kernel_fr = torch.fft.rfft(padded_kernel, dim=-1)\n\n # Compute the Wiener filter in the frequency domain\n wiener_filter_fr = torch.conj(kernel_fr) / (torch.abs(kernel_fr)**2 + 1 / snr)\n\n # Apply the Wiener filter\n filtered_signal_fr = wiener_filter_fr * signal_fr\n\n # Compute the inverse Fourier transform\n filtered_signal = torch.fft.irfft(filtered_signal_fr, dim=-1)\n\n # Crop the filtered signals to the original size\n filtered_signal = filtered_signal[:, :n_samples]\n\n filtered_signal_list = [filtered_signal[i] for i in range(filtered_signal.size(0))]\n\n return filtered_signal_list" }, { "identifier": "clip_two", "path": "nvas3d/utils/audio_utils.py", "snippet": "def clip_two(audio1, audio2):\n \"\"\"\n Clips two audio signals to the same length.\n\n Args:\n audio1: First audio signal.\n audio2: Second audio signal.\n\n Returns: \n - Two audio signals of the same length.\n \"\"\"\n\n length_diff = audio1.shape[-1] - audio2.shape[-1]\n\n if length_diff == 0:\n return audio1, audio2\n elif length_diff > 0:\n audio1 = audio1[..., :audio2.shape[-1]]\n elif length_diff < 0:\n audio2 = audio2[..., :audio1.shape[-1]]\n\n return audio1, audio2" }, { "identifier": "normalize", "path": "nvas3d/utils/utils.py", "snippet": "def normalize(audio, norm='peak'):\n if norm == 'peak':\n peak = abs(audio).max()\n if peak != 0:\n return audio / peak\n else:\n return audio\n elif norm == 'rms':\n if torch.is_tensor(audio):\n audio = audio.numpy()\n audio_without_padding = np.trim_zeros(audio, trim='b')\n rms = np.sqrt(np.mean(np.square(audio_without_padding))) * 100\n if rms != 0:\n return audio / rms\n else:\n return audio\n else:\n raise NotImplementedError" }, { "identifier": "load_ir_source_receiver", "path": "nvas3d/utils/generate_dataset_utils.py", "snippet": "def load_ir_source_receiver(\n ir_dir: str,\n room: str,\n source_idx: int,\n receiver_idx_list: T.List[int],\n ir_length: int\n) -> T.List[torch.Tensor]:\n \"\"\"\n Load impulse responses for specific source and receivers in a room.\n\n Args:\n - ir_dir: Directory containing impulse response files.\n - room: Name of the room.\n - source_idx: Index of the source.\n - receiver_idx_list: List of receiver indices.\n - ir_length: Length of the impulse response to be loaded.\n\n Returns:\n - List of loaded impulse responses (first channel only).\n \"\"\"\n\n ir_list = []\n for receiver_idx in receiver_idx_list:\n filename_ir = f'{ir_dir}/{room}/ir_{room}_{source_idx}_{receiver_idx}.wav'\n ir, _ = torchaudio.load(filename_ir)\n if ir[0].shape[0] > ir_length:\n ir0 = ir[0][:ir_length]\n else:\n ir0 = F.pad(ir[0], (0, ir_length - ir[0].shape[0]))\n ir_list.append(ir0)\n\n return ir_list" }, { "identifier": "save_audio_list", "path": "nvas3d/utils/generate_dataset_utils.py", "snippet": "def save_audio_list(\n filename: str,\n audio_list: T.List[torch.Tensor],\n sample_rate: int,\n audio_format: str\n):\n \"\"\"\n Save a list of audio tensors to files.\n\n Args:\n - filename: Filename to save audio.\n - audio_list: List of audio tensors to save.\n - sample_rate: Sample rate of audio.\n - audio_format: File format to save audio.\n \"\"\"\n\n for idx_audio, audio in enumerate(audio_list):\n torchaudio.save(f'{filename}_{idx_audio+1}.{audio_format}', audio.unsqueeze(0), sample_rate)" }, { "identifier": "compute_reverb", "path": "nvas3d/utils/generate_dataset_utils.py", "snippet": "def compute_reverb(\n source_audio: torch.Tensor,\n ir_list: T.List[torch.Tensor],\n padding: str = 'valid'\n) -> T.List[torch.Tensor]:\n \"\"\"\n Compute reverberated audio signals by convolving source audio with impulse responses.\n\n Args:\n - source_audio: Source audio signal (dry) to be reverberated.\n - ir_list: List of impulse responses for reverberation.\n - padding: Padding mode for convolution ('valid' or 'full').\n\n Returns:\n - A list of reverberated audio signals.\n \"\"\"\n\n reverb_list = []\n for ir in ir_list:\n reverb = fftconvolve(source_audio, ir, padding)\n reverb_list.append(torch.from_numpy(reverb))\n\n return reverb_list" } ]
import os import json import random import argparse import subprocess import typing as T import torch import torchaudio from soundspaces_nvas3d.utils.ss_utils import render_ir_parallel_room_idx, create_scene from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid from soundspaces_nvas3d.utils.audio_utils import wiener_deconv_list from nvas3d.utils.audio_utils import clip_two from nvas3d.utils.utils import normalize from nvas3d.utils.generate_dataset_utils import load_ir_source_receiver, save_audio_list, compute_reverb
2,813
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # def generate_rir( args: argparse.Namespace, room: str, source_idx_list: T.List[int], receiver_idx_list: T.List[int] ): """ Generates and saves Room Impulse Response (RIR) data for pairs of source_idx_list and receiver_idx_list. Args: - args: Parsed command line arguments for dirname and grid distance. - room: Name of the room. - source_idx_list: List of source indices. - receiver_idx_list: List of receiver indices. """ ir_dir = f'data/{args.dataset_dir}/temp/ir/grid_{str(args.grid_distance).replace(".", "_")}' filename_ir = f'{ir_dir}/{room}/ir' os.makedirs(filename_ir, exist_ok=True)
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # def generate_rir( args: argparse.Namespace, room: str, source_idx_list: T.List[int], receiver_idx_list: T.List[int] ): """ Generates and saves Room Impulse Response (RIR) data for pairs of source_idx_list and receiver_idx_list. Args: - args: Parsed command line arguments for dirname and grid distance. - room: Name of the room. - source_idx_list: List of source indices. - receiver_idx_list: List of receiver indices. """ ir_dir = f'data/{args.dataset_dir}/temp/ir/grid_{str(args.grid_distance).replace(".", "_")}' filename_ir = f'{ir_dir}/{room}/ir' os.makedirs(filename_ir, exist_ok=True)
render_ir_parallel_room_idx(room, source_idx_list, receiver_idx_list, filename_ir, args.grid_distance)
0
2023-10-19 05:35:54+00:00
4k
virevolai/logos-shift-client
logos_shift_client/logos_shift.py
[ { "identifier": "BohitaClient", "path": "logos_shift_client/bohita.py", "snippet": "class BohitaClient:\n def __init__(self, api_key: str):\n if api_key is None:\n logging.warning(\n \"No API KEY provided. No data will be sent to Bohita and automatic routing will not happen\"\n )\n self.headers = None\n else:\n self.headers = {\n \"Content-Type\": \"application/json\",\n \"Bohita-Auth\": f\"Bearer {api_key}\",\n }\n self.async_client = httpx.AsyncClient(headers=self.headers, timeout=TIMEOUT)\n\n def post_instrumentation_data(self, data, dataset):\n if not self.headers:\n return\n try:\n response = requests.post(\n f\"{BASE_URL}/instrumentation/\",\n headers=self.headers,\n json={**data, \"dataset\": dataset},\n timeout=TIMEOUT,\n )\n response.raise_for_status()\n except requests.RequestException as e:\n logger.error(\"Failed to post instrumentation data: %s\", str(e))\n\n async def post_instrumentation_data_async(self, data, dataset):\n if not self.headers:\n return\n try:\n response = await self.async_client.post(\n f\"{BASE_URL}/instrumentation/\", json={**data, \"dataset\": dataset}\n )\n response.raise_for_status()\n except httpx.RequestError as e:\n logger.error(\"Failed to post instrumentation data: %s\", str(e))\n\n def get_config(self):\n if not self.headers:\n return {}\n try:\n response = requests.get(\n f\"{BASE_URL}/config\", headers=self.headers, timeout=TIMEOUT\n )\n response.raise_for_status()\n return response.json()\n except requests.RequestException as e:\n logger.error(\"Failed to get configuration: %s\", str(e))\n return {}\n\n async def get_config_async(self):\n if not self.headers:\n return {}\n try:\n response = await self.async_client.get(f\"{BASE_URL}/config\")\n response.raise_for_status()\n return response.json()\n except httpx.RequestError as e:\n logger.error(\"Failed to get configuration: %s\", str(e))\n return {}\n\n def predict(self, **kwargs):\n if not self.headers:\n return\n try:\n response = requests.post(\n f\"{BASE_URL}/predict\",\n headers=self.headers,\n json=kwargs,\n timeout=TIMEOUT,\n )\n response.raise_for_status()\n return response.json()\n except requests.RequestException as e:\n logger.error(\"Failed to make prediction: %s\", str(e))\n\n async def predict_async(self, **kwargs):\n if not self.headers:\n return\n try:\n response = await self.async_client.post(f\"{BASE_URL}/predict\", json=kwargs)\n response.raise_for_status()\n return response.json()\n except httpx.RequestError as e:\n logger.error(\"Failed to make prediction: %s\", str(e))" }, { "identifier": "APIRouter", "path": "logos_shift_client/router.py", "snippet": "class APIRouter:\n \"\"\"\n APIRouter is responsible for routing API calls based on the provided configuration.\n\n It supports three modes:\n - \"never\": Always use the old API.\n - \"random\": Randomly choose between the old and new API based on a threshold.\n - \"user_based\": Decide based on a hash of the user ID.\n\n Attributes:\n bohita_client (BohitaClient): The client used to communicate with the Bohita platform.\n threshold (float): The percentage of requests to route to the new API. Default is 0.1 (10%).\n mode (str): The routing mode. Can be \"never\", \"random\", or \"user_based\". Default is \"never\".\n call_count (int): The number of API calls made.\n conf_frequency (int): How frequently to fetch configuration updates from the server.\n\n Examples:\n >>> router = APIRouter(bohita_client, threshold=0.2, mode=\"random\")\n >>> api_to_call = router.get_api_to_call(old_api_func)\n \"\"\"\n\n def __init__(self, bohita_client=None, threshold=0.1, mode=\"never\"):\n \"\"\"\n Initializes a new instance of APIRouter.\n\n Args:\n bohita_client (Optional[BohitaClient]): An instance of BohitaClient used to communicate with the Bohita platform.\n threshold (float): The percentage of requests to route to the new API. Default is 0.1 (10%).\n mode (str): The routing mode. Can be \"never\", \"random\", or \"user_based\". Default is \"never\".\n \"\"\"\n self.bohita_client = bohita_client\n if not 0 <= threshold <= 1:\n raise ValueError(\"Threshold must be between 0 and 1\")\n self.threshold = threshold # precentage of requests to new API\n self.mode = mode # \"never\", \"random\" or \"user_based\"\n self.call_count, self.conf_frequency = (\n 0,\n 1_000,\n ) # How frequently to fetch config\n logger.info(f\"Initialized {mode} router\")\n self._get_configuration()\n\n async def _get_configuration_common(self, is_async):\n \"\"\"\n Fetches the routing configuration from the Bohita platform and updates the router's settings.\n\n This method is called periodically based on the conf_frequency setting.\n \"\"\"\n try:\n logger.info(\"Checking for config updates\")\n if is_async:\n config = await self.bohita_client.get_config_async()\n else:\n config = self.bohita_client.get_config()\n self.threshold = config.get(\"threshold\", self.threshold)\n self.mode = config.get(\"mode\", self.mode)\n self.conf_frequency = config.get(\"frequency\", self.conf_frequency)\n logger.info(\"Configuration updated successfully\")\n except Exception as e:\n logger.warning(\"Could not get configuration from server: %s\", str(e))\n logger.warning(\"If the problem persists, this instance might be stale\")\n\n def _get_configuration(self):\n asyncio.run(self._get_configuration_common(False))\n\n async def _get_configuration_async(self):\n await self._get_configuration_common(True)\n\n def _get_user_hash(self, user_id):\n return int(hashlib.md5(str(user_id).encode()).hexdigest(), 16)\n\n def should_route_to_new_api(self, user_id=None):\n \"\"\"\n Determines whether the next API call should be routed to the new API based on the current mode and threshold.\n\n Args:\n user_id (Optional[str]): The user ID for user-based routing. Required if mode is \"user_based\".\n\n Returns:\n bool: True if the call should be routed to the new API, False otherwise.\n \"\"\"\n if self.mode == \"random\":\n return random.random() < self.threshold\n elif self.mode == \"user_based\":\n if user_id:\n return self._get_user_hash(user_id) % 100 < self.threshold * 100\n return False\n\n def get_api_to_call(self, old_api_func, user_id=None):\n \"\"\"\n Determines which API function to call based on the routing configuration.\n\n Args:\n old_api_func (callable): The old API function.\n user_id (Optional[str]): The user ID for user-based routing.\n\n Returns:\n callable: The API function to call.\n \"\"\"\n self.call_count += 1\n if self.call_count % self.conf_frequency == 0:\n self._get_configuration()\n if self.should_route_to_new_api(user_id):\n return self.call_new_api\n return old_api_func\n\n async def get_api_to_call_async(self, old_api_func, user_id=None):\n \"\"\"\n Determines which API function to call based on the routing configuration.\n\n Args:\n old_api_func (callable): The old API function.\n user_id (Optional[str]): The user ID for user-based routing.\n\n Returns:\n callable: The API function to call.\n \"\"\"\n self.call_count += 1\n if self.call_count % self.conf_frequency == 0:\n await self._get_configuration_async()\n if self.should_route_to_new_api(user_id):\n return self.call_new_api_async\n return old_api_func\n\n async def call_new_api_async(self, **kwargs):\n await self.bohita_client.predict_async(**kwargs)\n\n def call_new_api(self, **kwargs):\n self.bohita_client.predict(**kwargs)" } ]
import asyncio import logging import threading import time import uuid from pathlib import Path from collections import deque from typing import Optional, Union from tenacity import retry, wait_fixed from .bohita import BohitaClient from .router import APIRouter
2,308
logger = logging.getLogger(__name__) MAX_ENTRIES = 10 CHECK_SECONDS = 5 class SingletonMeta(type): _instances = {} _lock = threading.Lock() def __call__(cls, *args, **kwargs): with cls._lock: if cls not in cls._instances: instance = super().__call__(*args, **kwargs) cls._instances[cls] = instance return cls._instances[cls] class BufferManager(metaclass=SingletonMeta): """ A singleton class responsible for managing data buffers and sending data to a remote server. Attributes: bohita_client: An instance of BohitaClient used to send data to the remote server. check_seconds: The interval in seconds between checks to send data from the buffers. filepath: The file path for local data storage. If None, data is not stored locally. buffers: A list of data buffers. thread: The thread responsible for sending data from the buffers. """ _instance = None lock = threading.Lock() def __init__( self,
logger = logging.getLogger(__name__) MAX_ENTRIES = 10 CHECK_SECONDS = 5 class SingletonMeta(type): _instances = {} _lock = threading.Lock() def __call__(cls, *args, **kwargs): with cls._lock: if cls not in cls._instances: instance = super().__call__(*args, **kwargs) cls._instances[cls] = instance return cls._instances[cls] class BufferManager(metaclass=SingletonMeta): """ A singleton class responsible for managing data buffers and sending data to a remote server. Attributes: bohita_client: An instance of BohitaClient used to send data to the remote server. check_seconds: The interval in seconds between checks to send data from the buffers. filepath: The file path for local data storage. If None, data is not stored locally. buffers: A list of data buffers. thread: The thread responsible for sending data from the buffers. """ _instance = None lock = threading.Lock() def __init__( self,
bohita_client: BohitaClient,
0
2023-10-20 00:00:38+00:00
4k
kwonathan/language-models-trajectory-generators
env.py
[ { "identifier": "Robot", "path": "robot.py", "snippet": "class Robot:\n\n def __init__(self, args):\n\n if args.robot == \"sawyer\":\n self.base_start_position = config.base_start_position_sawyer\n self.base_start_orientation_q = p.getQuaternionFromEuler(config.base_start_orientation_e_sawyer)\n self.joint_start_positions = config.joint_start_positions_sawyer\n self.id = p.loadURDF(\"sawyer_robot/sawyer_description/urdf/sawyer.urdf\", self.base_start_position, self.base_start_orientation_q, useFixedBase=True)\n self.robot = \"sawyer\"\n self.ee_index = config.ee_index_sawyer\n elif args.robot == \"franka\":\n self.base_start_position = config.base_start_position_franka\n self.base_start_orientation_q = p.getQuaternionFromEuler(config.base_start_orientation_e_franka)\n self.joint_start_positions = config.joint_start_positions_franka\n self.id = p.loadURDF(\"franka_robot/panda.urdf\", self.base_start_position, self.base_start_orientation_q, useFixedBase=True)\n self.robot = \"franka\"\n self.ee_index = config.ee_index_franka\n self.ee_start_position = config.ee_start_position\n self.ee_start_orientation_e = config.ee_start_orientation_e\n self.ee_current_position = config.ee_start_position\n self.ee_current_orientation_e = config.ee_start_orientation_e\n\n self.gripper_open = True\n self.trajectory_step = 1\n\n i = 0\n for j in range(p.getNumJoints(self.id)):\n joint_type = p.getJointInfo(self.id, j)[2]\n if joint_type == p.JOINT_PRISMATIC or joint_type == p.JOINT_REVOLUTE:\n p.resetJointState(self.id, j, self.joint_start_positions[i])\n i += 1\n\n\n\n def move(self, env, ee_target_position, ee_target_orientation_e, gripper_open, is_trajectory):\n\n if self.robot == \"sawyer\":\n gripper1_index = None\n gripper2_index = None\n gripper_target_position = config.gripper_goal_position_open_sawyer if gripper_open else config.gripper_goal_position_closed_sawyer\n elif self.robot == \"franka\":\n gripper1_index = 9\n gripper2_index = 10\n gripper_target_position = config.gripper_goal_position_open_franka if gripper_open else config.gripper_goal_position_closed_franka\n\n min_joint_positions = [p.getJointInfo(self.id, i)[8] for i in range(p.getNumJoints(self.id)) if p.getJointInfo(self.id, i)[2] == p.JOINT_PRISMATIC or p.getJointInfo(self.id, i)[2] == p.JOINT_REVOLUTE]\n max_joint_positions = [p.getJointInfo(self.id, i)[9] for i in range(p.getNumJoints(self.id)) if p.getJointInfo(self.id, i)[2] == p.JOINT_PRISMATIC or p.getJointInfo(self.id, i)[2] == p.JOINT_REVOLUTE]\n joint_ranges = [abs(max_joint_position - min_joint_position) for min_joint_position, max_joint_position in zip(min_joint_positions, max_joint_positions)]\n rest_poses = list((np.array(max_joint_positions) + np.array(min_joint_positions)) / 2)\n\n ee_target_orientation_q = p.getQuaternionFromEuler(ee_target_orientation_e)\n\n ee_current_position = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[0]\n ee_current_orientation_q = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[1]\n ee_current_orientation_e = p.getEulerFromQuaternion(ee_current_orientation_q)\n gripper1_current_position = p.getJointState(self.id, gripper1_index)[0]\n gripper2_current_position = p.getJointState(self.id, gripper2_index)[0]\n\n time_step = 0\n\n while (not (ee_current_position[0] <= ee_target_position[0] + config.margin_error and ee_current_position[0] >= ee_target_position[0] - config.margin_error and\n ee_current_position[1] <= ee_target_position[1] + config.margin_error and ee_current_position[1] >= ee_target_position[1] - config.margin_error and\n ee_current_position[2] <= ee_target_position[2] + config.margin_error and ee_current_position[2] >= ee_target_position[2] - config.margin_error and\n ee_current_orientation_e[0] <= ee_target_orientation_e[0] + config.margin_error and ee_current_orientation_e[0] >= ee_target_orientation_e[0] - config.margin_error and\n ee_current_orientation_e[1] <= ee_target_orientation_e[1] + config.margin_error and ee_current_orientation_e[1] >= ee_target_orientation_e[1] - config.margin_error and\n ee_current_orientation_e[2] <= ee_target_orientation_e[2] + config.margin_error and ee_current_orientation_e[2] >= ee_target_orientation_e[2] - config.margin_error and\n gripper1_current_position <= gripper_target_position + config.gripper_margin_error and gripper1_current_position >= gripper_target_position - config.gripper_margin_error and\n gripper2_current_position <= gripper_target_position + config.gripper_margin_error and gripper2_current_position >= gripper_target_position - config.gripper_margin_error)):\n\n target_joint_positions = p.calculateInverseKinematics(self.id, self.ee_index, ee_target_position, targetOrientation=ee_target_orientation_q, lowerLimits=min_joint_positions, upperLimits=max_joint_positions, jointRanges=joint_ranges, restPoses=rest_poses, maxNumIterations=500)\n\n if self.robot == \"sawyer\":\n pass\n elif self.robot == \"franka\":\n p.setJointMotorControlArray(self.id, range(7), p.POSITION_CONTROL, targetPositions=target_joint_positions[:-2], forces=[config.arm_movement_force_franka] * 7)\n p.setJointMotorControl2(self.id, gripper1_index, p.POSITION_CONTROL, targetPosition=gripper_target_position, force=config.gripper_movement_force_franka)\n p.setJointMotorControl2(self.id, gripper2_index, p.POSITION_CONTROL, targetPosition=gripper_target_position, force=config.gripper_movement_force_franka)\n\n env.update()\n self.get_camera_image(\"head\", env, save_camera_image=is_trajectory, rgb_image_path=config.rgb_image_trajectory_path.format(step=self.trajectory_step), depth_image_path=config.depth_image_trajectory_path.format(step=self.trajectory_step))\n if is_trajectory:\n self.trajectory_step += 1\n\n ee_current_position = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[0]\n ee_current_orientation_q = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[1]\n ee_current_orientation_e = p.getEulerFromQuaternion(ee_current_orientation_q)\n gripper1_new_position = p.getJointState(self.id, gripper1_index)[0]\n gripper2_new_position = p.getJointState(self.id, gripper2_index)[0]\n\n self.ee_current_position = ee_current_position\n self.ee_current_orientation_e = ee_current_orientation_e\n self.gripper_open = gripper_open\n\n if ((ee_current_position[0] <= ee_target_position[0] + config.margin_error and ee_current_position[0] >= ee_target_position[0] - config.margin_error and\n ee_current_position[1] <= ee_target_position[1] + config.margin_error and ee_current_position[1] >= ee_target_position[1] - config.margin_error and\n ee_current_position[2] <= ee_target_position[2] + config.margin_error and ee_current_position[2] >= ee_target_position[2] - config.margin_error and\n ee_current_orientation_e[0] <= ee_target_orientation_e[0] + config.margin_error and ee_current_orientation_e[0] >= ee_target_orientation_e[0] - config.margin_error and\n ee_current_orientation_e[1] <= ee_target_orientation_e[1] + config.margin_error and ee_current_orientation_e[1] >= ee_target_orientation_e[1] - config.margin_error and\n ee_current_orientation_e[2] <= ee_target_orientation_e[2] + config.margin_error and ee_current_orientation_e[2] >= ee_target_orientation_e[2] - config.margin_error) and\n (not gripper_open) and\n math.isclose(gripper1_new_position, gripper1_current_position, rel_tol=config.rel_tol, abs_tol=config.abs_tol) and\n math.isclose(gripper2_new_position, gripper2_current_position, rel_tol=config.rel_tol, abs_tol=config.abs_tol)):\n break\n\n gripper1_current_position = gripper1_new_position\n gripper2_current_position = gripper2_new_position\n\n time_step += 1\n\n if is_trajectory:\n if time_step > 0:\n break\n else:\n if time_step > 99:\n break\n\n\n\n def get_camera_image(self, camera, env, save_camera_image, rgb_image_path, depth_image_path):\n\n if camera == \"wrist\":\n camera_position = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[0]\n camera_orientation_q = p.getLinkState(self.id, self.ee_index, computeForwardKinematics=True)[1]\n elif camera == \"head\":\n camera_position = config.head_camera_position\n camera_orientation_q = p.getQuaternionFromEuler(config.head_camera_orientation_e)\n\n projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near_plane, far_plane)\n rotation_matrix = np.array(p.getMatrixFromQuaternion(camera_orientation_q)).reshape(3, 3)\n\n if camera == \"wrist\":\n init_camera_vector = [0, 0, 1]\n init_up_vector = [1, 0, 0]\n elif camera == \"head\":\n init_camera_vector = [0, 0, 1]\n init_up_vector = [-1, 0, 0]\n\n camera_vector = rotation_matrix.dot(init_camera_vector)\n up_vector = rotation_matrix.dot(init_up_vector)\n view_matrix = p.computeViewMatrix(camera_position, camera_position + camera_vector, up_vector)\n\n image = p.getCameraImage(config.image_width, config.image_height, viewMatrix=view_matrix, projectionMatrix=projection_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)\n\n rgb_buffer = image[2]\n depth_buffer = image[3]\n\n if save_camera_image:\n rgb_image = Image.fromarray(rgb_buffer)\n rgb_image.save(rgb_image_path)\n\n n = config.near_plane\n f = config.far_plane\n depth_array = 2 * n * f / (f + n - (2 * depth_buffer - 1.0) * (f - n))\n\n save_image(torch.Tensor(depth_array), depth_image_path)\n\n return camera_position, camera_orientation_q" }, { "identifier": "OK", "path": "config.py", "snippet": "OK = \"\\033[92m\"" }, { "identifier": "PROGRESS", "path": "config.py", "snippet": "PROGRESS = \"\\033[93m\"" }, { "identifier": "FAIL", "path": "config.py", "snippet": "FAIL = \"\\033[91m\"" }, { "identifier": "ENDC", "path": "config.py", "snippet": "ENDC = \"\\033[0m\"" }, { "identifier": "CAPTURE_IMAGES", "path": "config.py", "snippet": "CAPTURE_IMAGES = 1" }, { "identifier": "ADD_BOUNDING_CUBES", "path": "config.py", "snippet": "ADD_BOUNDING_CUBES = 2" }, { "identifier": "ADD_TRAJECTORY_POINTS", "path": "config.py", "snippet": "ADD_TRAJECTORY_POINTS = 3" }, { "identifier": "EXECUTE_TRAJECTORY", "path": "config.py", "snippet": "EXECUTE_TRAJECTORY = 4" }, { "identifier": "OPEN_GRIPPER", "path": "config.py", "snippet": "OPEN_GRIPPER = 5" }, { "identifier": "CLOSE_GRIPPER", "path": "config.py", "snippet": "CLOSE_GRIPPER = 6" }, { "identifier": "TASK_COMPLETED", "path": "config.py", "snippet": "TASK_COMPLETED = 7" }, { "identifier": "RESET_ENVIRONMENT", "path": "config.py", "snippet": "RESET_ENVIRONMENT = 8" } ]
import pybullet as p import numpy as np import pybullet_data import time import config from robot import Robot from config import OK, PROGRESS, FAIL, ENDC from config import CAPTURE_IMAGES, ADD_BOUNDING_CUBES, ADD_TRAJECTORY_POINTS, EXECUTE_TRAJECTORY, OPEN_GRIPPER, CLOSE_GRIPPER, TASK_COMPLETED, RESET_ENVIRONMENT
3,151
class Environment: def __init__(self, args): self.mode = args.mode def load(self): p.resetDebugVisualizerCamera(config.camera_distance, config.camera_yaw, config.camera_pitch, config.camera_target_position) object_start_position = config.object_start_position object_start_orientation_q = p.getQuaternionFromEuler(config.object_start_orientation_e) object_model = p.loadURDF("ycb_assets/002_master_chef_can.urdf", object_start_position, object_start_orientation_q, useFixedBase=False, globalScaling=config.global_scaling) if self.mode == "default": p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0) p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0) def update(self): p.stepSimulation() time.sleep(config.control_dt) def run_simulation_environment(args, env_connection, logger): # Environment set-up logger.info(PROGRESS + "Setting up environment..." + ENDC) physics_client = p.connect(p.GUI) p.setAdditionalSearchPath(pybullet_data.getDataPath()) p.setGravity(0, 0, -9.81) plane = p.loadURDF("plane.urdf") env = Environment(args) env.load() robot = Robot(args) robot.move(env, robot.ee_start_position, robot.ee_start_orientation_e, gripper_open=True, is_trajectory=False)
class Environment: def __init__(self, args): self.mode = args.mode def load(self): p.resetDebugVisualizerCamera(config.camera_distance, config.camera_yaw, config.camera_pitch, config.camera_target_position) object_start_position = config.object_start_position object_start_orientation_q = p.getQuaternionFromEuler(config.object_start_orientation_e) object_model = p.loadURDF("ycb_assets/002_master_chef_can.urdf", object_start_position, object_start_orientation_q, useFixedBase=False, globalScaling=config.global_scaling) if self.mode == "default": p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0) p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0) def update(self): p.stepSimulation() time.sleep(config.control_dt) def run_simulation_environment(args, env_connection, logger): # Environment set-up logger.info(PROGRESS + "Setting up environment..." + ENDC) physics_client = p.connect(p.GUI) p.setAdditionalSearchPath(pybullet_data.getDataPath()) p.setGravity(0, 0, -9.81) plane = p.loadURDF("plane.urdf") env = Environment(args) env.load() robot = Robot(args) robot.move(env, robot.ee_start_position, robot.ee_start_orientation_e, gripper_open=True, is_trajectory=False)
env_connection_message = OK + "Finished setting up environment!" + ENDC
1
2023-10-18 16:38:09+00:00
4k
kvablack/susie
susie/model.py
[ { "identifier": "sampling", "path": "susie/sampling.py", "snippet": "def q_sample(x_0, log_snr, noise):\ndef model_predict(state, x, y, prompt_embeds, t, use_ema=True):\ndef sample_step(\n rng,\n state,\n x,\n y,\n prompt_embeds,\n uncond_y,\n uncond_prompt_embeds,\n t,\n t_next,\n log_snr_fn,\n context_w,\n prompt_w,\n eta,\n):\ndef sample_loop(\n rng,\n state,\n y,\n prompt_embeds,\n uncond_y,\n uncond_prompt_embeds,\n *,\n log_snr_fn,\n num_timesteps,\n context_w=1.0,\n prompt_w=1.0,\n eta=0.0,\n):\n def scan_fn(carry, t_combined):" }, { "identifier": "scheduling", "path": "susie/scheduling.py", "snippet": "def lnpoch(a, b):\ndef linear_log_snr(t, *, beta_start=0.001, beta_end=0.02, num_timesteps=1000):\ndef scaled_linear_log_snr(t, *, beta_start=0.00085, beta_end=0.012, num_timesteps=1000):\ndef cosine_log_snr(t, s: float = 0.008, d: float = 0.008):\ndef create_log_snr_fn(config):\ndef create_ema_decay_fn(config):\n def ema_decay_schedule(step):" }, { "identifier": "replicate", "path": "susie/jax_utils.py", "snippet": "def replicate(x: Any, devices: Sequence[jax.Device]) -> jax.Array:\n \"\"\"Replicate an array across devices. Works in multi-host setting.\"\"\"\n sharding = jax.sharding.PositionalSharding(devices).replicate()\n x = jax.tree_map(jnp.array, x)\n return jax.tree_map(\n lambda arr: jax.make_array_from_callback(\n arr.shape, sharding, lambda index: arr[index]\n ),\n x,\n )" } ]
import os import time import einops as eo import jax import jax.numpy as jnp import ml_collections import numpy as np import orbax.checkpoint import wandb from functools import partial from typing import Any, Callable, List, Optional, Tuple from absl import logging from diffusers.models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from flax.core.frozen_dict import FrozenDict from flax.training.train_state import TrainState from jax.lax import with_sharding_constraint as wsc from transformers import CLIPTokenizer, FlaxCLIPTextModel from susie import sampling, scheduling from susie.jax_utils import replicate
2,490
scale, lambda: latents / vae.config.scaling_factor, lambda: latents ) sample = vae.apply({"params": vae_params}, latents, method=vae.decode) sample = eo.rearrange(sample, "(n x) h w c -> n h w (x c)", n=batch_size) return sample return partial(vae_encode, vae_params), partial(vae_decode, vae_params) def load_text_encoder( path: str, ) -> Tuple[ Callable[[List[str]], np.ndarray], Callable[[np.ndarray], List[str]], Callable[[jax.Array], jax.Array], ]: if ":" in path: path, revision = path.split(":") else: revision = None text_encoder = FlaxCLIPTextModel.from_pretrained( path, subfolder="text_encoder", revision=revision ) tokenizer = CLIPTokenizer.from_pretrained( path, subfolder="tokenizer", revision=revision ) def tokenize(s: List[str]) -> np.ndarray: return tokenizer(s, padding="max_length", return_tensors="np").input_ids untokenize = partial(tokenizer.batch_decode, skip_special_tokens=True) @jax.jit def text_encode(params, prompt_ids): return text_encoder(prompt_ids, params=params)[0] return tokenize, untokenize, partial(text_encode, text_encoder.params) def load_pretrained_unet( path: str, in_channels: int ) -> Tuple[FlaxUNet2DConditionModel, dict]: model_def, params = FlaxUNet2DConditionModel.from_pretrained( path, dtype=np.float32, subfolder="unet" ) # same issue, they commit the params to the CPU, which totally messes stuff # up downstream... params = jax.device_get(params) # add extra parameters to conv_in if necessary old_conv_in = params["conv_in"]["kernel"] h, w, cin, cout = old_conv_in.shape logging.info(f"Adding {in_channels - cin} channels to conv_in") params["conv_in"]["kernel"] = np.zeros( (h, w, in_channels, cout), dtype=old_conv_in.dtype ) params["conv_in"]["kernel"][:, :, :cin, :] = old_conv_in # monkey-patch __call__ to use channels-last model_def.__call__ = lambda self, sample, *args, **kwargs: eo.rearrange( FlaxUNet2DConditionModel.__call__( self, eo.rearrange(sample, "b h w c -> b c h w"), *args, **kwargs ).sample, "b c h w -> b h w c", ) return model_def, params def create_sample_fn( path: str, wandb_run_name: Optional[str] = None, num_timesteps: int = 50, prompt_w: float = 7.5, context_w: float = 2.5, eta: float = 0.0, pretrained_path: str = "runwayml/stable-diffusion-v1-5:flax", ) -> Callable[[np.ndarray, str], np.ndarray]: if ( os.path.exists(path) and os.path.isdir(path) and "checkpoint" in os.listdir(path) ): # this is an orbax checkpoint assert wandb_run_name is not None # load config from wandb api = wandb.Api() run = api.run(wandb_run_name) config = ml_collections.ConfigDict(run.config) # load params params = orbax.checkpoint.PyTreeCheckpointer().restore(path, item=None) assert "params_ema" not in params # load model model_def = create_model_def(config.model) else: # assume this is in HuggingFace format model_def, params = load_pretrained_unet(path, in_channels=8) # hardcode scheduling config to be "scaled_linear" (used by Stable Diffusion) config = {"scheduling": {"noise_schedule": "scaled_linear"}} state = EmaTrainState( step=0, apply_fn=model_def.apply, params=None, params_ema=params, tx=None, opt_state=None, ) del params # load encoders vae_encode, vae_decode = load_vae(pretrained_path) tokenize, untokenize, text_encode = load_text_encoder(pretrained_path) uncond_prompt_embed = text_encode(tokenize([""])) # (1, 77, 768) log_snr_fn = scheduling.create_log_snr_fn(config["scheduling"])
class EmaTrainState(TrainState): params_ema: FrozenDict[str, Any] @partial(jax.jit, donate_argnums=0) def apply_ema_decay(self, ema_decay): params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_decay + p * (1.0 - ema_decay), self.params_ema, self.params, ) return self.replace(params_ema=params_ema) def create_model_def(config: dict) -> FlaxUNet2DConditionModel: model, unused_kwargs = FlaxUNet2DConditionModel.from_config( dict(config), return_unused_kwargs=True ) if unused_kwargs: logging.warning(f"FlaxUNet2DConditionModel unused kwargs: {unused_kwargs}") # monkey-patch __call__ to use channels-last model.__call__ = lambda self, sample, *args, **kwargs: eo.rearrange( FlaxUNet2DConditionModel.__call__( self, eo.rearrange(sample, "b h w c -> b c h w"), *args, **kwargs ).sample, "b c h w -> b h w c", ) return model def load_vae( path: str, ) -> Tuple[ Callable[[jax.Array, jax.Array, bool], jax.Array], Callable[[jax.Array, bool], jax.Array], ]: if ":" in path: path, revision = path.split(":") else: revision = None vae, vae_params = FlaxAutoencoderKL.from_pretrained( path, subfolder="vae", revision=revision ) # monkey-patch encode to use channels-last (it returns a FlaxDiagonalGaussianDistribution object, which is already # channels-last) vae.encode = lambda self, sample, *args, **kwargs: FlaxAutoencoderKL.encode( self, eo.rearrange(sample, "b h w c -> b c h w"), *args, **kwargs ).latent_dist # monkey-patch decode to use channels-last (it already accepts channels-last input) vae.decode = lambda self, latents, *args, **kwargs: eo.rearrange( FlaxAutoencoderKL.decode(self, latents, *args, **kwargs).sample, "b c h w -> b h w c", ) # HuggingFace places vae_params committed onto the CPU -_- # this one took me awhile to figure out... vae_params = jax.device_get(vae_params) @jax.jit def vae_encode(vae_params, key, sample, scale=False): # handle the case where `sample` is multiple images stacked batch_size = sample.shape[0] sample = eo.rearrange(sample, "n h w (x c) -> (n x) h w c", c=3) latents = vae.apply({"params": vae_params}, sample, method=vae.encode).sample( key ) latents = eo.rearrange(latents, "(n x) h w c -> n h w (x c)", n=batch_size) latents = jax.lax.cond( scale, lambda: latents * vae.config.scaling_factor, lambda: latents ) return latents @jax.jit def vae_decode(vae_params, latents, scale=True): # handle the case where `latents` is multiple images stacked batch_size = latents.shape[0] latents = eo.rearrange( latents, "n h w (x c) -> (n x) h w c", c=vae.config.latent_channels ) latents = jax.lax.cond( scale, lambda: latents / vae.config.scaling_factor, lambda: latents ) sample = vae.apply({"params": vae_params}, latents, method=vae.decode) sample = eo.rearrange(sample, "(n x) h w c -> n h w (x c)", n=batch_size) return sample return partial(vae_encode, vae_params), partial(vae_decode, vae_params) def load_text_encoder( path: str, ) -> Tuple[ Callable[[List[str]], np.ndarray], Callable[[np.ndarray], List[str]], Callable[[jax.Array], jax.Array], ]: if ":" in path: path, revision = path.split(":") else: revision = None text_encoder = FlaxCLIPTextModel.from_pretrained( path, subfolder="text_encoder", revision=revision ) tokenizer = CLIPTokenizer.from_pretrained( path, subfolder="tokenizer", revision=revision ) def tokenize(s: List[str]) -> np.ndarray: return tokenizer(s, padding="max_length", return_tensors="np").input_ids untokenize = partial(tokenizer.batch_decode, skip_special_tokens=True) @jax.jit def text_encode(params, prompt_ids): return text_encoder(prompt_ids, params=params)[0] return tokenize, untokenize, partial(text_encode, text_encoder.params) def load_pretrained_unet( path: str, in_channels: int ) -> Tuple[FlaxUNet2DConditionModel, dict]: model_def, params = FlaxUNet2DConditionModel.from_pretrained( path, dtype=np.float32, subfolder="unet" ) # same issue, they commit the params to the CPU, which totally messes stuff # up downstream... params = jax.device_get(params) # add extra parameters to conv_in if necessary old_conv_in = params["conv_in"]["kernel"] h, w, cin, cout = old_conv_in.shape logging.info(f"Adding {in_channels - cin} channels to conv_in") params["conv_in"]["kernel"] = np.zeros( (h, w, in_channels, cout), dtype=old_conv_in.dtype ) params["conv_in"]["kernel"][:, :, :cin, :] = old_conv_in # monkey-patch __call__ to use channels-last model_def.__call__ = lambda self, sample, *args, **kwargs: eo.rearrange( FlaxUNet2DConditionModel.__call__( self, eo.rearrange(sample, "b h w c -> b c h w"), *args, **kwargs ).sample, "b c h w -> b h w c", ) return model_def, params def create_sample_fn( path: str, wandb_run_name: Optional[str] = None, num_timesteps: int = 50, prompt_w: float = 7.5, context_w: float = 2.5, eta: float = 0.0, pretrained_path: str = "runwayml/stable-diffusion-v1-5:flax", ) -> Callable[[np.ndarray, str], np.ndarray]: if ( os.path.exists(path) and os.path.isdir(path) and "checkpoint" in os.listdir(path) ): # this is an orbax checkpoint assert wandb_run_name is not None # load config from wandb api = wandb.Api() run = api.run(wandb_run_name) config = ml_collections.ConfigDict(run.config) # load params params = orbax.checkpoint.PyTreeCheckpointer().restore(path, item=None) assert "params_ema" not in params # load model model_def = create_model_def(config.model) else: # assume this is in HuggingFace format model_def, params = load_pretrained_unet(path, in_channels=8) # hardcode scheduling config to be "scaled_linear" (used by Stable Diffusion) config = {"scheduling": {"noise_schedule": "scaled_linear"}} state = EmaTrainState( step=0, apply_fn=model_def.apply, params=None, params_ema=params, tx=None, opt_state=None, ) del params # load encoders vae_encode, vae_decode = load_vae(pretrained_path) tokenize, untokenize, text_encode = load_text_encoder(pretrained_path) uncond_prompt_embed = text_encode(tokenize([""])) # (1, 77, 768) log_snr_fn = scheduling.create_log_snr_fn(config["scheduling"])
sample_loop = partial(sampling.sample_loop, log_snr_fn=log_snr_fn)
0
2023-10-17 05:05:57+00:00
4k
skywalker023/fantom
eval_fantom.py
[ { "identifier": "GPT3BaseAgent", "path": "agents/gpt.py", "snippet": "class GPT3BaseAgent():\n def __init__(self, kwargs: dict):\n openai.api_key = os.getenv('OPENAI_API_KEY')\n self.args = SimpleNamespace(**kwargs)\n self._set_default_args()\n\n def _set_default_args(self):\n if not hasattr(self.args, 'engine'):\n self.args.engine = \"text-davinci-003\"\n if not hasattr(self.args, 'temperature'):\n self.args.temperature = 0.9\n if not hasattr(self.args, 'max_tokens'):\n self.args.max_tokens = 256\n if not hasattr(self.args, 'top_p'):\n self.args.top_p = 0.9\n if not hasattr(self.args, 'frequency_penalty'):\n self.args.frequency_penalty = 0.7\n if not hasattr(self.args, 'presence_penalty'):\n self.args.presence_penalty = 0\n\n def generate(self, prompt):\n while True:\n try:\n completion = openai.Completion.create(\n engine=self.args.engine,\n prompt=prompt,\n temperature=self.args.temperature,\n max_tokens=self.args.max_tokens,\n top_p=self.args.top_p,\n frequency_penalty=self.args.frequency_penalty,\n presence_penalty=self.args.presence_penalty,\n stop=self.args.stop_tokens if hasattr(self.args, 'stop_tokens') else None,\n logprobs=self.args.logprobs if hasattr(self.args, 'logprobs') else 0,\n echo=self.args.echo if hasattr(self.args, 'echo') else False\n )\n break\n except (RuntimeError, openai.error.RateLimitError, openai.error.ServiceUnavailableError, openai.error.APIError, openai.error.APIConnectionError) as e:\n print(\"Error: {}\".format(e))\n time.sleep(2)\n continue\n\n return completion\n\n def parse_basic_text(self, response):\n output = response['choices'][0]['text'].strip()\n\n return output\n\n def parse_ordered_list(self, numbered_items):\n ordered_list = numbered_items.split(\"\\n\")\n output = [item.split(\".\")[-1].strip() for item in ordered_list if item.strip() != \"\"]\n\n return output\n\n def interact(self, prompt):\n response = self.generate(prompt)\n output = self.parse_basic_text(response)\n\n return output" }, { "identifier": "ConversationalGPTBaseAgent", "path": "agents/gpt.py", "snippet": "class ConversationalGPTBaseAgent(GPT3BaseAgent):\n def __init__(self, kwargs: dict):\n super().__init__(kwargs)\n\n def _set_default_args(self):\n if not hasattr(self.args, 'model'):\n self.args.model = \"gpt-4-0613\"\n if not hasattr(self.args, 'temperature'):\n self.args.temperature = 0.9\n if not hasattr(self.args, 'max_tokens'):\n self.args.max_tokens = 256\n if not hasattr(self.args, 'top_p'):\n self.args.top_p = 0.9\n if not hasattr(self.args, 'frequency_penalty'):\n self.args.frequency_penalty = 0.7\n if not hasattr(self.args, 'presence_penalty'):\n self.args.presence_penalty = 0\n\n def generate(self, prompt):\n while True:\n try:\n completion = openai.ChatCompletion.create(\n model=self.args.model,\n messages=[{\"role\": \"user\", \"content\": \"{}\".format(prompt)}]\n )\n break\n except (openai.error.APIError, openai.error.RateLimitError) as e: \n print(\"Error: {}\".format(e))\n time.sleep(2)\n continue\n\n return completion\n\n def parse_basic_text(self, response):\n output = response['choices'][0].message.content.strip()\n\n return output" }, { "identifier": "FlanT5Agent", "path": "agents/huggingface.py", "snippet": "class FlanT5Agent(HuggingFaceAgent):\n def __init__(self, args):\n super().__init__(args)\n self.tokenizer = T5Tokenizer.from_pretrained(\"google/\" + args.model)\n self.model = T5ForConditionalGeneration.from_pretrained(\"google/\" + args.model, device_map=\"auto\")" }, { "identifier": "FlanUL2Agent", "path": "agents/huggingface.py", "snippet": "class FlanUL2Agent(HuggingFaceAgent):\n def __init__(self, args):\n super().__init__(args)\n self.tokenizer = AutoTokenizer.from_pretrained(\"google/flan-ul2\")\n self.model = T5ForConditionalGeneration.from_pretrained(\"google/flan-ul2\", device_map=\"auto\", load_in_8bit=True)" }, { "identifier": "MistralAIAgent", "path": "agents/huggingface.py", "snippet": "class MistralAIAgent(HuggingFaceAgent):\n def __init__(self, args):\n super().__init__(args)\n if 'instruct' in self.args.model.lower():\n model_name = \"Mistral-7B-Instruct-v0.1\"\n else:\n model_name = \"Mistral-7B-v0.1\"\n self.tokenizer = AutoTokenizer.from_pretrained(\"mistralai/\" + model_name)\n self.model = AutoModelForCausalLM.from_pretrained(\"mistralai/\" + model_name, device_map=\"auto\")\n self.tokenizer.pad_token = self.tokenizer.eos_token \n\n def preprocess_text(self, text):\n return self.tokenizer.apply_chat_template([{\"role\": \"user\", \"content\": text}], tokenize=False, add_generation_prompt=True)\n\n def postprocess_output(self, response):\n return response.split(\"[/INST]\")[-1].strip()" }, { "identifier": "ZephyrAgent", "path": "agents/huggingface.py", "snippet": "class ZephyrAgent(HuggingFaceAgent):\n def __init__(self, args):\n super().__init__(args)\n self.tokenizer = AutoTokenizer.from_pretrained(\"HuggingFaceH4/\" + self.args.model)\n self.model = AutoModelForCausalLM.from_pretrained(\"HuggingFaceH4/\" + self.args.model, device_map=\"auto\")\n\n def preprocess_text(self, text):\n return self.tokenizer.apply_chat_template([{\"role\": \"user\", \"content\": text}], tokenize=False, add_generation_prompt=True)\n\n def postprocess_output(self, response):\n return response.split(\"\\n<|assistant|>\\n\")[-1].strip()" }, { "identifier": "TogetherAIAgent", "path": "agents/together_ai.py", "snippet": "class TogetherAIAgent():\n def __init__(self, kwargs: dict):\n self.api_key = together.api_key = os.getenv('TOGETHERAI_API_KEY')\n self.args = SimpleNamespace(**kwargs)\n self._set_default_args()\n self.args.model = \"togethercomputer/\" + self.args.model.removesuffix(\"-tg\")\n\n def _set_default_args(self):\n if not hasattr(self.args, 'model'):\n self.args.model = \"togethercomputer/llama-2-70b-chat\"\n if not hasattr(self.args, 'temperature'):\n self.args.temperature = 0.0\n if not hasattr(self.args, 'max_tokens'):\n self.args.max_tokens = 256\n if not hasattr(self.args, 'top_p'):\n self.args.top_p = 0.95\n if not hasattr(self.args, 'repetition_penalty'):\n self.args.repetition_penalty = 1.0\n\n def generate(self, prompt):\n while True:\n try:\n output = together.Complete.create(\n prompt = prompt, \n model = self.args.model, \n max_tokens = self.args.max_tokens,\n temperature = self.args.temperature,\n top_k = 1,\n top_p = self.args.top_p,\n repetition_penalty = 1.0,\n stop = ['</s>']\n )\n break\n except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout, requests.exceptions.JSONDecodeError) as e:\n print(\"Error: {}\\nRetrying...\".format(e))\n time.sleep(2)\n continue\n\n return output\n\n def parse_basic_text(self, response):\n return response['output']['choices'][0]['text'].strip()\n\n def interact(self, prompt):\n while True:\n try:\n response = self.generate(prompt)\n output = self.parse_basic_text(response)\n break\n except:\n print(\"Error: Retrying...\")\n time.sleep(2)\n continue\n\n return output" } ]
import os import json import argparse import random import evaluate import torch import pandas as pd import colorful as cf import task.dataset_loader as loader from pathlib import Path from collections import Counter from torch.utils.data import DataLoader, Dataset from tqdm import tqdm from sentence_transformers import SentenceTransformer from sklearn.metrics.pairwise import cosine_similarity from agents.gpt import GPT3BaseAgent, ConversationalGPTBaseAgent from agents.huggingface import FlanT5Agent, FlanUL2Agent, MistralAIAgent, ZephyrAgent from agents.together_ai import TogetherAIAgent
2,586
tqdm.pandas() cf.use_true_colors() cf.use_style('monokai') PROJECT_HOME = Path(__file__).parent.resolve() DATA_DIR = 'data' DATA_DIR_PATH = os.path.join(PROJECT_HOME, DATA_DIR) EVAL_DIR_PATH = os.path.join(DATA_DIR_PATH, 'results') RANDOM_SEED = 99 random.seed(RANDOM_SEED) class FantomDataset(Dataset): def __init__(self, texts, args): self.texts = texts def __len__(self): return len(self.texts) def __getitem__(self, index): text = self.texts[index] return text class FantomEvalAgent(): def __init__(self, args): self.args = args self.prompt_header = "This is a theory-of-mind test. Please answer the question regarding facts or beliefs, based on the following in-person conversation between individuals who have just met.\n\n" self.output_filename_suffix = '_{}_input_{}_cot-{}.json'.format(self.args.conversation_input_type, self.args.model, self.args.use_cot) self.load_fantom() self.setup_fantom() self.model = self.load_model() self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.embedder = SentenceTransformer('sentence-transformers/all-roberta-large-v1').to(self.device) def load_fantom(self): self.fantom_df = loader.load() def respond(self, prompt): response = self.model.interact(prompt) return response def load_model(self): if self.args.model.startswith("text-"): model = GPT3BaseAgent({'engine': self.args.model, 'temperature': 0, 'top_p': 0.95, 'frequency_penalty': 0.0, 'presence_penalty': 0.0}) elif self.args.model.startswith("gpt-"): model = ConversationalGPTBaseAgent({'model': self.args.model, 'temperature': 0, 'top_p': 0.95, 'frequency_penalty': 0.0, 'presence_penalty': 0.0}) elif self.args.model.startswith('flan-t5'): model = FlanT5Agent(self.args) elif self.args.model.startswith('flan-ul2'): model = FlanUL2Agent(self.args) elif self.args.model.endswith('-tg'):
tqdm.pandas() cf.use_true_colors() cf.use_style('monokai') PROJECT_HOME = Path(__file__).parent.resolve() DATA_DIR = 'data' DATA_DIR_PATH = os.path.join(PROJECT_HOME, DATA_DIR) EVAL_DIR_PATH = os.path.join(DATA_DIR_PATH, 'results') RANDOM_SEED = 99 random.seed(RANDOM_SEED) class FantomDataset(Dataset): def __init__(self, texts, args): self.texts = texts def __len__(self): return len(self.texts) def __getitem__(self, index): text = self.texts[index] return text class FantomEvalAgent(): def __init__(self, args): self.args = args self.prompt_header = "This is a theory-of-mind test. Please answer the question regarding facts or beliefs, based on the following in-person conversation between individuals who have just met.\n\n" self.output_filename_suffix = '_{}_input_{}_cot-{}.json'.format(self.args.conversation_input_type, self.args.model, self.args.use_cot) self.load_fantom() self.setup_fantom() self.model = self.load_model() self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.embedder = SentenceTransformer('sentence-transformers/all-roberta-large-v1').to(self.device) def load_fantom(self): self.fantom_df = loader.load() def respond(self, prompt): response = self.model.interact(prompt) return response def load_model(self): if self.args.model.startswith("text-"): model = GPT3BaseAgent({'engine': self.args.model, 'temperature': 0, 'top_p': 0.95, 'frequency_penalty': 0.0, 'presence_penalty': 0.0}) elif self.args.model.startswith("gpt-"): model = ConversationalGPTBaseAgent({'model': self.args.model, 'temperature': 0, 'top_p': 0.95, 'frequency_penalty': 0.0, 'presence_penalty': 0.0}) elif self.args.model.startswith('flan-t5'): model = FlanT5Agent(self.args) elif self.args.model.startswith('flan-ul2'): model = FlanUL2Agent(self.args) elif self.args.model.endswith('-tg'):
model = TogetherAIAgent(self.args.__dict__)
6
2023-10-21 22:49:56+00:00
4k
turingmotors/openlenda
yolox/models/darknet.py
[ { "identifier": "BaseConv", "path": "yolox/models/network_blocks.py", "snippet": "class BaseConv(nn.Module):\n \"\"\"A Conv2d -> Batchnorm -> silu/leaky relu block\"\"\"\n\n def __init__(\n self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n ):\n super().__init__()\n # same padding\n pad = (ksize - 1) // 2\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=ksize,\n stride=stride,\n padding=pad,\n groups=groups,\n bias=bias,\n )\n self.bn = nn.BatchNorm2d(out_channels)\n self.act = get_activation(act, inplace=True)\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))" }, { "identifier": "CSPLayer", "path": "yolox/models/network_blocks.py", "snippet": "class CSPLayer(nn.Module):\n \"\"\"C3 in yolov5, CSP Bottleneck with 3 convolutions\"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n n=1,\n shortcut=True,\n expansion=0.5,\n depthwise=False,\n act=\"silu\",\n ):\n \"\"\"\n Args:\n in_channels (int): input channels.\n out_channels (int): output channels.\n n (int): number of Bottlenecks. Default value: 1.\n \"\"\"\n # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n hidden_channels = int(out_channels * expansion) # hidden channels\n self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)\n module_list = [\n Bottleneck(\n hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act\n )\n for _ in range(n)\n ]\n self.m = nn.Sequential(*module_list)\n\n def forward(self, x):\n x_1 = self.conv1(x)\n x_2 = self.conv2(x)\n x_1 = self.m(x_1)\n x = torch.cat((x_1, x_2), dim=1)\n return self.conv3(x)" }, { "identifier": "DWConv", "path": "yolox/models/network_blocks.py", "snippet": "class DWConv(nn.Module):\n \"\"\"Depthwise Conv + Conv\"\"\"\n\n def __init__(self, in_channels, out_channels, ksize, stride=1, act=\"silu\"):\n super().__init__()\n self.dconv = BaseConv(\n in_channels,\n in_channels,\n ksize=ksize,\n stride=stride,\n groups=in_channels,\n act=act,\n )\n self.pconv = BaseConv(\n in_channels, out_channels, ksize=1, stride=1, groups=1, act=act\n )\n\n def forward(self, x):\n x = self.dconv(x)\n return self.pconv(x)" }, { "identifier": "Focus", "path": "yolox/models/network_blocks.py", "snippet": "class Focus(nn.Module):\n \"\"\"Focus width and height information into channel space.\"\"\"\n\n def __init__(self, in_channels, out_channels, ksize=1, stride=1, act=\"silu\"):\n super().__init__()\n self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act)\n\n def forward(self, x):\n # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)\n patch_top_left = x[..., ::2, ::2]\n patch_top_right = x[..., ::2, 1::2]\n patch_bot_left = x[..., 1::2, ::2]\n patch_bot_right = x[..., 1::2, 1::2]\n x = torch.cat(\n (\n patch_top_left,\n patch_bot_left,\n patch_top_right,\n patch_bot_right,\n ),\n dim=1,\n )\n return self.conv(x)" }, { "identifier": "ResLayer", "path": "yolox/models/network_blocks.py", "snippet": "class ResLayer(nn.Module):\n \"Residual layer with `in_channels` inputs.\"\n\n def __init__(self, in_channels: int):\n super().__init__()\n mid_channels = in_channels // 2\n self.layer1 = BaseConv(\n in_channels, mid_channels, ksize=1, stride=1, act=\"lrelu\"\n )\n self.layer2 = BaseConv(\n mid_channels, in_channels, ksize=3, stride=1, act=\"lrelu\"\n )\n\n def forward(self, x):\n out = self.layer2(self.layer1(x))\n return x + out" }, { "identifier": "SPPBottleneck", "path": "yolox/models/network_blocks.py", "snippet": "class SPPBottleneck(nn.Module):\n \"\"\"Spatial pyramid pooling layer used in YOLOv3-SPP\"\"\"\n\n def __init__(\n self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation=\"silu\"\n ):\n super().__init__()\n hidden_channels = in_channels // 2\n self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation)\n self.m = nn.ModuleList(\n [\n nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)\n for ks in kernel_sizes\n ]\n )\n conv2_channels = hidden_channels * (len(kernel_sizes) + 1)\n self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation)\n\n def forward(self, x):\n x = self.conv1(x)\n x = torch.cat([x] + [m(x) for m in self.m], dim=1)\n x = self.conv2(x)\n return x" } ]
from torch import nn from .network_blocks import BaseConv, CSPLayer, DWConv, Focus, ResLayer, SPPBottleneck
1,738
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) Megvii Inc. All rights reserved. class Darknet(nn.Module): # number of blocks from dark2 to dark5. depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]} def __init__( self, depth, in_channels=3, stem_out_channels=32, out_features=("dark3", "dark4", "dark5"), ): """ Args: depth (int): depth of darknet used in model, usually use [21, 53] for this param. in_channels (int): number of input channels, for example, use 3 for RGB image. stem_out_channels (int): number of output channels of darknet stem. It decides channels of darknet layer2 to layer5. out_features (Tuple[str]): desired output layer name. """ super().__init__() assert out_features, "please provide output features of Darknet" self.out_features = out_features self.stem = nn.Sequential(
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) Megvii Inc. All rights reserved. class Darknet(nn.Module): # number of blocks from dark2 to dark5. depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]} def __init__( self, depth, in_channels=3, stem_out_channels=32, out_features=("dark3", "dark4", "dark5"), ): """ Args: depth (int): depth of darknet used in model, usually use [21, 53] for this param. in_channels (int): number of input channels, for example, use 3 for RGB image. stem_out_channels (int): number of output channels of darknet stem. It decides channels of darknet layer2 to layer5. out_features (Tuple[str]): desired output layer name. """ super().__init__() assert out_features, "please provide output features of Darknet" self.out_features = out_features self.stem = nn.Sequential(
BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"),
0
2023-10-20 08:12:26+00:00
4k
tiejundong/FlexPose
FlexPose/preprocess/aug_pseudo_apo.py
[ { "identifier": "delmkdir", "path": "FlexPose/utils/common.py", "snippet": "def delmkdir(path, remove_old=True):\n isexist = os.path.exists(path)\n if not isexist:\n os.makedirs(path)\n if isexist == True and remove_old:\n shutil.rmtree(path)\n os.makedirs(path)" }, { "identifier": "read_mol_from_pdbbind", "path": "FlexPose/utils/pdbbind_preprocess.py", "snippet": "def read_mol_from_pdbbind(data_path, pdb_id):\n ligand_mol2_path = f'{data_path}/{pdb_id}/{pdb_id}_ligand.mol2'\n ligand_mol = Chem.MolFromMol2File(ligand_mol2_path)\n if ligand_mol == None:\n ligand_pdbpath = f'{data_path}/{pdb_id}/{pdb_id}_ligand.pdb'\n ligand_smiles_path = f'{data_path}/{pdb_id}/{pdb_id}_ligand.smi'\n ligand_smiles = open(ligand_smiles_path, 'r').readlines()[0].split('\\t')[0]\n ligand_mol = read_mol_with_pdb_smi(ligand_pdbpath, ligand_smiles)\n return ligand_mol" }, { "identifier": "get_true_posi", "path": "FlexPose/utils/pdbbind_preprocess.py", "snippet": "def get_true_posi(mol):\n mol_conf = mol.GetConformer()\n node_posi = np.array([mol_conf.GetAtomPosition(int(idx)) for idx in range(mol.GetNumAtoms())])\n return node_posi" } ]
import os import shutil import sys import argparse import numpy as np import scipy.spatial import random import pickle import pyrosetta from ray.util.multiprocessing import Pool from einops import rearrange from pyrosetta import rosetta from pyrosetta.rosetta import core from modeller import environ from modeller.scripts import complete_pdb from FlexPose.utils.common import delmkdir from FlexPose.utils.pdbbind_preprocess import read_mol_from_pdbbind, get_true_posi
2,120
tf.push_back(core.pack.task.operation.RestrictToRepacking()) restrict_to_focus = core.pack.task.operation.OperateOnResidueSubset(core.pack.task.operation.PreventRepackingRLT(), res_selector, True) # True indicates flipping the selection tf.push_back(restrict_to_focus) # pyrosetta.toolbox.generate_resfile.generate_resfile_from_pose(original_pose, f'{sub_MC_path}/protein_resfile', # pack=True, design=False, input_sc=False) # tf.push_back(core.pack.task.operation.ReadResfile(f'{sub_MC_path}/protein_resfile')) # print(tf.create_task_and_apply_taskoperations(pose)) # test tf # packer = pyrosetta.rosetta.protocols.minimization_packing.PackRotamersMover() # packer = pyrosetta.rosetta.protocols.minimization_packing.MinPackMover() # packer.task_factory(tf) # packer.apply(pose) # get FastRelax mm = core.kinematics.MoveMap() mm.set_jump(False) for i in range(1, pose.size()+1): if i in res_list: mm.set_chi(i, True) mm.set_bb(i, flexbb) else: mm.set_chi(i, False) mm.set_bb(i, False) # mmf = core.select.movemap.MoveMapFactory() # mmf.all_bb(False) # mmf.all_bondangles(False) # mmf.all_bondlengths(False) # mmf.all_branches(False) # mmf.all_chi(False) # mmf.all_jumps(False) # mmf.all_nu(False) # mmf.set_cartesian(False) # mmf.add_bb_action(core.select.movemap.move_map_action.mm_enable, pocket_selector) # mmf.add_chi_action(core.select.movemap.move_map_action.mm_enable, pocket_selector) # mm = mmf.create_movemap_from_pose(pose) fr = pyrosetta.rosetta.protocols.relax.FastRelax() # fr.max_iter(100) fr.constrain_relax_to_start_coords(False) fr.set_movemap_disables_packing_of_fixed_chi_positions(True) fr.set_task_factory(tf) fr.set_movemap(mm) # fr.set_movemap_factory(mmf) fr.cartesian(False) fr.set_scorefxn(core.scoring.ScoreFunctionFactory.create_score_function('ref2015_cart')) fr.min_type('dfpmin_armijo_nonmonotone') # For non-Cartesian scorefunctions, use "dfpmin_armijo_nonmonotone", else lbfgs_armijo_nonmonotone return fr def get_torsion(pose): bb_torsion = [] sc_torsion = [] for i in range(1, pose.size() + 1): try: res = pose.residue(i) assert res.name3() in ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL'] phi_psi = [pose.phi(i), pose.psi(i)] chi = [c for c in res.chi()] bb_torsion.append(phi_psi) sc_torsion.append(chi) except: bb_torsion.append([None]) sc_torsion.append([None]) return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion} def try_gen_pose(task, pose): try: task.apply(pose) return True except: return False def run_single_pdbbind(tup_in): pdbbind_path, apobind_path, MC_path, pdb_id, n_rand_pert, n_fixbb_repack, n_flexbb_repack, rand_pert_range = tup_in sub_pdbbind_path = f'{pdbbind_path}/{pdb_id}' sub_apobind_path = f'{apobind_path}/{pdb_id}' sub_MC_path = f'{MC_path}/{pdb_id}' delmkdir(sub_MC_path) #################################################################################################################### # repaire protein #################################################################################################################### env_ = environ() env_.libs.topology.read(file='$(LIB)/top_heav.lib') env_.libs.parameters.read(file='$(LIB)/par.lib') pdb_m = complete_pdb(env_, f'{sub_pdbbind_path}/{pdb_id}_protein.pdb') pdb_m.write(f'{sub_MC_path}/protein_modeller.pdb') os.system(f'grep HETATM {sub_pdbbind_path}/{pdb_id}_protein.pdb >> {sub_MC_path}/protein_modeller.pdb') # add ion os.system(f'grep -v END {sub_MC_path}/protein_modeller.pdb > {sub_MC_path}/protein_repaired.pdb') if os.path.exists(f'{sub_apobind_path}/{pdb_id}_apo_added.pdb'): have_apo = True else: have_apo = False if have_apo: pdb_m = complete_pdb(env_, f'{sub_apobind_path}/{pdb_id}_apo_added.pdb') pdb_m.write(f'{sub_MC_path}/protein_modeller.pdb') os.system(f'grep -v END {sub_MC_path}/protein_modeller.pdb > {sub_MC_path}/protein_repaired_apo.pdb') #################################################################################################################### # init rosetta #################################################################################################################### # check https://new.rosettacommons.org/docs/latest/full-options-list for opts # -ex3 -ex4 -ex1aro -ex2aro opts = '-ex1 true -packing:ex1:level 1 -ex2 true -packing:ex2:level 1 -extrachi_cutoff 0 -ignore_unrecognized_res true -relax:default_repeats 3' pyrosetta.distributed.init(opts) original_pose = pyrosetta.io.pose_from_pdb(f'{sub_MC_path}/protein_repaired.pdb') original_pose.dump_pdb(f'{sub_MC_path}/origin.pdb') if have_apo: apo_pose = pyrosetta.io.pose_from_pdb(f'{sub_MC_path}/protein_repaired_apo.pdb') # select local res ligand_mol = read_mol_from_pdbbind(pdbbind_path, pdb_id)
sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2])) def random_sc(pose, res_list=None, pert=180): # random chi if isinstance(res_list, type(None)): res_list = range(1, pose.size() + 1) for i in res_list: res = pose.residue(i) for chino, chi in enumerate(res.chi(), start=1): res.set_chi(chino, chi + random.uniform(-pert, pert)) def get_FastRelax(pose, res_list=None, flexbb=True): if isinstance(res_list, type(None)): res_list = range(1, pose.size() + 1) res_selector = core.select.residue_selector.ResidueIndexSelector(','.join([str(i) for i in res_list])) # get TaskFactory tf = core.pack.task.TaskFactory() tf.push_back(core.pack.task.operation.InitializeFromCommandline()) # tf.push_back(core.pack.task.operation.IncludeCurrent()) tf.push_back(core.pack.task.operation.NoRepackDisulfides()) tf.push_back(core.pack.task.operation.RestrictToRepacking()) restrict_to_focus = core.pack.task.operation.OperateOnResidueSubset(core.pack.task.operation.PreventRepackingRLT(), res_selector, True) # True indicates flipping the selection tf.push_back(restrict_to_focus) # pyrosetta.toolbox.generate_resfile.generate_resfile_from_pose(original_pose, f'{sub_MC_path}/protein_resfile', # pack=True, design=False, input_sc=False) # tf.push_back(core.pack.task.operation.ReadResfile(f'{sub_MC_path}/protein_resfile')) # print(tf.create_task_and_apply_taskoperations(pose)) # test tf # packer = pyrosetta.rosetta.protocols.minimization_packing.PackRotamersMover() # packer = pyrosetta.rosetta.protocols.minimization_packing.MinPackMover() # packer.task_factory(tf) # packer.apply(pose) # get FastRelax mm = core.kinematics.MoveMap() mm.set_jump(False) for i in range(1, pose.size()+1): if i in res_list: mm.set_chi(i, True) mm.set_bb(i, flexbb) else: mm.set_chi(i, False) mm.set_bb(i, False) # mmf = core.select.movemap.MoveMapFactory() # mmf.all_bb(False) # mmf.all_bondangles(False) # mmf.all_bondlengths(False) # mmf.all_branches(False) # mmf.all_chi(False) # mmf.all_jumps(False) # mmf.all_nu(False) # mmf.set_cartesian(False) # mmf.add_bb_action(core.select.movemap.move_map_action.mm_enable, pocket_selector) # mmf.add_chi_action(core.select.movemap.move_map_action.mm_enable, pocket_selector) # mm = mmf.create_movemap_from_pose(pose) fr = pyrosetta.rosetta.protocols.relax.FastRelax() # fr.max_iter(100) fr.constrain_relax_to_start_coords(False) fr.set_movemap_disables_packing_of_fixed_chi_positions(True) fr.set_task_factory(tf) fr.set_movemap(mm) # fr.set_movemap_factory(mmf) fr.cartesian(False) fr.set_scorefxn(core.scoring.ScoreFunctionFactory.create_score_function('ref2015_cart')) fr.min_type('dfpmin_armijo_nonmonotone') # For non-Cartesian scorefunctions, use "dfpmin_armijo_nonmonotone", else lbfgs_armijo_nonmonotone return fr def get_torsion(pose): bb_torsion = [] sc_torsion = [] for i in range(1, pose.size() + 1): try: res = pose.residue(i) assert res.name3() in ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL'] phi_psi = [pose.phi(i), pose.psi(i)] chi = [c for c in res.chi()] bb_torsion.append(phi_psi) sc_torsion.append(chi) except: bb_torsion.append([None]) sc_torsion.append([None]) return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion} def try_gen_pose(task, pose): try: task.apply(pose) return True except: return False def run_single_pdbbind(tup_in): pdbbind_path, apobind_path, MC_path, pdb_id, n_rand_pert, n_fixbb_repack, n_flexbb_repack, rand_pert_range = tup_in sub_pdbbind_path = f'{pdbbind_path}/{pdb_id}' sub_apobind_path = f'{apobind_path}/{pdb_id}' sub_MC_path = f'{MC_path}/{pdb_id}' delmkdir(sub_MC_path) #################################################################################################################### # repaire protein #################################################################################################################### env_ = environ() env_.libs.topology.read(file='$(LIB)/top_heav.lib') env_.libs.parameters.read(file='$(LIB)/par.lib') pdb_m = complete_pdb(env_, f'{sub_pdbbind_path}/{pdb_id}_protein.pdb') pdb_m.write(f'{sub_MC_path}/protein_modeller.pdb') os.system(f'grep HETATM {sub_pdbbind_path}/{pdb_id}_protein.pdb >> {sub_MC_path}/protein_modeller.pdb') # add ion os.system(f'grep -v END {sub_MC_path}/protein_modeller.pdb > {sub_MC_path}/protein_repaired.pdb') if os.path.exists(f'{sub_apobind_path}/{pdb_id}_apo_added.pdb'): have_apo = True else: have_apo = False if have_apo: pdb_m = complete_pdb(env_, f'{sub_apobind_path}/{pdb_id}_apo_added.pdb') pdb_m.write(f'{sub_MC_path}/protein_modeller.pdb') os.system(f'grep -v END {sub_MC_path}/protein_modeller.pdb > {sub_MC_path}/protein_repaired_apo.pdb') #################################################################################################################### # init rosetta #################################################################################################################### # check https://new.rosettacommons.org/docs/latest/full-options-list for opts # -ex3 -ex4 -ex1aro -ex2aro opts = '-ex1 true -packing:ex1:level 1 -ex2 true -packing:ex2:level 1 -extrachi_cutoff 0 -ignore_unrecognized_res true -relax:default_repeats 3' pyrosetta.distributed.init(opts) original_pose = pyrosetta.io.pose_from_pdb(f'{sub_MC_path}/protein_repaired.pdb') original_pose.dump_pdb(f'{sub_MC_path}/origin.pdb') if have_apo: apo_pose = pyrosetta.io.pose_from_pdb(f'{sub_MC_path}/protein_repaired_apo.pdb') # select local res ligand_mol = read_mol_from_pdbbind(pdbbind_path, pdb_id)
ligand_coor = get_true_posi(ligand_mol)
2
2023-10-19 22:03:51+00:00
4k
openvpi/SingingVocoders
train.py
[ { "identifier": "read_full_config", "path": "utils/config_utils.py", "snippet": "def read_full_config(config_path: pathlib.Path) -> dict:\n config_path = config_path.resolve()\n config_path_str = config_path.as_posix()\n if config_path in loaded_config_files:\n return loaded_config_files[config_path_str]\n\n with open(config_path, 'r', encoding='utf8') as f:\n config = yaml.safe_load(f)\n if 'base_config' not in config:\n loaded_config_files[config_path_str] = config\n return config\n\n if not isinstance(config['base_config'], list):\n config['base_config'] = [config['base_config']]\n squashed_config = {}\n for base_config in config['base_config']:\n c_path = pathlib.Path(base_config)\n full_base_config = read_full_config(c_path)\n override_dict(squashed_config, full_base_config)\n override_dict(squashed_config, config)\n squashed_config.pop('base_config')\n loaded_config_files[config_path_str] = squashed_config\n return squashed_config" }, { "identifier": "print_config", "path": "utils/config_utils.py", "snippet": "@lightning.pytorch.utilities.rank_zero.rank_zero_only\ndef print_config(config: dict):\n for i, (k, v) in enumerate(sorted(config.items())):\n print(f\"\\033[0;33m{k}\\033[0m: {v}\", end='')\n if i < len(config) - 1:\n print(\", \", end=\"\")\n if i % 5 == 4:\n print()\n print()" }, { "identifier": "DsModelCheckpoint", "path": "utils/training_utils.py", "snippet": "class DsModelCheckpoint(ModelCheckpoint):\n def __init__(\n self,\n *args,\n permanent_ckpt_start,\n permanent_ckpt_interval,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.permanent_ckpt_start = permanent_ckpt_start or 0\n self.permanent_ckpt_interval = permanent_ckpt_interval or 0\n self.enable_permanent_ckpt = self.permanent_ckpt_start > 0 and self.permanent_ckpt_interval > 9\n\n self._verbose = self.verbose\n self.verbose = False\n\n def state_dict(self):\n ret = super().state_dict()\n ret.pop('dirpath')\n return ret\n\n def load_state_dict(self, state_dict) -> None:\n super().load_state_dict(state_dict)\n\n def on_validation_end(self, trainer: \"pl.Trainer\", pl_module: \"pl.LightningModule\") -> None:\n if trainer.lightning_module.skip_immediate_ckpt_save:\n trainer.lightning_module.skip_immediate_ckpt_save = False\n return\n self.last_val_step = trainer.global_step\n super().on_validation_end(trainer, pl_module)\n\n def _update_best_and_save(\n self, current: torch.Tensor, trainer: \"pl.Trainer\", monitor_candidates: Dict[str, torch.Tensor]\n ) -> None:\n k = len(self.best_k_models) + 1 if self.save_top_k == -1 else self.save_top_k\n\n del_filepath = None\n _op = max if self.mode == \"min\" else min\n while len(self.best_k_models) > k and k > 0:\n self.kth_best_model_path = _op(self.best_k_models, key=self.best_k_models.get) # type: ignore[arg-type]\n self.kth_value = self.best_k_models[self.kth_best_model_path]\n\n del_filepath = self.kth_best_model_path\n self.best_k_models.pop(del_filepath)\n filepath = self._get_metric_interpolated_filepath_name(monitor_candidates, trainer, del_filepath)\n if del_filepath is not None and filepath != del_filepath:\n self._remove_checkpoint(trainer, del_filepath)\n\n if len(self.best_k_models) == k and k > 0:\n self.kth_best_model_path = _op(self.best_k_models, key=self.best_k_models.get) # type: ignore[arg-type]\n self.kth_value = self.best_k_models[self.kth_best_model_path]\n\n super()._update_best_and_save(current, trainer, monitor_candidates)\n\n def _save_checkpoint(self, trainer: \"pl.Trainer\", filepath: str) -> None:\n filepath = (Path(self.dirpath) / Path(filepath).name).resolve()\n super()._save_checkpoint(trainer, str(filepath))\n if self._verbose:\n relative_path = filepath.relative_to(Path('.').resolve())\n rank_zero_info(f'Checkpoint {relative_path} saved.')\n\n def _remove_checkpoint(self, trainer: \"pl.Trainer\", filepath: str):\n filepath = (Path(self.dirpath) / Path(filepath).name).resolve()\n relative_path = filepath.relative_to(Path('.').resolve())\n search = re.search(r'steps_\\d+', relative_path.stem)\n if search:\n step = int(search.group(0)[6:])\n if self.enable_permanent_ckpt and \\\n step >= self.permanent_ckpt_start and \\\n (step - self.permanent_ckpt_start) % self.permanent_ckpt_interval == 0:\n rank_zero_info(f'Checkpoint {relative_path} is now permanent.')\n return\n super()._remove_checkpoint(trainer, filepath)\n if self._verbose:\n rank_zero_info(f'Removed checkpoint {relative_path}.')" }, { "identifier": "DsTQDMProgressBar", "path": "utils/training_utils.py", "snippet": "class DsTQDMProgressBar(TQDMProgressBar):\n def __init__(self, refresh_rate: int = 1, process_position: int = 0, show_steps: bool = True):\n super().__init__(refresh_rate, process_position)\n self.show_steps = show_steps\n\n def get_metrics(self, trainer, model):\n items = super().get_metrics(trainer, model)\n if 'batch_size' in items:\n items['batch_size'] = int(items['batch_size'])\n if self.show_steps:\n items['steps'] = str(trainer.global_step)\n for k, v in items.items():\n if isinstance(v, float):\n if np.isnan(v):\n items[k] = 'nan'\n elif 0.001 <= v < 10:\n items[k] = np.format_float_positional(v, unique=True, precision=5, trim='-')\n elif 0.00001 <= v < 0.001:\n if len(np.format_float_positional(v, unique=True, precision=8, trim='-')) > 8:\n items[k] = np.format_float_scientific(v, precision=3, unique=True, min_digits=2, trim='-')\n else:\n items[k] = np.format_float_positional(v, unique=True, precision=5, trim='-')\n elif v < 0.00001:\n items[k] = np.format_float_scientific(v, precision=3, unique=True, min_digits=2, trim='-')\n items.pop(\"v_num\", None)\n return items" }, { "identifier": "get_latest_checkpoint_path", "path": "utils/training_utils.py", "snippet": "def get_latest_checkpoint_path(work_dir):\n if not isinstance(work_dir, Path):\n work_dir = Path(work_dir)\n if not work_dir.exists():\n return None\n\n last_step = -1\n last_ckpt_name = None\n\n for ckpt in work_dir.glob('model_ckpt_steps_*.ckpt'):\n search = re.search(r'steps_\\d+', ckpt.name)\n if search:\n step = int(search.group(0)[6:])\n if step > last_step:\n last_step = step\n last_ckpt_name = str(ckpt)\n\n return last_ckpt_name if last_ckpt_name is not None else None" }, { "identifier": "get_strategy", "path": "utils/training_utils.py", "snippet": "def get_strategy(strategy):\n if strategy['name'] == 'auto':\n return 'auto'\n\n from lightning.pytorch.strategies import StrategyRegistry\n if strategy['name'] not in StrategyRegistry:\n available_names = \", \".join(sorted(StrategyRegistry.keys())) or \"none\"\n raise ValueError(f\"Invalid strategy name {strategy['name']}. Available names: {available_names}\")\n\n data = StrategyRegistry[strategy['name']]\n params = data['init_params']\n params.update({k: v for k, v in strategy.items() if k != 'name'})\n return data['strategy'](**utils.filter_kwargs(params, data['strategy']))" } ]
import importlib import logging import os import pathlib import sys import click import lightning.pytorch as pl import torch.utils.data import yaml from lightning.pytorch.loggers import TensorBoardLogger from utils.config_utils import read_full_config, print_config from utils.training_utils import ( DsModelCheckpoint, DsTQDMProgressBar, get_latest_checkpoint_path, get_strategy )
2,764
torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system')) log_format = '%(asctime)s %(message)s' logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') @click.command(help='') @click.option('--config', required=True, metavar='FILE', help='Path to the configuration file') @click.option('--exp_name', required=True, metavar='EXP', help='Name of the experiment') @click.option('--work_dir', required=False, metavar='DIR', help='Directory to save the experiment') def train(config, exp_name, work_dir): config = pathlib.Path(config) config = read_full_config(config) print_config(config) if work_dir is None: work_dir = pathlib.Path(__file__).parent / 'experiments' else: work_dir = pathlib.Path(work_dir) work_dir = work_dir / exp_name assert not work_dir.exists() or work_dir.is_dir(), f'Path \'{work_dir}\' is not a directory.' work_dir.mkdir(parents=True, exist_ok=True) with open(work_dir / 'config.yaml', 'w', encoding='utf8') as f: yaml.safe_dump(config, f) config.update({'work_dir':str(work_dir)}) if not config['nccl_p2p']: print("Disabling NCCL P2P") os.environ['NCCL_P2P_DISABLE'] = '1' pl.seed_everything(config['seed'], workers=True) assert config['task_cls'] != '' pkg = ".".join(config["task_cls"].split(".")[:-1]) cls_name = config["task_cls"].split(".")[-1] task_cls = getattr(importlib.import_module(pkg), cls_name) # assert issubclass(task_cls, training.BaseTask), f'Task class {task_cls} is not a subclass of {training.BaseTask}.' task = task_cls(config=config) # work_dir = pathlib.Path(config['work_dir']) trainer = pl.Trainer( accelerator=config['pl_trainer_accelerator'], devices=config['pl_trainer_devices'], num_nodes=config['pl_trainer_num_nodes'], strategy=get_strategy(config['pl_trainer_strategy']), precision=config['pl_trainer_precision'], callbacks=[ DsModelCheckpoint( dirpath=work_dir, filename='model_ckpt_steps_{step}', auto_insert_metric_name=False, monitor='step', mode='max', save_last=False, # every_n_train_steps=config['val_check_interval'], save_top_k=config['num_ckpt_keep'], permanent_ckpt_start=config['permanent_ckpt_start'], permanent_ckpt_interval=config['permanent_ckpt_interval'], verbose=True ), # LearningRateMonitor(logging_interval='step'), DsTQDMProgressBar(), ], logger=TensorBoardLogger( save_dir=str(work_dir), name='lightning_logs', version='lastest' ), # gradient_clip_val=config['clip_grad_norm'], val_check_interval=config['val_check_interval'] ,#* config['accumulate_grad_batches'], # so this is global_steps check_val_every_n_epoch=None, log_every_n_steps=1, max_steps=config['max_updates'], use_distributed_sampler=True, num_sanity_val_steps=config['num_sanity_val_steps'], # accumulate_grad_batches=config['accumulate_grad_batches'] )
torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system')) log_format = '%(asctime)s %(message)s' logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') @click.command(help='') @click.option('--config', required=True, metavar='FILE', help='Path to the configuration file') @click.option('--exp_name', required=True, metavar='EXP', help='Name of the experiment') @click.option('--work_dir', required=False, metavar='DIR', help='Directory to save the experiment') def train(config, exp_name, work_dir): config = pathlib.Path(config) config = read_full_config(config) print_config(config) if work_dir is None: work_dir = pathlib.Path(__file__).parent / 'experiments' else: work_dir = pathlib.Path(work_dir) work_dir = work_dir / exp_name assert not work_dir.exists() or work_dir.is_dir(), f'Path \'{work_dir}\' is not a directory.' work_dir.mkdir(parents=True, exist_ok=True) with open(work_dir / 'config.yaml', 'w', encoding='utf8') as f: yaml.safe_dump(config, f) config.update({'work_dir':str(work_dir)}) if not config['nccl_p2p']: print("Disabling NCCL P2P") os.environ['NCCL_P2P_DISABLE'] = '1' pl.seed_everything(config['seed'], workers=True) assert config['task_cls'] != '' pkg = ".".join(config["task_cls"].split(".")[:-1]) cls_name = config["task_cls"].split(".")[-1] task_cls = getattr(importlib.import_module(pkg), cls_name) # assert issubclass(task_cls, training.BaseTask), f'Task class {task_cls} is not a subclass of {training.BaseTask}.' task = task_cls(config=config) # work_dir = pathlib.Path(config['work_dir']) trainer = pl.Trainer( accelerator=config['pl_trainer_accelerator'], devices=config['pl_trainer_devices'], num_nodes=config['pl_trainer_num_nodes'], strategy=get_strategy(config['pl_trainer_strategy']), precision=config['pl_trainer_precision'], callbacks=[ DsModelCheckpoint( dirpath=work_dir, filename='model_ckpt_steps_{step}', auto_insert_metric_name=False, monitor='step', mode='max', save_last=False, # every_n_train_steps=config['val_check_interval'], save_top_k=config['num_ckpt_keep'], permanent_ckpt_start=config['permanent_ckpt_start'], permanent_ckpt_interval=config['permanent_ckpt_interval'], verbose=True ), # LearningRateMonitor(logging_interval='step'), DsTQDMProgressBar(), ], logger=TensorBoardLogger( save_dir=str(work_dir), name='lightning_logs', version='lastest' ), # gradient_clip_val=config['clip_grad_norm'], val_check_interval=config['val_check_interval'] ,#* config['accumulate_grad_batches'], # so this is global_steps check_val_every_n_epoch=None, log_every_n_steps=1, max_steps=config['max_updates'], use_distributed_sampler=True, num_sanity_val_steps=config['num_sanity_val_steps'], # accumulate_grad_batches=config['accumulate_grad_batches'] )
trainer.fit(task, ckpt_path=get_latest_checkpoint_path(work_dir))
4
2023-10-17 13:45:09+00:00
4k
RobertCsordas/moe
models/transformer_language_model.py
[ { "identifier": "LoggingLayer", "path": "layers/logging_layer.py", "snippet": "class LoggingLayer:\n def __init__(self) -> None:\n super().__init__()\n self._logs = {}\n self._log_counts = {}\n self._custom_reductions = {}\n\n def custom_reduction(self, name: str, reduction):\n self._custom_reductions[name] = reduction\n\n def log(self, name: str, value: Any, drop_old: bool = False):\n value = U.apply_to_tensors(value, lambda x: x.detach())\n\n drop_old = drop_old or (not isinstance(value, (torch.Tensor, np.ndarray, float, int)))\n\n if name in self._custom_reductions:\n if name not in self._logs:\n self._logs[name] = []\n\n self._logs[name].append(value)\n else:\n if name not in self._logs or drop_old:\n self._logs[name] = value\n self._log_counts[name] = 1\n else:\n self._logs[name] = self._logs[name] + value\n self._log_counts[name] = self._log_counts[name] + 1\n\n def get_logs(self) -> Dict[str, Any]:\n res = {}\n for k, v in self._logs.items():\n if k in self._custom_reductions:\n res[k] = self._custom_reductions[k](v)\n elif isinstance(v, (torch.Tensor, np.ndarray, int, float)):\n res[k] = v / self._log_counts[k]\n else:\n res[k] = v\n\n self._logs = {}\n self._log_counts = {}\n return res\n\n def dump_logs(self, save_dir: str):\n pass" }, { "identifier": "AttentionMask", "path": "layers/transformer/multi_head_attention.py", "snippet": "class AttentionMask:\n src_length_mask: Optional[torch.Tensor]\n position_mask: Optional[torch.Tensor]" }, { "identifier": "Transformer", "path": "layers/transformer/transformer.py", "snippet": "class Transformer(TransformerBase):\n def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6, num_decoder_layers: int = 6,\n dim_feedforward: int = 2048, dropout: float = 0.1, activation: ActivationFunction = F.relu,\n encoder_layer=TransformerEncoderWithLayer(), decoder_layer=TransformerDecoderWithLayer(),\n attention_dropout: float = 0):\n\n super().__init__(\n encoder_layer(num_encoder_layers, d_model, nhead, dim_feedforward, dropout, activation, attention_dropout),\n decoder_layer(num_decoder_layers, d_model, nhead, dim_feedforward, dropout, activation, attention_dropout))" } ]
import torch import torch.nn import torch.nn.functional as F import framework import math from typing import Optional, Tuple, Any, List from layers import LoggingLayer from layers.transformer.multi_head_attention import AttentionMask from layers.transformer.transformer import Transformer
2,064
self.shared_layers = all([la is layers[0] for la in layers]) if embedding_size is None: self.embedding_adapter = lambda x: x else: self.embedding_adapter = torch.nn.Linear(embedding_size, state_size) self.dropout = torch.nn.Dropout(dropout) self.layers = torch.nn.ModuleList(layers) self.output_adapter = lambda x: x self.n_prev_states = n_prev_states self.n_prev_states_test = n_prev_states_test or n_prev_states self.same_length_eval = same_length_eval self.embedding_scale = math.sqrt(state_size) self.p_drop_layer = p_drop_layer self.use_last_state = use_last_state self.same_length = same_length self.iter = 0 self.output_mode = output_mode assert self.output_mode in {"normal", "sum", "geometric", "sigmoid"} if self.output_mode in {"geometric", "sigmoid"}: self.output_gate = torch.nn.Linear(state_size, 1) self.adaptive = bool(adaptive_cutoffs) out_proj_size = (embedding_size or state_size) if tied_embedding else state_size if self.adaptive: self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss( out_proj_size, voc_size, adaptive_cutoffs, div_value=1, tied_to=self.embedding if tied_embedding else None) else: self.output = torch.nn.Linear(out_proj_size, voc_size) if norm_before_output or self.output_mode in {"sum", "sigmoid"}: self.out_norm = torch.nn.LayerNorm(state_size) else: self.out_norm = lambda x: x if tied_embedding: if not self.adaptive: self.output.weight = self.embedding.weight if embedding_size is not None: self.output_adapter = torch.nn.Linear(state_size, embedding_size) @staticmethod def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor: return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1) def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor: net = self.out_norm(x) net = self.output_adapter(net) net = self.dropout(net) if self.adaptive: net = self.output(net.transpose(0, 1), target) else: net = self.output(net.transpose(0, 1)) return net def accumulate_output(self, features: List[torch.Tensor]) -> torch.Tensor: if self.output_mode == "sum": return sum(features) elif self.output_mode in {"geometric", "sigmoid"}: # Must cast it to float16, otherwise pytorch will crash after a few hundred iterations with an # incomprehensible error in the gradient scaler gates = torch.sigmoid(torch.cat([self.output_gate(f).float() for f in features], -1)) if self.output_mode == "geometric": ngates = torch.cumprod(1.0 - gates, -1) scores = torch.cat([gates[..., 0:1], gates[..., 1:] * ngates[..., :-1]], -1) else: scores = gates if self.iter % 100 == 0 and self.training: self.log("output_gate_mean", framework.visualize.plot.Barplot(scores.flatten(end_dim=-2).mean(0))) # return sum(f * scores[..., i: i+1] for i, f in enumerate(features)) f = scores.unsqueeze(-2) @ torch.stack(features, -2) return f.squeeze(-2) else: assert False, "Invalid output mode" def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]: causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device) net = self.dropout(self.embedding(x.T.long())) net = self.embedding_adapter(net) net = net * self.embedding_scale new_state = [] features = [net] n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test same_length = self.same_length or ((not self.training) and self.same_length_eval) if same_length and state is not None: causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \ [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask] causality_mask = torch.cat(causality_mask, -1) plot_cossim = (self.iter % 100 == 0 and self.training) for li, l in enumerate(self.layers): if n_prev_states > 0: if li == 0: # Pos offset should be constant for all layers pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0 # Concatenate the new state with the previous states li_r = 0 if self.use_last_state else li s = (state[li_r] + [net]) if state is not None else [net] attend_to = torch.cat(s, 1) if not self.use_last_state: s[-1] = s[-1].detach() new_state.append(s[-n_prev_states:]) else: pos_offset = None attend_to = None
class TransformerLanguageModel(LoggingLayer, torch.nn.Module): def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float, tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int, n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [], same_length_eval: bool = True, norm_before_output: bool = False, p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False, output_mode: str = "normal"): super().__init__() self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size) # with torch.no_grad(): # self.embedding.weight.uniform_(-0.1, 0.1) torch.nn.init.xavier_uniform_(self.embedding.weight) self.shared_layers = all([la is layers[0] for la in layers]) if embedding_size is None: self.embedding_adapter = lambda x: x else: self.embedding_adapter = torch.nn.Linear(embedding_size, state_size) self.dropout = torch.nn.Dropout(dropout) self.layers = torch.nn.ModuleList(layers) self.output_adapter = lambda x: x self.n_prev_states = n_prev_states self.n_prev_states_test = n_prev_states_test or n_prev_states self.same_length_eval = same_length_eval self.embedding_scale = math.sqrt(state_size) self.p_drop_layer = p_drop_layer self.use_last_state = use_last_state self.same_length = same_length self.iter = 0 self.output_mode = output_mode assert self.output_mode in {"normal", "sum", "geometric", "sigmoid"} if self.output_mode in {"geometric", "sigmoid"}: self.output_gate = torch.nn.Linear(state_size, 1) self.adaptive = bool(adaptive_cutoffs) out_proj_size = (embedding_size or state_size) if tied_embedding else state_size if self.adaptive: self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss( out_proj_size, voc_size, adaptive_cutoffs, div_value=1, tied_to=self.embedding if tied_embedding else None) else: self.output = torch.nn.Linear(out_proj_size, voc_size) if norm_before_output or self.output_mode in {"sum", "sigmoid"}: self.out_norm = torch.nn.LayerNorm(state_size) else: self.out_norm = lambda x: x if tied_embedding: if not self.adaptive: self.output.weight = self.embedding.weight if embedding_size is not None: self.output_adapter = torch.nn.Linear(state_size, embedding_size) @staticmethod def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor: return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1) def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor: net = self.out_norm(x) net = self.output_adapter(net) net = self.dropout(net) if self.adaptive: net = self.output(net.transpose(0, 1), target) else: net = self.output(net.transpose(0, 1)) return net def accumulate_output(self, features: List[torch.Tensor]) -> torch.Tensor: if self.output_mode == "sum": return sum(features) elif self.output_mode in {"geometric", "sigmoid"}: # Must cast it to float16, otherwise pytorch will crash after a few hundred iterations with an # incomprehensible error in the gradient scaler gates = torch.sigmoid(torch.cat([self.output_gate(f).float() for f in features], -1)) if self.output_mode == "geometric": ngates = torch.cumprod(1.0 - gates, -1) scores = torch.cat([gates[..., 0:1], gates[..., 1:] * ngates[..., :-1]], -1) else: scores = gates if self.iter % 100 == 0 and self.training: self.log("output_gate_mean", framework.visualize.plot.Barplot(scores.flatten(end_dim=-2).mean(0))) # return sum(f * scores[..., i: i+1] for i, f in enumerate(features)) f = scores.unsqueeze(-2) @ torch.stack(features, -2) return f.squeeze(-2) else: assert False, "Invalid output mode" def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]: causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device) net = self.dropout(self.embedding(x.T.long())) net = self.embedding_adapter(net) net = net * self.embedding_scale new_state = [] features = [net] n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test same_length = self.same_length or ((not self.training) and self.same_length_eval) if same_length and state is not None: causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \ [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask] causality_mask = torch.cat(causality_mask, -1) plot_cossim = (self.iter % 100 == 0 and self.training) for li, l in enumerate(self.layers): if n_prev_states > 0: if li == 0: # Pos offset should be constant for all layers pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0 # Concatenate the new state with the previous states li_r = 0 if self.use_last_state else li s = (state[li_r] + [net]) if state is not None else [net] attend_to = torch.cat(s, 1) if not self.use_last_state: s[-1] = s[-1].detach() new_state.append(s[-n_prev_states:]) else: pos_offset = None attend_to = None
net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,
1
2023-10-16 11:26:45+00:00
4k
yk/llmvm
parsing.py
[ { "identifier": "Arg", "path": "interface.py", "snippet": "class Arg(pydantic.BaseModel):\n vtype: str\n value: str" }, { "identifier": "Load", "path": "interface.py", "snippet": "class Load(Expr):\n kind: str = \"load\"\n vtype: str\n ptr: str" }, { "identifier": "Icmp", "path": "interface.py", "snippet": "class Icmp(Expr):\n kind: str = \"icmp\"\n vtype: str\n op: str\n lhs: str\n rhs: str" }, { "identifier": "Srem", "path": "interface.py", "snippet": "class Srem(Expr):\n kind: str = \"srem\"\n vtype: str\n lhs: str\n rhs: str" }, { "identifier": "Add", "path": "interface.py", "snippet": "class Add(Expr):\n kind: str = \"add\"\n vtype: str\n lhs: str\n rhs: str" }, { "identifier": "Mul", "path": "interface.py", "snippet": "class Mul(Expr):\n kind: str = \"mul\"\n vtype: str\n lhs: str\n rhs: str" }, { "identifier": "Call", "path": "interface.py", "snippet": "class Call(Expr):\n kind: str = \"call\"\n name: str\n args: list[Arg]" }, { "identifier": "Assign", "path": "interface.py", "snippet": "class Assign(Instruction):\n kind: str = \"assign\"\n reg: str\n expr: Expr" }, { "identifier": "Store", "path": "interface.py", "snippet": "class Store(Instruction):\n kind: str = \"store\"\n vtype: str\n value: str\n ptr: str" }, { "identifier": "Branch", "path": "interface.py", "snippet": "class Branch(Instruction):\n kind: str = \"branch\"\n label: str" }, { "identifier": "BranchCond", "path": "interface.py", "snippet": "class BranchCond(Instruction):\n kind: str = \"branch_cond\"\n cond_reg: str\n label_true: str\n label_false: str" }, { "identifier": "Return", "path": "interface.py", "snippet": "class Return(Instruction):\n kind: str = \"return\"\n vtype: str\n value: str" }, { "identifier": "Program", "path": "interface.py", "snippet": "class Program(pydantic.BaseModel):\n instructions: list[Instruction]\n labels: dict[str, int]\n constants: dict[str, Any]\n convert_numbers_to_chars: bool = False" }, { "identifier": "to_vtype", "path": "interface.py", "snippet": "def to_vtype(value, vtype):\n match vtype:\n case \"i32\":\n return int(value)\n case \"i8\":\n return str(value)\n case \"str\":\n return str(value)\n raise NotImplementedError(vtype)" }, { "identifier": "GetElementPtr", "path": "interface.py", "snippet": "class GetElementPtr(Expr):\n kind: str = \"get_element_ptr\"\n vtype: str\n ptr: str\n idx: str" }, { "identifier": "Copy", "path": "interface.py", "snippet": "class Copy(Expr):\n kind: str = \"copy\"\n ptr: str" }, { "identifier": "Switch", "path": "interface.py", "snippet": "class Switch(Instruction):\n kind: str = \"switch\"\n ptr: str\n default_label: str\n cases: dict[str, str]" }, { "identifier": "AllocArray", "path": "interface.py", "snippet": "class AllocArray(Expr):\n kind: str = \"alloc_array\"\n vtype: str\n size: int" }, { "identifier": "Alloc", "path": "interface.py", "snippet": "class Alloc(Expr):\n kind: str = \"alloc\"\n vtype: str" } ]
import re from loguru import logger from interface import Arg, Load, Icmp, Srem, Add, Mul, Call, Assign, Store, Branch, BranchCond, Return, Program, to_vtype, GetElementPtr, Copy, Switch, AllocArray, Alloc
2,119
def parse_arg(arg): logger.debug(f"parse_arg({arg})") if m := re.match(r"ptr noundef (\S+)", arg): return Arg(vtype="str", value=m.group(1)) if m := re.match(r"i32 noundef (\S+)", arg): return Arg(vtype="i32", value=m.group(1)) raise NotImplementedError(arg) def parse_call(expr): logger.debug(f"parse_call({expr})") if m := re.match(r"\s*call \w+(?: \(.*\))? @(\w+)\((.*)\)", expr): name, args = m.groups() args = args.split(", ") args = [parse_arg(arg) for arg in args if arg] return Call(name=name, args=args) return None def parse_expr(expr): if m := re.match(r"alloca \[(\d+) x (\S+)\]", expr): size, vtype = m.groups() return AllocArray(vtype=vtype, size=int(size)) if m := re.match(r"alloca (\S+),", expr): vtype = m.group(1) return Alloc(vtype=vtype) if m := re.match(r"sext \S+ (\S+) to \S+", expr): return Copy(ptr=m.group(1)) if m := re.match(r"load (\w+), ptr (%\d+),", expr): return Load(vtype=m.group(1), ptr=m.group(2)) if m := re.match(r"icmp (eq|ne|sgt|sge|slt|sle) (\w+) (\S+), (\S+)", expr): op, vtype, lhs, rhs = m.groups() return Icmp(vtype=vtype, op=op, lhs=lhs, rhs=rhs) if m := re.match(r"srem (\w+) (\S+), (\S+)", expr): vtype, lhs, rhs = m.groups() return Srem(vtype=vtype, lhs=lhs, rhs=rhs) if m := re.match(r"add nsw (\w+) (\S+), (\S+)", expr): vtype, lhs, rhs = m.groups() return Add(vtype=vtype, lhs=lhs, rhs=rhs) if m := re.match(r"mul nsw (\w+) (\S+), (\S+)", expr): vtype, lhs, rhs = m.groups() return Mul(vtype=vtype, lhs=lhs, rhs=rhs) if call := parse_call(expr): if call is not None: logger.debug(f"found call {call}") return call if m := re.match(r"getelementptr inbounds \[\d+ x (\S+)\], ptr (\S+), i32 0, i32 (\S+)", expr): vtype, ptr, idx = m.groups() return GetElementPtr(vtype=vtype, ptr=ptr, idx=idx) raise NotImplementedError(expr) def parse_switch(in_f): cases = {} for line in _line_stripper(in_f): if re.fullmatch(r"\s+\]", line): break if m := re.match(r"\s+i32 (\S+), label %(\d+)", line): value, label = m.groups() cases[value] = label continue raise NotImplementedError(line) else: raise ValueError("Expected ']' in switch") return cases def parse_instructions(in_f): instructions = [] labels = {} for line in _line_stripper(in_f): if re.fullmatch(r"\}", line): break if m := re.match(r"(\d+):", line): label = m.group(1) labels[label] = len(instructions) continue if m := re.fullmatch(r"\s+(%\d+) = (.*)", line): # register assignment reg, expr = m.groups() expr = parse_expr(expr) if expr is not None: instructions.append(Assign(reg=reg, expr=expr)) continue if m := re.match(r"\s+store (\w+) (\S+), ptr (\S+),", line): vtype, value, ptr = m.groups() instructions.append(Store(vtype=vtype, value=value, ptr=ptr)) continue if m := re.match(r"\s+br label %(\d+)", line): label = m.group(1) instructions.append(Branch(label=label)) continue if m := re.match(r"\s+br i1 (%\d+), label %(\d+), label %(\d+)", line): cond_reg, label_true, label_false = m.groups() instructions.append(BranchCond(cond_reg=cond_reg, label_true=label_true, label_false=label_false)) continue if m := re.match(r"\s+ret (\S+) (\S+)", line): vtype, value = m.groups() instructions.append(Return(vtype=vtype, value=value)) continue if call := parse_call(line): if call is not None: logger.debug(f"found call {call}") instructions.append(call) continue if m := re.match(r"\s+switch \S+ (\S+), label %(\d+) \[", line): ptr, default_label = m.groups() cases = parse_switch(in_f) instructions.append(Switch(ptr=ptr, default_label=default_label, cases=cases)) continue raise NotImplementedError(line) return instructions, labels def parse_program(in_f): constants = {} for line in _line_stripper(in_f): if m := re.match(r'(@\.str(?:\.\d+)?) .* c"([^"]+)\\00",', line): name, value = m.groups() value = value.replace(r"\0A", "\n")
def _line_stripper(in_f): for line in in_f: line = line.rstrip() if not line: continue yield line def parse_arg(arg): logger.debug(f"parse_arg({arg})") if m := re.match(r"ptr noundef (\S+)", arg): return Arg(vtype="str", value=m.group(1)) if m := re.match(r"i32 noundef (\S+)", arg): return Arg(vtype="i32", value=m.group(1)) raise NotImplementedError(arg) def parse_call(expr): logger.debug(f"parse_call({expr})") if m := re.match(r"\s*call \w+(?: \(.*\))? @(\w+)\((.*)\)", expr): name, args = m.groups() args = args.split(", ") args = [parse_arg(arg) for arg in args if arg] return Call(name=name, args=args) return None def parse_expr(expr): if m := re.match(r"alloca \[(\d+) x (\S+)\]", expr): size, vtype = m.groups() return AllocArray(vtype=vtype, size=int(size)) if m := re.match(r"alloca (\S+),", expr): vtype = m.group(1) return Alloc(vtype=vtype) if m := re.match(r"sext \S+ (\S+) to \S+", expr): return Copy(ptr=m.group(1)) if m := re.match(r"load (\w+), ptr (%\d+),", expr): return Load(vtype=m.group(1), ptr=m.group(2)) if m := re.match(r"icmp (eq|ne|sgt|sge|slt|sle) (\w+) (\S+), (\S+)", expr): op, vtype, lhs, rhs = m.groups() return Icmp(vtype=vtype, op=op, lhs=lhs, rhs=rhs) if m := re.match(r"srem (\w+) (\S+), (\S+)", expr): vtype, lhs, rhs = m.groups() return Srem(vtype=vtype, lhs=lhs, rhs=rhs) if m := re.match(r"add nsw (\w+) (\S+), (\S+)", expr): vtype, lhs, rhs = m.groups() return Add(vtype=vtype, lhs=lhs, rhs=rhs) if m := re.match(r"mul nsw (\w+) (\S+), (\S+)", expr): vtype, lhs, rhs = m.groups() return Mul(vtype=vtype, lhs=lhs, rhs=rhs) if call := parse_call(expr): if call is not None: logger.debug(f"found call {call}") return call if m := re.match(r"getelementptr inbounds \[\d+ x (\S+)\], ptr (\S+), i32 0, i32 (\S+)", expr): vtype, ptr, idx = m.groups() return GetElementPtr(vtype=vtype, ptr=ptr, idx=idx) raise NotImplementedError(expr) def parse_switch(in_f): cases = {} for line in _line_stripper(in_f): if re.fullmatch(r"\s+\]", line): break if m := re.match(r"\s+i32 (\S+), label %(\d+)", line): value, label = m.groups() cases[value] = label continue raise NotImplementedError(line) else: raise ValueError("Expected ']' in switch") return cases def parse_instructions(in_f): instructions = [] labels = {} for line in _line_stripper(in_f): if re.fullmatch(r"\}", line): break if m := re.match(r"(\d+):", line): label = m.group(1) labels[label] = len(instructions) continue if m := re.fullmatch(r"\s+(%\d+) = (.*)", line): # register assignment reg, expr = m.groups() expr = parse_expr(expr) if expr is not None: instructions.append(Assign(reg=reg, expr=expr)) continue if m := re.match(r"\s+store (\w+) (\S+), ptr (\S+),", line): vtype, value, ptr = m.groups() instructions.append(Store(vtype=vtype, value=value, ptr=ptr)) continue if m := re.match(r"\s+br label %(\d+)", line): label = m.group(1) instructions.append(Branch(label=label)) continue if m := re.match(r"\s+br i1 (%\d+), label %(\d+), label %(\d+)", line): cond_reg, label_true, label_false = m.groups() instructions.append(BranchCond(cond_reg=cond_reg, label_true=label_true, label_false=label_false)) continue if m := re.match(r"\s+ret (\S+) (\S+)", line): vtype, value = m.groups() instructions.append(Return(vtype=vtype, value=value)) continue if call := parse_call(line): if call is not None: logger.debug(f"found call {call}") instructions.append(call) continue if m := re.match(r"\s+switch \S+ (\S+), label %(\d+) \[", line): ptr, default_label = m.groups() cases = parse_switch(in_f) instructions.append(Switch(ptr=ptr, default_label=default_label, cases=cases)) continue raise NotImplementedError(line) return instructions, labels def parse_program(in_f): constants = {} for line in _line_stripper(in_f): if m := re.match(r'(@\.str(?:\.\d+)?) .* c"([^"]+)\\00",', line): name, value = m.groups() value = value.replace(r"\0A", "\n")
constants[name] = to_vtype(value=value, vtype="str")
13
2023-10-23 21:29:14+00:00
4k
w-e-w/sd-webui-nudenet-nsfw-censor
scripts/nudenet_nsfw_censor_scripts/post_processing_script.py
[ { "identifier": "pil_nude_detector", "path": "scripts/nudenet_nsfw_censor_scripts/pil_nude_detector.py", "snippet": "def draw_ellipse(draw, left_expanded, top_expanded, right_expanded, down_expanded, *args, **kwargs):\ndef draw_rectangle(draw, left_expanded, top_expanded, right_expanded, down_expanded, *args, **kwargs):\ndef rounded_rectangle(draw, left_expanded, top_expanded, right_expanded, down_expanded, width_expanded, height_expanded, rectangle_round_radius, *args, **kwargs):\n def __init__(self):\n def init_onnx(self):\n def change_onnx_provider(self):\n def refresh_label_configs(self):\n def pre_process_pil(self, pil_image):\n def calculate_censor_mask(self, detection_results, img_size, thresholds, expand_horizontal, expand_vertical, nms_threshold, nudenet_nsfw_censor_mask_shape, rectangle_round_radius):\n def get_censor_mask(self, pil_image, nms_threshold, nudenet_nsfw_censor_mask_shape, rectangle_round_radius, thresholds, expand_horizontal, expand_vertical):\nclass PilNudeDetector:" }, { "identifier": "apply_filter", "path": "scripts/nudenet_nsfw_censor_scripts/censor_image_filters.py", "snippet": "def combine_results(input_image, input_mask, processed):\ndef variable_blur(input_image: Image, control_mask: Image, blur_radius: float = 10, blur_strength_curve: float = 3, *args, **kwargs):\n def mask_array_to_img(i):\n def img_gaussian_blur(i):\n def combine_mask(index_1, index_2, pre_step_size):\n def combine(index_1, index_2, pre_step_size):\ndef gaussian_blur(input_image, input_mask, blur_radius, *args, **kwargs):\ndef pixelate(input_image, input_mask, pixelation_factor, *args, **kwargs):\ndef fill_color(input_image, input_mask, color, *args, **kwargs):\ndef do_nothing(input_image, *args, **kwargs):\ndef apply_filter(input_image, input_mask, filter_type, *args, **kwargs):" } ]
from scripts.nudenet_nsfw_censor_scripts.pil_nude_detector import pil_nude_detector, mask_shapes_func_dict from scripts.nudenet_nsfw_censor_scripts.censor_image_filters import apply_filter, filter_dict from modules import shared, images, scripts_postprocessing from PIL import Image, ImageFilter from math import sqrt from modules.ui_components import InputAccordion import gradio as gr
2,843
mask_brush_color.change( fn=update_mask_brush_color, inputs=[mask_brush_color], outputs=[input_mask] ) def get_current_image(image): # ToDo if possible make this a client side operation if image: return gr.Image.update(image) dummy_component = gr.Label(visible=False) create_canvas.click( fn=get_current_image, _js='getCurrentExtraSourceImg', inputs=[dummy_component], outputs=[input_mask], postprocess=False, ) def update_opt_ui(_filter_type, _mask_shape, _override_settings, _enable_nudenet): filter_opt_enable_list = filter_opt_ui_show_dict[_filter_type] mask_shape_opt_show_list = mask_shape_opt_ui_show_dict[_mask_shape] # blur_radius, blur_strength_curve, pixelation_factor, fill_color, mask_blend_radius, mask_blend_radius_variable_blur, rectangle_round_radius, nms_threshold return ( gr.Dropdown.update(visible=_override_settings), # filter_type gr.Dropdown.update(visible=_override_settings), # mask_shape gr.Slider.update(visible=_override_settings and filter_opt_enable_list[0]), # blur_radius gr.Slider.update(visible=_override_settings and filter_opt_enable_list[1]), # blur_strength_curve gr.Slider.update(visible=_override_settings and filter_opt_enable_list[2]), # pixelation_factor gr.ColorPicker.update(visible=_override_settings and filter_opt_enable_list[3]), # fill_color gr.Slider.update(visible=_override_settings and filter_opt_enable_list[4] and mask_shape_opt_show_list[0]), # mask_blend_radius gr.Slider.update(visible=_override_settings and filter_opt_enable_list[5] and mask_shape_opt_show_list[0]), # mask_blend_radius_variable_blur gr.Number().update(visible=_override_settings and mask_shape_opt_show_list[1]), # rectangle_round_radius gr.Slider.update(visible=_override_settings and mask_shape_opt_show_list[2] and _enable_nudenet), # nms_threshold ) for element in [override_settings, filter_type, mask_shape, enable_nudenet]: element.change(update_opt_ui, inputs=[filter_type, mask_shape, override_settings, enable_nudenet], outputs=[filter_type, mask_shape, blur_radius, blur_strength_curve, pixelation_factor, fill_color, mask_blend_radius, mask_blend_radius_variable_blur, rectangle_round_radius, nms_threshold]) controls = { 'enable': enable, 'enable_nudenet': enable_nudenet, 'override_settings': override_settings, 'save_mask': save_mask, 'filter_type': filter_type, 'blur_radius': blur_radius, 'pixelation_factor': pixelation_factor, 'fill_color': fill_color, 'mask_shape': mask_shape, 'blur_strength_curve': blur_strength_curve, 'mask_blend_radius': mask_blend_radius, 'mask_blend_radius_variable_blur': mask_blend_radius_variable_blur, 'rectangle_round_radius': rectangle_round_radius, 'nms_threshold': nms_threshold, 'input_mask': input_mask, 'mask_source': mask_source, } return controls def process(self, pp: scripts_postprocessing.PostprocessedImage, **args): if not args['enable']: return censor_mask = None if args['input_mask']: if 'Upload mask' in args['mask_source']: censor_mask = args['input_mask']['image'].convert('L').resize(pp.image.size) if 'Draw mask' in args['mask_source']: censor_mask = Image.new('L', pp.image.size, 0) if censor_mask is None else censor_mask draw_mask = args['input_mask']['mask'].convert('L').resize(pp.image.size) censor_mask.paste(draw_mask, draw_mask) if args['enable_nudenet']: if args['override_settings']: nms_threshold = args['nms_threshold'] mask_shape = args['mask_shape'] rectangle_round_radius = args['rectangle_round_radius'] else: nms_threshold = shared.opts.nudenet_nsfw_censor_nms_threshold mask_shape = shared.opts.nudenet_nsfw_censor_mask_shape rectangle_round_radius = shared.opts.nudenet_nsfw_censor_rectangle_round_radius if pil_nude_detector.thresholds is None: pil_nude_detector.refresh_label_configs() nudenet_mask = pil_nude_detector.get_censor_mask(pp.image, nms_threshold, mask_shape, rectangle_round_radius, pil_nude_detector.thresholds, pil_nude_detector.expand_horizontal, pil_nude_detector.expand_vertical) if nudenet_mask is not None: nudenet_mask = nudenet_mask.convert('L') if nudenet_mask and censor_mask: censor_mask.paste(nudenet_mask, nudenet_mask) else: censor_mask = nudenet_mask if censor_mask: scale_factor = sqrt((pp.image.size[0] ** 2 + pp.image.size[1] ** 2) / 524288) save_mask = args['save_mask'] if args['override_settings']: filter_type = args['filter_type'] mask_blend_radius = args['mask_blend_radius_variable_blur'] if filter_type == 'Variable blur' else args['mask_blend_radius'] filter_settings = { 'blur_radius': args['blur_radius'], 'blur_strength_curve': args['blur_strength_curve'], 'color': args['fill_color'], 'pixelation_factor': args['pixelation_factor'], } else: filter_type = shared.opts.nudenet_nsfw_censor_extras_filter_type mask_blend_radius = shared.opts.nudenet_nsfw_censor_mask_blend_radius_variable_blur if filter_type == 'Variable blur' else shared.opts.nudenet_nsfw_censor_mask_blend_radius filter_settings = { 'blur_radius': shared.opts.nudenet_nsfw_censor_blur_radius * scale_factor, 'blur_strength_curve': shared.opts.nudenet_nsfw_censor_blur_strength_curve, 'color': shared.opts.nudenet_nsfw_censor_fill_color, 'pixelation_factor': shared.opts.nudenet_nsfw_censor_pixelation_factor, } censor_mask = censor_mask.filter(ImageFilter.GaussianBlur(mask_blend_radius * scale_factor)) if filter_type:
if hasattr(scripts_postprocessing.ScriptPostprocessing, 'process_firstpass'): # webui >= 1.7 else: InputAccordion = None filter_opt_ui_show_dict = { # [blur_radius, blur_strength_curve, pixelation_factor, fill_color, mask_blend_radius, mask_blend_radius_variable_blur] 'Variable blur': [True, True, False, False, False, True], 'Gaussian Blur': [True, False, False, False, True, False], 'Pixelate': [False, False, True, False, True, False], 'Fill color': [False, False, False, True, True, False], 'Detect only': [False, False, False, False, True, False], } mask_shape_opt_ui_show_dict = { # [(mask_blend_radius, mask_blend_radius_variable_blur), rectangle_round_radius, nms_threshold] 'Ellipse': [True, False, True], 'Rectangle': [True, False, True], 'Rounded rectangle': [True, True, True], 'Entire image': [False, False, False] } class ScriptPostprocessingNudenetCensor(scripts_postprocessing.ScriptPostprocessing): name = 'NudeNet NSFW censor' order = 100000 def ui(self): with ( InputAccordion(False, label="NSFW Censor", elem_id='nudenet_nsfw_censor_extras') if InputAccordion else gr.Accordion('NSFW Censor', open=False, elem_id='nudenet_nsfw_censor_extras') as enable ): with gr.Row(): if not InputAccordion: enable = gr.Checkbox(False, label='Enable', elem_id='nudenet_nsfw_censor_extras-visible-checkbox') enable_nudenet = gr.Checkbox(True, label='NudeNet Auto-detect') save_mask = gr.Checkbox(False, label='Save mask') override_settings = gr.Checkbox(False, label='Override filter configs') with gr.Row(): filter_type = gr.Dropdown(value='Variable blur', label='Censor filter', choices=list(filter_dict), visible=False) mask_shape = gr.Dropdown(value='Ellipse', choices=list(mask_shapes_func_dict), label='Mask shape', visible=False) with gr.Row(): blur_radius = gr.Slider(0, 100, 10, label='Blur radius', visible=False) # Variable blur Gaussian Blur blur_strength_curve = gr.Slider(0, 6, 3, label='Blur strength curve', visible=False) # Variable blur pixelation_factor = gr.Slider(1, 10, 5, label='Pixelation factor', visible=False) # Pixelate fill_color = gr.ColorPicker(value='#000000', label='fill color', visible=False) # Fill color mask_blend_radius = gr.Slider(0, 100, 0, label='Mask blend radius', visible=False) # except Variable blur mask_blend_radius_variable_blur = gr.Slider(0, 100, 10, label='Variable blur mask blend radius', visible=False) # Variable blur nms_threshold = gr.Slider(0, 1, 1, label='NMS threshold', visible=False) # NMS threshold rectangle_round_radius = gr.Number(value=0.5, label='Rectangle round radius', visible=False) # Rounded rectangle with gr.Row(): create_canvas = gr.Button('Create canvas') mask_source = gr.CheckboxGroup(['Draw mask', 'Upload mask'], value=['Draw mask'], label="Canvas mask source") mask_brush_color = gr.ColorPicker('#000000', label='Brush color', info='visual only, use when brush color is hard to see') with gr.Row(): input_mask = gr.Image( label="Censor mask", show_label=False, elem_id="nsfw_censor_mask", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", brush_color='#000000' ) def update_mask_brush_color(color): return gr.Image.update(brush_color=color) mask_brush_color.change( fn=update_mask_brush_color, inputs=[mask_brush_color], outputs=[input_mask] ) def get_current_image(image): # ToDo if possible make this a client side operation if image: return gr.Image.update(image) dummy_component = gr.Label(visible=False) create_canvas.click( fn=get_current_image, _js='getCurrentExtraSourceImg', inputs=[dummy_component], outputs=[input_mask], postprocess=False, ) def update_opt_ui(_filter_type, _mask_shape, _override_settings, _enable_nudenet): filter_opt_enable_list = filter_opt_ui_show_dict[_filter_type] mask_shape_opt_show_list = mask_shape_opt_ui_show_dict[_mask_shape] # blur_radius, blur_strength_curve, pixelation_factor, fill_color, mask_blend_radius, mask_blend_radius_variable_blur, rectangle_round_radius, nms_threshold return ( gr.Dropdown.update(visible=_override_settings), # filter_type gr.Dropdown.update(visible=_override_settings), # mask_shape gr.Slider.update(visible=_override_settings and filter_opt_enable_list[0]), # blur_radius gr.Slider.update(visible=_override_settings and filter_opt_enable_list[1]), # blur_strength_curve gr.Slider.update(visible=_override_settings and filter_opt_enable_list[2]), # pixelation_factor gr.ColorPicker.update(visible=_override_settings and filter_opt_enable_list[3]), # fill_color gr.Slider.update(visible=_override_settings and filter_opt_enable_list[4] and mask_shape_opt_show_list[0]), # mask_blend_radius gr.Slider.update(visible=_override_settings and filter_opt_enable_list[5] and mask_shape_opt_show_list[0]), # mask_blend_radius_variable_blur gr.Number().update(visible=_override_settings and mask_shape_opt_show_list[1]), # rectangle_round_radius gr.Slider.update(visible=_override_settings and mask_shape_opt_show_list[2] and _enable_nudenet), # nms_threshold ) for element in [override_settings, filter_type, mask_shape, enable_nudenet]: element.change(update_opt_ui, inputs=[filter_type, mask_shape, override_settings, enable_nudenet], outputs=[filter_type, mask_shape, blur_radius, blur_strength_curve, pixelation_factor, fill_color, mask_blend_radius, mask_blend_radius_variable_blur, rectangle_round_radius, nms_threshold]) controls = { 'enable': enable, 'enable_nudenet': enable_nudenet, 'override_settings': override_settings, 'save_mask': save_mask, 'filter_type': filter_type, 'blur_radius': blur_radius, 'pixelation_factor': pixelation_factor, 'fill_color': fill_color, 'mask_shape': mask_shape, 'blur_strength_curve': blur_strength_curve, 'mask_blend_radius': mask_blend_radius, 'mask_blend_radius_variable_blur': mask_blend_radius_variable_blur, 'rectangle_round_radius': rectangle_round_radius, 'nms_threshold': nms_threshold, 'input_mask': input_mask, 'mask_source': mask_source, } return controls def process(self, pp: scripts_postprocessing.PostprocessedImage, **args): if not args['enable']: return censor_mask = None if args['input_mask']: if 'Upload mask' in args['mask_source']: censor_mask = args['input_mask']['image'].convert('L').resize(pp.image.size) if 'Draw mask' in args['mask_source']: censor_mask = Image.new('L', pp.image.size, 0) if censor_mask is None else censor_mask draw_mask = args['input_mask']['mask'].convert('L').resize(pp.image.size) censor_mask.paste(draw_mask, draw_mask) if args['enable_nudenet']: if args['override_settings']: nms_threshold = args['nms_threshold'] mask_shape = args['mask_shape'] rectangle_round_radius = args['rectangle_round_radius'] else: nms_threshold = shared.opts.nudenet_nsfw_censor_nms_threshold mask_shape = shared.opts.nudenet_nsfw_censor_mask_shape rectangle_round_radius = shared.opts.nudenet_nsfw_censor_rectangle_round_radius if pil_nude_detector.thresholds is None: pil_nude_detector.refresh_label_configs() nudenet_mask = pil_nude_detector.get_censor_mask(pp.image, nms_threshold, mask_shape, rectangle_round_radius, pil_nude_detector.thresholds, pil_nude_detector.expand_horizontal, pil_nude_detector.expand_vertical) if nudenet_mask is not None: nudenet_mask = nudenet_mask.convert('L') if nudenet_mask and censor_mask: censor_mask.paste(nudenet_mask, nudenet_mask) else: censor_mask = nudenet_mask if censor_mask: scale_factor = sqrt((pp.image.size[0] ** 2 + pp.image.size[1] ** 2) / 524288) save_mask = args['save_mask'] if args['override_settings']: filter_type = args['filter_type'] mask_blend_radius = args['mask_blend_radius_variable_blur'] if filter_type == 'Variable blur' else args['mask_blend_radius'] filter_settings = { 'blur_radius': args['blur_radius'], 'blur_strength_curve': args['blur_strength_curve'], 'color': args['fill_color'], 'pixelation_factor': args['pixelation_factor'], } else: filter_type = shared.opts.nudenet_nsfw_censor_extras_filter_type mask_blend_radius = shared.opts.nudenet_nsfw_censor_mask_blend_radius_variable_blur if filter_type == 'Variable blur' else shared.opts.nudenet_nsfw_censor_mask_blend_radius filter_settings = { 'blur_radius': shared.opts.nudenet_nsfw_censor_blur_radius * scale_factor, 'blur_strength_curve': shared.opts.nudenet_nsfw_censor_blur_strength_curve, 'color': shared.opts.nudenet_nsfw_censor_fill_color, 'pixelation_factor': shared.opts.nudenet_nsfw_censor_pixelation_factor, } censor_mask = censor_mask.filter(ImageFilter.GaussianBlur(mask_blend_radius * scale_factor)) if filter_type:
pp.image = apply_filter(pp.image, censor_mask, filter_type, **filter_settings)
1
2023-10-16 16:44:07+00:00
4k
enkeejunior1/Diffusion-Pullback
src/models/guided_diffusion/unet.py
[ { "identifier": "convert_module_to_f16", "path": "src/models/guided_diffusion/fp16_util.py", "snippet": "def convert_module_to_f16(l):\n \"\"\"\n Convert primitive modules to float16.\n \"\"\"\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()" }, { "identifier": "convert_module_to_f32", "path": "src/models/guided_diffusion/fp16_util.py", "snippet": "def convert_module_to_f32(l):\n \"\"\"\n Convert primitive modules to float32, undoing convert_module_to_f16().\n \"\"\"\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):\n l.weight.data = l.weight.data.float()\n if l.bias is not None:\n l.bias.data = l.bias.data.float()" }, { "identifier": "checkpoint", "path": "src/models/guided_diffusion/nn.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)" }, { "identifier": "conv_nd", "path": "src/models/guided_diffusion/nn.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "src/models/guided_diffusion/nn.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "avg_pool_nd", "path": "src/models/guided_diffusion/nn.py", "snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "zero_module", "path": "src/models/guided_diffusion/nn.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "normalization", "path": "src/models/guided_diffusion/nn.py", "snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)" }, { "identifier": "timestep_embedding", "path": "src/models/guided_diffusion/nn.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = th.exp(\n -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)\n if dim % 2:\n embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)\n return embedding" } ]
from abc import abstractmethod from einops import rearrange, reduce, repeat, einsum from .fp16_util import convert_module_to_f16, convert_module_to_f32 from .nn import ( checkpoint, conv_nd, linear, avg_pool_nd, zero_module, normalization, timestep_embedding, ) import math import time import torchvision.utils as tvu import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F
3,468
) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=int(model_channels * mult), dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = int(model_channels * mult) if ds in attention_resolutions: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ) ) if level and i == num_res_blocks: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)), ) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """
class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter( th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5 ) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=1 ) else: assert self.channels == self.out_channels self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.op(x) class ResBlock(TimestepBlock): """ A residual block that can optionally change the number of channels. :param channels: the number of input channels. :param emb_channels: the number of timestep embedding channels. :param dropout: the rate of dropout. :param out_channels: if specified, the number of out channels. :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1 convolution to change the channels in the skip connection. :param dims: determines if the signal is 1D, 2D, or 3D. :param use_checkpoint: if True, use gradient checkpointing on this module. :param up: if True, use this block for upsampling. :param down: if True, use this block for downsampling. """ def __init__( self, channels, emb_channels, dropout, out_channels=None, use_conv=False, use_scale_shift_norm=False, dims=2, use_checkpoint=False, up=False, down=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm self.in_layers = nn.Sequential( normalization(channels), nn.SiLU(), conv_nd(dims, channels, self.out_channels, 3, padding=1), ) self.updown = up or down if up: self.h_upd = Upsample(channels, False, dims) self.x_upd = Upsample(channels, False, dims) elif down: self.h_upd = Downsample(channels, False, dims) self.x_upd = Downsample(channels, False, dims) else: self.h_upd = self.x_upd = nn.Identity() self.emb_layers = nn.Sequential( nn.SiLU(), linear( emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels, ), ) self.out_layers = nn.Sequential( normalization(self.out_channels), nn.SiLU(), nn.Dropout(p=dropout), zero_module( conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) ), ) if self.out_channels == channels: self.skip_connection = nn.Identity() elif use_conv: self.skip_connection = conv_nd( dims, channels, self.out_channels, 3, padding=1 ) else: self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) # def forward(self, x, emb): # """ # Apply the block to a Tensor, conditioned on a timestep embedding. # :param x: an [N x C x ...] Tensor of features. # :param emb: an [N x emb_channels] Tensor of timestep embeddings. # :return: an [N x C x ...] Tensor of outputs. # """ # return checkpoint( # self._forward, (x, emb), self.parameters(), self.use_checkpoint # ) def forward(self, x, emb): if self.updown: in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] h = in_rest(x) h = self.h_upd(h) x = self.x_upd(x) h = in_conv(h) else: h = self.in_layers(x) emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] scale, shift = th.chunk(emb_out, 2, dim=1) h = out_norm(h) * (1 + scale) + shift h = out_rest(h) else: h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h class AttentionBlock(nn.Module): """ An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. """ def __init__( self, channels, num_heads=1, num_head_channels=-1, use_checkpoint=False, use_new_attention_order=False, ): super().__init__() self.channels = channels if num_head_channels == -1: self.num_heads = num_heads else: assert ( channels % num_head_channels == 0 ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" self.num_heads = channels // num_head_channels self.use_checkpoint = use_checkpoint self.norm = normalization(channels) self.qkv = conv_nd(1, channels, channels * 3, 1) if use_new_attention_order: # split qkv before split heads self.attention = QKVAttention(self.num_heads) else: # split heads before split qkv self.attention = QKVAttentionLegacy(self.num_heads) self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) # def forward(self, x): # return checkpoint(self._forward, (x,), self.parameters(), True) def forward(self, x): b, c, *spatial = x.shape x = x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) h = self.attention(qkv) h = self.proj_out(h) return (x + h).reshape(b, c, *spatial) def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) # We perform two matmuls with the same number of ops. # The first computes the weight matrix, the second computes # the combination of the value vectors. matmul_ops = 2 * b * (num_spatial ** 2) * c model.total_ops += th.DoubleTensor([matmul_ops]) class QKVAttentionLegacy(nn.Module): """ A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", q * scale, k * scale ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", (q * scale).view(bs * self.n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length), ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class UNetModel(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, ): super().__init__() if num_heads_upsample == -1: num_heads_upsample = num_heads self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels self.num_res_blocks = num_res_blocks self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) ch = input_ch = int(channel_mult[0] * model_channels) self.input_blocks = nn.ModuleList( [TimestepEmbedSequential(conv_nd(dims, in_channels, ch, 3, padding=1))] ) self._feature_size = ch input_block_chans = [ch] ds = 1 for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=int(mult * model_channels), dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = int(mult * model_channels) if ds in attention_resolutions: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=int(model_channels * mult), dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = int(model_channels * mult) if ds in attention_resolutions: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=num_head_channels, use_new_attention_order=use_new_attention_order, ) ) if level and i == num_res_blocks: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, input_ch, out_channels, 3, padding=1)), ) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """
self.input_blocks.apply(convert_module_to_f32)
1
2023-10-21 04:08:44+00:00
4k
NVIDIA-Omniverse/IsaacSim-Automator
src/python/deployer.py
[ { "identifier": "colorize_error", "path": "src/python/utils.py", "snippet": "def colorize_error(text):\n return click.style(text, fg=\"bright_red\", italic=True)" }, { "identifier": "colorize_info", "path": "src/python/utils.py", "snippet": "def colorize_info(text):\n return click.style(text, fg=\"bright_magenta\", italic=True)" }, { "identifier": "colorize_prompt", "path": "src/python/utils.py", "snippet": "def colorize_prompt(text):\n return click.style(text, fg=\"bright_cyan\", italic=True)" }, { "identifier": "colorize_result", "path": "src/python/utils.py", "snippet": "def colorize_result(text):\n return click.style(text, fg=\"bright_green\", italic=True)" }, { "identifier": "read_meta", "path": "src/python/utils.py", "snippet": "def read_meta(deployment_name: str, verbose: bool = False):\n \"\"\"\n Read metadata from json file\n \"\"\"\n\n meta_file = f\"{config['state_dir']}/{deployment_name}/meta.json\"\n\n if os.path.isfile(meta_file):\n data = json.loads(Path(meta_file).read_text())\n if verbose:\n click.echo(colorize_info(f\"* Meta info loaded from '{meta_file}'\"))\n return data\n\n raise Exception(f\"Meta file '{meta_file}' not found\")" }, { "identifier": "shell_command", "path": "src/python/utils.py", "snippet": "def shell_command(\n command, verbose=False, cwd=None, exit_on_error=True, capture_output=False\n):\n \"\"\"\n Execute shell command, print it if debug is enabled\n \"\"\"\n\n if verbose:\n if cwd is not None:\n click.echo(colorize_info(f\"* Running `(cd {cwd} && {command})`...\"))\n else:\n click.echo(colorize_info(f\"* Running `{command}`...\"))\n\n res = subprocess.run(\n command,\n shell=True,\n cwd=cwd,\n capture_output=capture_output,\n )\n\n if res.returncode == 0:\n if verbose and res.stdout is not None:\n click.echo(res.stdout.decode())\n\n elif exit_on_error:\n if res.stderr is not None:\n click.echo(\n colorize_error(f\"Error: {res.stderr.decode()}\"),\n err=True,\n )\n exit(1)\n\n return res" }, { "identifier": "debug_break", "path": "src/python/debug.py", "snippet": "def debug_break():\n debug_start()\n debugpy.breakpoint()" }, { "identifier": "check_ngc_access", "path": "src/python/ngc.py", "snippet": "def check_ngc_access(ngc_api_key, org=\"\", team=\"\", verbose=False):\n \"\"\"\n Checks if NGC API key is valid and user has access to DRIVE Sim.\n\n Returns:\n\n - 0 - all is fine\n - 100 - invalid api key\n - 102 - user is not in the team\n \"\"\"\n\n proc = subprocess.run(\n [f\"{SELF_DIR}/ngc_check.expect\", ngc_api_key, org, team],\n capture_output=not verbose,\n timeout=60,\n )\n\n if proc.returncode not in [0, 100, 101, 102]:\n raise RuntimeError(\n f\"Error checking NGC API Key. Return code: {proc.returncode}\"\n )\n\n return proc.returncode" } ]
import json import os import re import shlex import sys import click from pathlib import Path from src.python.utils import ( colorize_error, colorize_info, colorize_prompt, colorize_result, read_meta, shell_command, ) from src.python.debug import debug_break # noqa from src.python.ngc import check_ngc_access
2,107
self.params["debug"], ) def recreate_command_line(self, separator=" \\\n"): """ Recreate command line """ command_line = sys.argv[0] for k, v in self.input_params.items(): k = k.replace("_", "-") if isinstance(v, bool): if v: command_line += separator + "--" + k else: not_prefix = "--no-" if k in ["from-image"]: not_prefix = "--not-" command_line += separator + not_prefix + k else: command_line += separator + "--" + k + " " if isinstance(v, str): command_line += "'" + shlex.quote(v) + "'" else: command_line += str(v) return command_line def ask_existing_behavior(self): """ Ask what to do if deployment already exists """ deployment_name = self.params["deployment_name"] existing = self.params["existing"] self.existing_behavior = existing if existing == "ask" and os.path.isfile( f"{self.config['state_dir']}/{deployment_name}/.tfvars" ): self.existing_behavior = click.prompt( text=colorize_prompt( "* Deploymemnt exists, what would you like to do? See --help for details." ), type=click.Choice(["repair", "modify", "replace", "run_ansible"]), default="replace", ) if ( self.existing_behavior == "repair" or self.existing_behavior == "run_ansible" ): # restore params from meta file r = self.read_meta() self.params = r["params"] click.echo( colorize_info( f"* Repairing existing deployment \"{self.params['deployment_name']}\"..." ) ) # update meta info (with new value for existing_behavior) self.save_meta() # destroy existing deployment`` if self.existing_behavior == "replace": debug = self.params["debug"] click.echo(colorize_info("* Deleting existing deployment...")) shell_command( command=f'{self.config["app_dir"]}/destroy "{deployment_name}" --yes' + f' {"--debug" if debug else ""}', verbose=debug, ) # update meta info if deployment was destroyed self.save_meta() def validate_ngc_api_key(self, image, restricted_image=False): """ Check if NGC API key allows to log in and has access to appropriate NGC image @param image: NGC image to check access to @param restricted_image: If image is restricted to specific org/team? """ debug = self.params["debug"] ngc_api_key = self.params["ngc_api_key"] ngc_api_key_check = self.params["ngc_api_key_check"] # extract org and team from the image path r = re.findall( "^nvcr\\.io/([a-z0-9\\-_]+)/([a-z0-9\\-_]+/)?[a-z0-9\\-_]+:[a-z0-9\\-_.]+$", image, ) ngc_org, ngc_team = r[0] ngc_team = ngc_team.rstrip("/") if ngc_org == "nvidia": click.echo( colorize_info( "* Access to docker image can't be checked for NVIDIA org. But you'll be fine. Fingers crossed." ) ) return if debug: click.echo(colorize_info(f'* Will check access to NGC Org: "{ngc_org}"')) click.echo(colorize_info(f'* Will check access to NGC Team: "{ngc_team}"')) if ngc_api_key_check and ngc_api_key != "none": click.echo(colorize_info("* Validating NGC API key... "))
# region copyright # Copyright 2023 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # endregion class Deployer: def __init__(self, params, config): self.tf_outputs = {} self.params = params self.config = config self.existing_behavior = None # save original params so we can recreate command line self.input_params = params.copy() # convert "in_china" self.params["in_china"] = {"yes": True, "no": False, "auto": False}[ self.params["in_china"] ] # create state directory if it doesn't exist os.makedirs(self.config["state_dir"], exist_ok=True) # print complete command line if self.params["debug"]: click.echo(colorize_info("* Command:\n" + self.recreate_command_line())) def __del__(self): # update meta info self.save_meta() def save_meta(self): """ Save command parameters in json file, just in case """ meta_file = ( f"{self.config['state_dir']}/{self.params['deployment_name']}/meta.json" ) data = { "command": self.recreate_command_line(separator=" "), "input_params": self.input_params, "params": self.params, "config": self.config, } Path(meta_file).parent.mkdir(parents=True, exist_ok=True) Path(meta_file).write_text(json.dumps(data, indent=4)) if self.params["debug"]: click.echo(colorize_info(f"* Meta info saved to '{meta_file}'")) def read_meta(self): return read_meta( self.params["deployment_name"], self.params["debug"], ) def recreate_command_line(self, separator=" \\\n"): """ Recreate command line """ command_line = sys.argv[0] for k, v in self.input_params.items(): k = k.replace("_", "-") if isinstance(v, bool): if v: command_line += separator + "--" + k else: not_prefix = "--no-" if k in ["from-image"]: not_prefix = "--not-" command_line += separator + not_prefix + k else: command_line += separator + "--" + k + " " if isinstance(v, str): command_line += "'" + shlex.quote(v) + "'" else: command_line += str(v) return command_line def ask_existing_behavior(self): """ Ask what to do if deployment already exists """ deployment_name = self.params["deployment_name"] existing = self.params["existing"] self.existing_behavior = existing if existing == "ask" and os.path.isfile( f"{self.config['state_dir']}/{deployment_name}/.tfvars" ): self.existing_behavior = click.prompt( text=colorize_prompt( "* Deploymemnt exists, what would you like to do? See --help for details." ), type=click.Choice(["repair", "modify", "replace", "run_ansible"]), default="replace", ) if ( self.existing_behavior == "repair" or self.existing_behavior == "run_ansible" ): # restore params from meta file r = self.read_meta() self.params = r["params"] click.echo( colorize_info( f"* Repairing existing deployment \"{self.params['deployment_name']}\"..." ) ) # update meta info (with new value for existing_behavior) self.save_meta() # destroy existing deployment`` if self.existing_behavior == "replace": debug = self.params["debug"] click.echo(colorize_info("* Deleting existing deployment...")) shell_command( command=f'{self.config["app_dir"]}/destroy "{deployment_name}" --yes' + f' {"--debug" if debug else ""}', verbose=debug, ) # update meta info if deployment was destroyed self.save_meta() def validate_ngc_api_key(self, image, restricted_image=False): """ Check if NGC API key allows to log in and has access to appropriate NGC image @param image: NGC image to check access to @param restricted_image: If image is restricted to specific org/team? """ debug = self.params["debug"] ngc_api_key = self.params["ngc_api_key"] ngc_api_key_check = self.params["ngc_api_key_check"] # extract org and team from the image path r = re.findall( "^nvcr\\.io/([a-z0-9\\-_]+)/([a-z0-9\\-_]+/)?[a-z0-9\\-_]+:[a-z0-9\\-_.]+$", image, ) ngc_org, ngc_team = r[0] ngc_team = ngc_team.rstrip("/") if ngc_org == "nvidia": click.echo( colorize_info( "* Access to docker image can't be checked for NVIDIA org. But you'll be fine. Fingers crossed." ) ) return if debug: click.echo(colorize_info(f'* Will check access to NGC Org: "{ngc_org}"')) click.echo(colorize_info(f'* Will check access to NGC Team: "{ngc_team}"')) if ngc_api_key_check and ngc_api_key != "none": click.echo(colorize_info("* Validating NGC API key... "))
r = check_ngc_access(
7
2023-10-18 17:25:44+00:00
4k
blackgold3/SemanticBoost
mdm/sample.py
[ { "identifier": "recover_from_ric", "path": "mdm/dataset/recover_joints.py", "snippet": "def recover_from_ric(data, joints_num):\n if isinstance(data, np.ndarray):\n data = torch.from_numpy(data).float()\n dtype = \"numpy\"\n else:\n data = data.float()\n dtype = \"tensor\"\n\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n if dtype == \"numpy\":\n positions = positions.numpy()\n\n return positions" }, { "identifier": "ClassifierFreeSampleModel", "path": "mdm/model/cfg_sampler.py", "snippet": "class ClassifierFreeSampleModel(nn.Module):\n\n def __init__(self, model):\n super().__init__()\n self.model = model # model is the actual model to run\n\n # assert self.model.cond_mask_prob > 0, 'Cannot run a guided diffusion on a model that has not been trained with no conditions'\n\n # pointers to inner model\n self.njoints = self.model.njoints\n self.nfeats = self.model.nfeats\n self.cond_mode = self.model.cond_mode\n\n def forward(self, x, timesteps, y=None):\n cond_mode = self.model.cond_mode\n assert cond_mode in ['text', 'action', \"motion\", \"text-motion\"]\n y_uncond = deepcopy(y)\n y_uncond['uncond'] = True\n\n out = self.model(x, timesteps, y) ###### 全部条件生成\n\n if \"predict_length\" in out.keys():\n y_uncond[\"predict_mask\"] = out[\"predict_length\"]\n\n out_uncond = self.model(x, timesteps, y_uncond) ####### 全部无条件\n\n output = {}\n\n y['scale'] = y['scale'].to(out_uncond[\"output\"].device)\n\n output[\"output\"] = out_uncond[\"output\"] + (y['scale'].view(-1, 1, 1, 1) * (out[\"output\"] - out_uncond[\"output\"]))\n \n return output ##### 这里并不是生成 \\epsilon,而是特征" }, { "identifier": "create_model_and_diffusion", "path": "mdm/model_util.py", "snippet": "def create_model_and_diffusion(args, mode=\"text\", json_dict=None):\n model = MDM(**get_model_args(args), json_dict=json_dict)\n diffusion = create_gaussian_diffusion(args, mode)\n return model, diffusion" }, { "identifier": "load_model_wo_clip", "path": "mdm/model_util.py", "snippet": "def load_model_wo_clip(model, state_dict): \n print(\"load model checkpoints without clip\")\n\n try:\n new_state_dict = {}\n for key, value in state_dict.items():\n if \"in_proj\" in key:\n keyq = key.replace(\"in_proj_weight\", \"wq.weight\")\n keyk = key.replace(\"in_proj_weight\", \"wk.weight\")\n keyv = key.replace(\"in_proj_weight\", \"wv.weight\")\n inshape = value.shape[0] // 3\n valueq = value[:inshape]\n valuek = value[inshape:inshape * 2]\n valuev = value[inshape * 2:]\n\n new_state_dict[keyq] = valueq\n new_state_dict[keyk] = valuek\n new_state_dict[keyv] = valuev\n\n elif \"out_proj\" in key:\n newkey = key.replace(\"out_proj\", \"wo\")\n new_state_dict[newkey] = value\n \n else:\n new_state_dict[key] = value\n \n missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)\n except:\n missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)\n\n print(unexpected_keys)\n\n other_miss = []\n for key in missing_keys:\n if not key.startswith('clip_model.'):\n other_miss.append(key)\n\n print(other_miss)\n assert all([k.startswith('clip_model.') for k in missing_keys])" }, { "identifier": "create_trt_model", "path": "mdm/model_util.py", "snippet": "def create_trt_model(args, model, mode=\"text\", json_dict=None, device=\"cuda\"):\n model = TRT_MDM(model, json_dict, device=device)\n diffusion = create_gaussian_diffusion(args, mode)\n return model, diffusion" }, { "identifier": "double_take", "path": "mdm/double_take.py", "snippet": "def double_take(prompt=None, path=None, num_repetitions=1, model=None, diffusion=None, handshake_size=20, blend_len=10, default_length=196, guidance_param=2.5, lengths=\"120\", device=\"cpu\", progress=True):\n assert model is not None\n assert diffusion is not None\n if prompt is not None:\n texts = prompt.split(\"|\")\n lengths = lengths.split(\"|\")\n num_samples = len(texts)\n length = []\n captions = []\n for i in range(len(texts)):\n if i < len(lengths):\n try:\n nframes = int(lengths[i])\n except:\n nframes = default_length\n else:\n nframes = default_length\n\n curr_text = texts[i]\n\n captions.append(curr_text)\n length.append(nframes)\n\n model_kwargs = {'y': {\n 'mask': torch.ones((len(texts), 1, 1, default_length)), # 196 is humanml max frames number\n 'lengths': torch.tensor(length),\n 'text': captions,\n 'tokens': [''],\n 'scale': torch.ones(len(texts))*guidance_param\n }}\n \n all_motions = []\n\n for rep_i in range(num_repetitions):\n if guidance_param != 1:\n model_kwargs['y']['scale'] = torch.ones(num_samples, device=device) * guidance_param\n model_kwargs['y'] = {key: val.to(device) if torch.is_tensor(val) else val for key, val in model_kwargs['y'].items()}\n\n max_arb_len = model_kwargs['y']['lengths'].max()\n min_arb_len = 2 * handshake_size + 2*blend_len + 10\n\n for ii, len_s in enumerate(model_kwargs['y']['lengths']):\n if len_s > max_arb_len:\n model_kwargs['y']['lengths'][ii] = max_arb_len\n if len_s < min_arb_len:\n model_kwargs['y']['lengths'][ii] = min_arb_len\n\n sample = double_take_arb_len(diffusion, model, model_kwargs, max_arb_len, blend_len, handshake_size, device, progress=progress) \n step_sizes = np.zeros(len(model_kwargs['y']['lengths']), dtype=int)\n for ii, len_i in enumerate(model_kwargs['y']['lengths']):\n if ii == 0:\n step_sizes[ii] = len_i\n continue\n step_sizes[ii] = step_sizes[ii-1] + len_i - handshake_size\n\n final_n_frames = step_sizes[-1]\n sample = unfold_sample_arb_len(sample, handshake_size, step_sizes, final_n_frames, model_kwargs)\n\n all_motions.append(sample)\n \n all_motions = torch.cat(all_motions, dim=0)\n return all_motions, step_sizes" } ]
from argparse import Namespace from mdm.dataset.recover_joints import recover_from_ric from mdm.model.cfg_sampler import ClassifierFreeSampleModel from mdm.model_util import create_model_and_diffusion, load_model_wo_clip, create_trt_model from mdm.dataset.recover_smr import * from mdm.double_take import double_take import torch import os import numpy as np import json
2,862
class Predictor(object): def __init__(self, **kargs): self.path = kargs["path"] self.handshake_size = 20 self.blend_size = 10 self.speedup = kargs.get("speedup", 1) args = Namespace() with open(self.path["config"], 'r') as f: params1 = json.load(f) for key, value in params1.items(): setattr(args, key, value) args.quantization = False mode = kargs.get("mode", "camd") if mode != "mdm" and (not os.path.exists(self.path[f"{mode}1"]) or not os.path.exists(self.path[f"{mode}2"])): self.speedup = 0 if mode == "camd": args.arch = "llama_decoder_static" args.encode_full = 2 args.txt_tokens = 1 args.model_path = self.path["camd"] args.rep = "smr" args.conv_bias = False args.conv_norm = "rmsnorm" args.conv_activate = "silu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 elif mode == "camd-augment": args.arch = "llama_decoder_static" args.encode_full = 2 args.txt_tokens = 1 args.model_path = self.path["camd-augment"] args.rep = "smr" args.conv_bias = False args.conv_norm = "rmsnorm" args.conv_activate = "silu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 elif mode == "mdm": args.arch = "trans_enc" args.encode_full = 0 args.txt_tokens = 0 args.model_path = self.path["mdm"] args.rep = "t2m" elif mode == "ncamd": args.arch = "llama_decoder_rope" args.encode_full = 2 args.txt_tokens = 2 args.model_path = self.path["ncamd"] args.rep = "smr" args.conv_bias = True args.conv_norm = "layernorm" args.conv_activate = "relu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 elif mode == "ncamd-augment": args.arch = "llama_decoder_rope" args.encode_full = 2 args.txt_tokens = 2 args.model_path = self.path["ncamd-augment"] args.rep = "smr" args.conv_bias = True args.conv_norm = "layernorm" args.conv_activate = "relu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 self.skip_steps = kargs.get("skip_steps", 0) self.device = kargs.get("device", "cpu") self.length = kargs.get("length", "120") self.args = args self.rep = args.rep self.num_frames = args.num_frames self.condition = kargs.get("condition", "text") if self.condition == "uncond": self.args.guidance_param = 0 if self.rep == "t2m": extension = "" elif self.rep == "smr": extension = "_smr" self.mean = torch.from_numpy(np.load(os.path.join(self.path["dataset_dir"], 'Mean{}.npy'.format(extension)))).to(self.device) self.std = torch.from_numpy(np.load(os.path.join(self.path["dataset_dir"], 'Std{}.npy'.format(extension)))).to(self.device) if not args.quantization: print(f"Loading checkpoints from...") self.model, self.diffusion = create_model_and_diffusion(args, args.control_signal, self.path) state_dict = torch.load(self.args.model_path, map_location='cpu') if mode == "mdm": load_model_wo_clip(self.model, state_dict) else: load_model_wo_clip(self.model, state_dict["ema"]) if self.args.guidance_param != 1 and not self.args.unconstrained:
class Predictor(object): def __init__(self, **kargs): self.path = kargs["path"] self.handshake_size = 20 self.blend_size = 10 self.speedup = kargs.get("speedup", 1) args = Namespace() with open(self.path["config"], 'r') as f: params1 = json.load(f) for key, value in params1.items(): setattr(args, key, value) args.quantization = False mode = kargs.get("mode", "camd") if mode != "mdm" and (not os.path.exists(self.path[f"{mode}1"]) or not os.path.exists(self.path[f"{mode}2"])): self.speedup = 0 if mode == "camd": args.arch = "llama_decoder_static" args.encode_full = 2 args.txt_tokens = 1 args.model_path = self.path["camd"] args.rep = "smr" args.conv_bias = False args.conv_norm = "rmsnorm" args.conv_activate = "silu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 elif mode == "camd-augment": args.arch = "llama_decoder_static" args.encode_full = 2 args.txt_tokens = 1 args.model_path = self.path["camd-augment"] args.rep = "smr" args.conv_bias = False args.conv_norm = "rmsnorm" args.conv_activate = "silu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 elif mode == "mdm": args.arch = "trans_enc" args.encode_full = 0 args.txt_tokens = 0 args.model_path = self.path["mdm"] args.rep = "t2m" elif mode == "ncamd": args.arch = "llama_decoder_rope" args.encode_full = 2 args.txt_tokens = 2 args.model_path = self.path["ncamd"] args.rep = "smr" args.conv_bias = True args.conv_norm = "layernorm" args.conv_activate = "relu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 elif mode == "ncamd-augment": args.arch = "llama_decoder_rope" args.encode_full = 2 args.txt_tokens = 2 args.model_path = self.path["ncamd-augment"] args.rep = "smr" args.conv_bias = True args.conv_norm = "layernorm" args.conv_activate = "relu" args.trans_activate = "swiglu" args.quantization = self.speedup == 1 self.skip_steps = kargs.get("skip_steps", 0) self.device = kargs.get("device", "cpu") self.length = kargs.get("length", "120") self.args = args self.rep = args.rep self.num_frames = args.num_frames self.condition = kargs.get("condition", "text") if self.condition == "uncond": self.args.guidance_param = 0 if self.rep == "t2m": extension = "" elif self.rep == "smr": extension = "_smr" self.mean = torch.from_numpy(np.load(os.path.join(self.path["dataset_dir"], 'Mean{}.npy'.format(extension)))).to(self.device) self.std = torch.from_numpy(np.load(os.path.join(self.path["dataset_dir"], 'Std{}.npy'.format(extension)))).to(self.device) if not args.quantization: print(f"Loading checkpoints from...") self.model, self.diffusion = create_model_and_diffusion(args, args.control_signal, self.path) state_dict = torch.load(self.args.model_path, map_location='cpu') if mode == "mdm": load_model_wo_clip(self.model, state_dict) else: load_model_wo_clip(self.model, state_dict["ema"]) if self.args.guidance_param != 1 and not self.args.unconstrained:
self.model = ClassifierFreeSampleModel(self.model) # wrapping model with the classifier-free sampler
1
2023-10-20 14:53:26+00:00
4k
justchenhao/SILI_CD
datasets/base_dataset.py
[ { "identifier": "get_transforms", "path": "datasets/transforms.py", "snippet": "def get_transforms(norm=False, img_size=256):\n basic_transform = []\n basic_transform.append(T.ToTensor()) # ndarray转为 torch.FloatTensor, 范围[0,1]\n if norm:\n basic_transform.append(T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))\n basic_transform.append(T.Resize(size=(img_size, img_size), interpolation=InterpolationMode.BILINEAR))\n return T.Compose(basic_transform)" }, { "identifier": "get_mask_transforms", "path": "datasets/transforms.py", "snippet": "def get_mask_transforms(img_size=256):\n basic_target_transform = T.Compose(\n [\n MaskToTensor(),\n T.Resize(size=(img_size, img_size), interpolation=InterpolationMode.NEAREST),\n ]\n )\n return basic_target_transform" }, { "identifier": "get_seg_augs", "path": "datasets/transforms.py", "snippet": "def get_seg_augs(imgz_size=256, data_keys=(\"input\", \"mask\")):\n default_seg_augs = K.AugmentationSequential(\n K.RandomHorizontalFlip(p=0.5),\n K.RandomVerticalFlip(p=0.5),\n K.RandomResizedCrop(\n size=(imgz_size, imgz_size), scale=(0.8, 1.0), resample=\"bilinear\", align_corners=False\n ),\n K.RandomGaussianBlur(kernel_size=(3, 3), sigma=(0.1, 2.0), p=0.5),\n K.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n data_keys=data_keys\n )\n return default_seg_augs" }, { "identifier": "visualize_tensors", "path": "misc/torchutils.py", "snippet": "def visualize_tensors(*tensors):\n \"\"\"\n 可视化tensor,支持单通道特征或3通道图像\n :param tensors: tensor: C*H*W, C=1/3\n :return:\n \"\"\"\n import matplotlib.pyplot as plt\n # from misc.torchutils import tensor2np\n images = []\n for tensor in tensors:\n assert tensor.ndim == 3 or tensor.ndim==2\n if tensor.ndim ==3:\n assert tensor.shape[0] == 1 or tensor.shape[0] == 3\n images.append(tensor2np(tensor))\n nums = len(images)\n if nums>1:\n fig, axs = plt.subplots(1, nums)\n for i, image in enumerate(images):\n axs[i].imshow(image, cmap='jet')\n plt.show()\n elif nums == 1:\n fig, ax = plt.subplots(1, nums)\n for i, image in enumerate(images):\n ax.imshow(image, cmap='jet')\n plt.show()" } ]
import os import numpy as np import torch from typing import Dict, Sequence, Tuple, Optional, Union from PIL import Image from torch.utils import data from datasets.transforms import get_transforms, get_mask_transforms from datasets.transforms import get_seg_augs from misc.imutils import pil_rescale, pil_resize from misc.imutils import pil_rescale, pil_resize from misc.torchutils import visualize_tensors
2,067
list_folder_name: str = 'list', scale_ratios: Union[int, list] = 1): super(ImageDataset, self).__init__() self.root_dir = root_dir self.split = split # train | train_aug | val self.list_path = os.path.join(self.root_dir, list_folder_name, self.split+'.txt') self.img_name_list = load_img_name_list(self.list_path) if isinstance(img_folder_name, list) or isinstance(img_folder_name, tuple): # 此处为了兼容存在多个img_folder,内部文件共名字的情况,比如img_folder_name=['A','B'] self.img_folder_with_name_list = [img_folder_name_+'/'+name for name in self.img_name_list for img_folder_name_ in img_folder_name] elif isinstance(img_folder_name, str): self.img_folder_with_name_list = [img_folder_name+'/'+name for name in self.img_name_list] else: raise NotImplementedError self.A_size = len(self.img_folder_with_name_list) # get the size of dataset A self.img_folder_name = img_folder_name self.img_size = img_size self.norm = norm self.basic_transforms = get_transforms(norm=norm, img_size=img_size) self.scale_ratios = scale_ratios def __getitem__(self, index): folder_with_name = self.img_folder_with_name_list[index % self.A_size] img_folder_name = folder_with_name.split('/')[0] name = folder_with_name.split('/')[-1] A_path = os.path.join(self.root_dir, img_folder_name, name) img = np.asarray(Image.open(A_path).convert('RGB')) scales = self.scale_ratios if isinstance(scales, list): scale = scales[torch.randint(len(scales), (1,)).item()] else: scale = scales if scale != 1: h, w = img.shape[:2] img = pil_rescale(img, scale=scale, order=3) img = pil_resize(img, size=[h, w], order=3) if self.basic_transforms is not None: img = self.basic_transforms(img) return {'A': img, 'name': name} def __len__(self): """Return the total number of images in the dataset.""" return self.A_size class SegDataset(ImageDataset): ''' transforms: 表示同时对image 和 mask 做变换; ''' def __init__(self, root_dir: str, split: str = 'train', img_size: int = 256, norm: bool = False, img_folder_name: Union[str, list, tuple] = 'A', label_transform: str = 'norm', label_folder_name: str = 'label', scale_ratios: Union[int, list] = 1): super(SegDataset, self).__init__(root_dir, split=split, img_size=img_size, norm=norm, img_folder_name=img_folder_name, scale_ratios=scale_ratios) self.basic_mask_transforms = get_mask_transforms(img_size=img_size) self.label_folder_name = label_folder_name self.label_transform = label_transform def __getitem__(self, index): # name = self.img_name_list[index] # A_path = os.path.join(self.root_dir, self.img_folder_name, name) folder_with_name = self.img_folder_with_name_list[index % self.A_size] img_folder_name = folder_with_name.split('/')[0] name = folder_with_name.split('/')[-1] A_path = os.path.join(self.root_dir, img_folder_name, name) img = np.asarray(Image.open(A_path).convert('RGB')) scales = self.scale_ratios if isinstance(scales, list): scale = scales[torch.randint(len(scales), (1,)).item()] else: scale = scales if scale != 1: h, w = img.shape[:2] img = pil_rescale(img, scale=scale, order=3) img = pil_resize(img, size=[h, w], order=3) L_path = os.path.join(self.root_dir, self.label_folder_name, name) mask = np.array(Image.open(L_path), dtype=np.uint8) # 二分类中,前景标注为255 if self.label_transform == 'norm': mask = mask // 255 elif self.label_transform == 'ignore0_sub1': mask = mask - 1 # 原来label==0的部分变为255,自动被ignore if self.basic_transforms is not None: img = self.basic_transforms(img) if self.basic_mask_transforms is not None: mask = self.basic_mask_transforms(mask) return {'A': img, 'mask': mask, 'name': name} if __name__ == '__main__': is_train = True root_dir = r'G:/tmp_data/inria_cut256/' img_folder_name = ['A'] split = 'train' label_transform = 'norm' dataset = SegDataset(root_dir=root_dir, split=split, img_folder_name=img_folder_name, label_transform=label_transform) print(f'dataset len is {len(dataset)}')
""" some basic data loader for example: Image loader, Segmentation loader, data root ├─A ├─label └─list """ def load_img_name_list(dataset_path): img_name_list = np.loadtxt(dataset_path, dtype=str) if img_name_list.ndim == 2: return img_name_list[:, 0] return img_name_list class ImageDataset(data.Dataset): """list dataloder""" def __init__(self, root_dir: str, split: str = 'train', img_size: int = 256, norm: bool = False, img_folder_name: Union[str, list, tuple] = 'A', list_folder_name: str = 'list', scale_ratios: Union[int, list] = 1): super(ImageDataset, self).__init__() self.root_dir = root_dir self.split = split # train | train_aug | val self.list_path = os.path.join(self.root_dir, list_folder_name, self.split+'.txt') self.img_name_list = load_img_name_list(self.list_path) if isinstance(img_folder_name, list) or isinstance(img_folder_name, tuple): # 此处为了兼容存在多个img_folder,内部文件共名字的情况,比如img_folder_name=['A','B'] self.img_folder_with_name_list = [img_folder_name_+'/'+name for name in self.img_name_list for img_folder_name_ in img_folder_name] elif isinstance(img_folder_name, str): self.img_folder_with_name_list = [img_folder_name+'/'+name for name in self.img_name_list] else: raise NotImplementedError self.A_size = len(self.img_folder_with_name_list) # get the size of dataset A self.img_folder_name = img_folder_name self.img_size = img_size self.norm = norm self.basic_transforms = get_transforms(norm=norm, img_size=img_size) self.scale_ratios = scale_ratios def __getitem__(self, index): folder_with_name = self.img_folder_with_name_list[index % self.A_size] img_folder_name = folder_with_name.split('/')[0] name = folder_with_name.split('/')[-1] A_path = os.path.join(self.root_dir, img_folder_name, name) img = np.asarray(Image.open(A_path).convert('RGB')) scales = self.scale_ratios if isinstance(scales, list): scale = scales[torch.randint(len(scales), (1,)).item()] else: scale = scales if scale != 1: h, w = img.shape[:2] img = pil_rescale(img, scale=scale, order=3) img = pil_resize(img, size=[h, w], order=3) if self.basic_transforms is not None: img = self.basic_transforms(img) return {'A': img, 'name': name} def __len__(self): """Return the total number of images in the dataset.""" return self.A_size class SegDataset(ImageDataset): ''' transforms: 表示同时对image 和 mask 做变换; ''' def __init__(self, root_dir: str, split: str = 'train', img_size: int = 256, norm: bool = False, img_folder_name: Union[str, list, tuple] = 'A', label_transform: str = 'norm', label_folder_name: str = 'label', scale_ratios: Union[int, list] = 1): super(SegDataset, self).__init__(root_dir, split=split, img_size=img_size, norm=norm, img_folder_name=img_folder_name, scale_ratios=scale_ratios) self.basic_mask_transforms = get_mask_transforms(img_size=img_size) self.label_folder_name = label_folder_name self.label_transform = label_transform def __getitem__(self, index): # name = self.img_name_list[index] # A_path = os.path.join(self.root_dir, self.img_folder_name, name) folder_with_name = self.img_folder_with_name_list[index % self.A_size] img_folder_name = folder_with_name.split('/')[0] name = folder_with_name.split('/')[-1] A_path = os.path.join(self.root_dir, img_folder_name, name) img = np.asarray(Image.open(A_path).convert('RGB')) scales = self.scale_ratios if isinstance(scales, list): scale = scales[torch.randint(len(scales), (1,)).item()] else: scale = scales if scale != 1: h, w = img.shape[:2] img = pil_rescale(img, scale=scale, order=3) img = pil_resize(img, size=[h, w], order=3) L_path = os.path.join(self.root_dir, self.label_folder_name, name) mask = np.array(Image.open(L_path), dtype=np.uint8) # 二分类中,前景标注为255 if self.label_transform == 'norm': mask = mask // 255 elif self.label_transform == 'ignore0_sub1': mask = mask - 1 # 原来label==0的部分变为255,自动被ignore if self.basic_transforms is not None: img = self.basic_transforms(img) if self.basic_mask_transforms is not None: mask = self.basic_mask_transforms(mask) return {'A': img, 'mask': mask, 'name': name} if __name__ == '__main__': is_train = True root_dir = r'G:/tmp_data/inria_cut256/' img_folder_name = ['A'] split = 'train' label_transform = 'norm' dataset = SegDataset(root_dir=root_dir, split=split, img_folder_name=img_folder_name, label_transform=label_transform) print(f'dataset len is {len(dataset)}')
augs = get_seg_augs(imgz_size=256)
2
2023-10-21 09:09:57+00:00
4k
pythonlessons/FinRock
finrock/trading_env.py
[ { "identifier": "State", "path": "finrock/state.py", "snippet": "class State:\n def __init__(\n self, \n timestamp: str, \n open: float, \n high: float, \n low: float, \n close: float, \n volume: float=0.0,\n indicators: list=[]\n ):\n self.timestamp = timestamp\n self.open = open\n self.high = high\n self.low = low\n self.close = close\n self.volume = volume\n self.indicators = indicators\n\n try:\n self.date = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise ValueError(f'received invalid timestamp date format: {timestamp}, expected: YYYY-MM-DD HH:MM:SS')\n \n self._balance = 0.0 # balance in cash\n self._assets = 0.0 # balance in assets\n self._allocation_percentage = 0.0 # percentage of assets allocated to this state\n \n @property\n def balance(self):\n return self._balance\n \n @balance.setter\n def balance(self, value: float):\n self._balance = value\n\n @property\n def assets(self):\n return self._assets\n \n @assets.setter\n def assets(self, value: float):\n self._assets = value\n\n @property\n def account_value(self):\n return self.balance + self.assets * self.close\n\n @property\n def allocation_percentage(self):\n return self._allocation_percentage\n \n @allocation_percentage.setter\n def allocation_percentage(self, value: float):\n assert 0.0 <= value <= 1.0, f'allocation_percentage value must be between 0.0 and 1.0, received: {value}'\n self._allocation_percentage = value" }, { "identifier": "Observations", "path": "finrock/state.py", "snippet": "class Observations:\n def __init__(\n self, \n window_size: int,\n observations: typing.List[State]=[],\n ):\n self._observations = observations\n self._window_size = window_size\n\n assert isinstance(self._observations, list) == True, \"observations must be a list\"\n assert len(self._observations) <= self._window_size, f'observations length must be <= window_size, received: {len(self._observations)}'\n assert all(isinstance(observation, State) for observation in self._observations) == True, \"observations must be a list of State objects\"\n\n def __len__(self) -> int:\n return len(self._observations)\n \n @property\n def window_size(self) -> int:\n return self._window_size\n \n @property\n def observations(self) -> typing.List[State]:\n return self._observations\n \n @property\n def full(self) -> bool:\n return len(self._observations) == self._window_size\n \n def __getitem__(self, idx: int) -> State:\n try:\n return self._observations[idx]\n except IndexError:\n raise IndexError(f'index out of range: {idx}, observations length: {len(self._observations)}')\n \n def __iter__(self) -> State:\n \"\"\" Create a generator that iterate over the Sequence.\"\"\"\n for index in range(len(self)):\n yield self[index]\n\n def reset(self) -> None:\n self._observations = []\n \n def append(self, state: State) -> None:\n # state should be State object or None\n assert isinstance(state, State) or state is None, \"state must be a State object or None\"\n self._observations.append(state)\n\n if len(self._observations) > self._window_size:\n self._observations.pop(0)" }, { "identifier": "PdDataFeeder", "path": "finrock/data_feeder.py", "snippet": "class PdDataFeeder:\n def __init__(\n self, \n df: pd.DataFrame,\n indicators: list = [],\n min: float = None,\n max: float = None\n ) -> None:\n self._df = df\n self._min = min\n self._max = max\n self._indicators = indicators\n self._cache = {}\n\n assert isinstance(self._df, pd.DataFrame) == True, \"df must be a pandas.DataFrame\"\n assert 'timestamp' in self._df.columns, \"df must have 'timestamp' column\"\n assert 'open' in self._df.columns, \"df must have 'open' column\"\n assert 'high' in self._df.columns, \"df must have 'high' column\"\n assert 'low' in self._df.columns, \"df must have 'low' column\"\n assert 'close' in self._df.columns, \"df must have 'close' column\"\n\n assert isinstance(self._indicators, list) == True, \"indicators must be an iterable\"\n assert all(isinstance(indicator, Indicator) for indicator in self._indicators) == True, \"indicators must be a list of Indicator objects\"\n\n @property\n def min(self) -> float:\n return self._min or self._df['low'].min()\n \n @property\n def max(self) -> float:\n return self._max or self._df['high'].max()\n\n def __len__(self) -> int:\n return len(self._df)\n \n def __getitem__(self, idx: int, args=None) -> State:\n # Use cache to speed up training\n if idx in self._cache:\n return self._cache[idx]\n\n indicators = []\n for indicator in self._indicators:\n results = indicator(idx)\n if results is None:\n self._cache[idx] = None\n return None\n \n indicators.append(results)\n\n data = self._df.iloc[idx]\n state = State(\n timestamp=data['timestamp'],\n open=data['open'],\n high=data['high'],\n low=data['low'],\n close=data['close'],\n volume=data.get('volume', 0.0),\n indicators=indicators\n )\n self._cache[idx] = state\n\n return state\n \n def __iter__(self) -> State:\n \"\"\" Create a generator that iterate over the Sequence.\"\"\"\n for index in range(len(self)):\n yield self[index]" }, { "identifier": "simpleReward", "path": "finrock/reward.py", "snippet": "def simpleReward(observations: Observations) -> float:\n \n assert isinstance(observations, Observations) == True, \"observations must be an instance of Observations\"\n\n last_state, next_state = observations[-2:]\n\n # buy\n if next_state.allocation_percentage > last_state.allocation_percentage:\n # check whether it was good or bad to buy\n order_size = next_state.allocation_percentage - last_state.allocation_percentage\n reward = (next_state.close - last_state.close) / last_state.close * order_size\n\n # sell\n elif next_state.allocation_percentage < last_state.allocation_percentage:\n # check whether it was good or bad to sell\n order_size = last_state.allocation_percentage - next_state.allocation_percentage\n reward = -1 * (next_state.close - last_state.close) / last_state.close * order_size\n\n # hold\n else:\n # check whether it was good or bad to hold\n ratio = -1 if not last_state.allocation_percentage else last_state.allocation_percentage\n reward = (next_state.close - last_state.close) / last_state.close * ratio\n \n return reward" } ]
import typing import numpy as np from .state import State, Observations from .data_feeder import PdDataFeeder from .reward import simpleReward
2,050
class TradingEnv: def __init__( self, data_feeder: PdDataFeeder, output_transformer: typing.Callable = None, initial_balance: float = 1000.0, max_episode_steps: int = None, window_size: int = 50, reward_function: typing.Callable = simpleReward, metrics: typing.List[typing.Callable] = [] ) -> None: self._data_feeder = data_feeder self._output_transformer = output_transformer self._initial_balance = initial_balance self._max_episode_steps = max_episode_steps if max_episode_steps is not None else len(data_feeder) self._window_size = window_size self._reward_function = reward_function self._metrics = metrics self._observations = Observations(window_size=window_size) self._observation_space = np.zeros(self.reset()[0].shape) self.action_space = 3 @property def observation_space(self): return self._observation_space
class TradingEnv: def __init__( self, data_feeder: PdDataFeeder, output_transformer: typing.Callable = None, initial_balance: float = 1000.0, max_episode_steps: int = None, window_size: int = 50, reward_function: typing.Callable = simpleReward, metrics: typing.List[typing.Callable] = [] ) -> None: self._data_feeder = data_feeder self._output_transformer = output_transformer self._initial_balance = initial_balance self._max_episode_steps = max_episode_steps if max_episode_steps is not None else len(data_feeder) self._window_size = window_size self._reward_function = reward_function self._metrics = metrics self._observations = Observations(window_size=window_size) self._observation_space = np.zeros(self.reset()[0].shape) self.action_space = 3 @property def observation_space(self): return self._observation_space
def _get_obs(self, index: int, balance: float=None) -> State:
0
2023-10-23 07:44:54+00:00
4k
hitlic/deepepochs
examples/10-multi-optimizers.py
[ { "identifier": "Trainer", "path": "deepepochs/trainer.py", "snippet": "class Trainer(TrainerBase):\r\n def train_step(self,\r\n batch_x:[torch.Tensor, List[torch.Tensor]],\r\n batch_y:[torch.Tensor, List[torch.Tensor]],\r\n **step_args\r\n ) -> Dict[str, PatchBase]:\r\n \"\"\"\r\n TODO: 非常规训练可修改本方法中的代码。\r\n Args:\r\n batch_x: 一个mini-batch的模型输入\r\n batch_y: 一个mini-batch的标签或targets\r\n step_args: 当使用EpochTask时,EpochTask的step_args参数\r\n Returns:\r\n None \r\n 或\r\n dict: 键为指标名,值为封装了数据和指标函数的PatchBase子类对象\r\n \"\"\"\r\n if self.grad_accumulate_steps == 1:\r\n model_out = self.model(*batch_x)\r\n # self.loss是对Trainer中loss参数的封装,会自动调用opt.zero_grad、loss.backward、opt.step等方法\r\n self.loss(model_out, batch_y)\r\n return\r\n\r\n # 累积梯度训练\r\n b_size = batch_size(batch_x)\r\n sub_batch_size = math.ceil(b_size / self.grad_accumulate_steps)\r\n for sub_batch_idx, (sub_batch_x, sub_batch_y) in enumerate(zip(batches(batch_x, sub_batch_size), batches(batch_y, sub_batch_size))):\r\n if self.accelerator is None:\r\n model_out = self.model(*sub_batch_x)\r\n self.loss(model_out, sub_batch_y, sub_batch_idx + 1 < self.grad_accumulate_steps)\r\n else:\r\n with self.accelerator.accumulate(self.model.model):\r\n model_out = self.model(*sub_batch_x)\r\n self.loss(model_out, sub_batch_y, sub_batch_idx + 1 < self.grad_accumulate_steps)\r\n\r\n def evaluate_step(self,\r\n batch_x:[torch.Tensor, List[torch.Tensor]],\r\n batch_y:[torch.Tensor, List[torch.Tensor]],\r\n **step_args\r\n ) -> Dict[str, PatchBase]:\r\n \"\"\"\r\n TODO: 非常规验证或测试可修改本方法中的代码。也可以定义val_step方法或test_step方法。\r\n Args:\r\n batch_x: 一个mini-batch的模型输入\r\n batch_y: 一个mini-batch的标签或targets\r\n step_args: 当使用EpochTask时,EpochTask的step_args参数\r\n Returns:\r\n None \r\n 或\r\n dict: 键为指标名,值为封装了数据和指标函数的PatchBase子类对象\r\n \"\"\"\r\n # self.model是对Trainer中model参数的封装,\r\n model_out = self.model(*batch_x)\r\n # self.loss是对Trainer中loss参数的封装,会自动调用opt.zero_grad、loss.backward、opt.step等方法\r\n self.loss(model_out, batch_y)\r" }, { "identifier": "Optimizer", "path": "deepepochs/optimizer.py", "snippet": "class Optimizer:\n def __init__(self, opt, scheduler=None, sched_on='epoch', sched_with_loss=False):\n \"\"\"\n 优化器组合,对优化器和学习率调度器进行统一管理。\n Args:\n opt: torch.optim.*\n scheduler: torch.optim.lr_scheduler.*\n sched_on: 学习率调整是每个epoch还是每个step\n sched_with_loss: scheduler.step方法是否需要损失作为参数(例如ReduceLROnPlateau)\n \"\"\"\n self.opt = opt\n self.scheduler = scheduler\n assert sched_on in ['step', 'epoch'], '`sched_on`取值为\"step\"或\"epoch\"!'\n self.sched_on = sched_on\n self.sched_with_loss = sched_with_loss\n\n def zero_grad(self):\n self.opt.zero_grad()\n\n def get_last_lr(self):\n return self.scheduler.get_last_lr() if self.scheduler is not None else None\n\n def step(self, at='step', loss=None):\n if at == 'step':\n self.opt.step()\n if self.sched_on == 'step':\n self.sched_step(loss)\n elif at == 'epoch':\n if self.sched_on == 'epoch':\n self.sched_step(loss)\n else:\n raise ValueError('Optimizer.step方法的`at`参数取值为\"step\"或\"epoch\"')\n\n def sched_step(self, loss):\n if self.scheduler is not None:\n if self.sched_with_loss:\n assert loss is not None, \"学习率调度要求损失作为参数,但`train_step`和`evaluate_step`都没有返回`loss`!\"\n self.scheduler.step(loss)\n else:\n self.scheduler.step()\n\n def state_dict(self):\n sched_state = None if self.scheduler is None else self.scheduler.state_dict()\n return {'opt_state': self.opt.state_dict(), 'sched_state': sched_state}\n\n def load_state_dict(self, state):\n opt_state, sched_state = state['opt_state'], state['sched_state']\n self.opt.load_state_dict(opt_state)\n if sched_state is not None and self.scheduler is not None:\n self.scheduler.load_state_dict(opt_state)\n\n @property\n def param_groups(self):\n return self.opt.param_groups\n\n def get_current_lr(self):\n for param_group in self.param_groups:\n return param_group['lr']" } ]
import torch from torch import nn from torch.nn import functional as F from torchvision.datasets import MNIST from torchvision import transforms from torch.utils.data import DataLoader, random_split from deepepochs import Trainer, Optimizer
1,703
""" 使用多个优化器 """ data_dir = './datasets' transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) mnist_full = MNIST(data_dir, train=True, transform=transform, download=True) train_ds, val_ds = random_split(mnist_full, [55000, 5000]) test_ds = MNIST(data_dir, train=False, transform=transform, download=True) train_dl = DataLoader(train_ds, batch_size=32) val_dl = DataLoader(val_ds, batch_size=32) test_dl = DataLoader(test_ds, batch_size=32) channels, width, height = (1, 28, 28) model = nn.Sequential( nn.Flatten(), nn.Linear(channels * width * height, 64), nn.ReLU(), nn.Dropout(0.1), nn.Linear(64, 64), nn.ReLU(), nn.Dropout(0.1), nn.Linear(64, 10) ) # 定义多个优化器,实际使用中每个优化器应针对不同的模型组成部分 # 注意:大多数情况下不需要多个优化器,而是为模型参数分组,每个组使用不同的学习率 opt1 = torch.optim.Adam(model.parameters(), lr=2e-4) opt2 = torch.optim.Adam(model.parameters(), lr=2e-4) opts = [opt1, opt2] # 第一种方式
""" 使用多个优化器 """ data_dir = './datasets' transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) mnist_full = MNIST(data_dir, train=True, transform=transform, download=True) train_ds, val_ds = random_split(mnist_full, [55000, 5000]) test_ds = MNIST(data_dir, train=False, transform=transform, download=True) train_dl = DataLoader(train_ds, batch_size=32) val_dl = DataLoader(val_ds, batch_size=32) test_dl = DataLoader(test_ds, batch_size=32) channels, width, height = (1, 28, 28) model = nn.Sequential( nn.Flatten(), nn.Linear(channels * width * height, 64), nn.ReLU(), nn.Dropout(0.1), nn.Linear(64, 64), nn.ReLU(), nn.Dropout(0.1), nn.Linear(64, 10) ) # 定义多个优化器,实际使用中每个优化器应针对不同的模型组成部分 # 注意:大多数情况下不需要多个优化器,而是为模型参数分组,每个组使用不同的学习率 opt1 = torch.optim.Adam(model.parameters(), lr=2e-4) opt2 = torch.optim.Adam(model.parameters(), lr=2e-4) opts = [opt1, opt2] # 第一种方式
opts = [Optimizer(opt1), Optimizer(opt2)] # 第二种方式,这种方式可为每个优化器指定高度器
1
2023-10-19 05:41:48+00:00
4k
yukara-ikemiya/minimal-sqvae
models/sqvae.py
[ { "identifier": "Encoder", "path": "models/encdec.py", "snippet": "class Encoder(nn.Module):\n def __init__(self, in_ch, width, depth, num_down, stride, **kwargs):\n super().__init__()\n\n blocks = []\n for ii in range(num_down):\n # Down-sampling\n down = nn.Conv2d(in_ch if ii == 0 else width, width, stride * 2, stride, stride // 2)\n # ResNet\n resnet = Resnet2D(width, depth, **kwargs)\n\n blocks.extend([down, resnet])\n\n # output layer\n conv_ch_out = nn.Conv2d(width, width, 3, 1, 1)\n blocks.append(conv_ch_out)\n\n self.model = nn.Sequential(*blocks)\n\n def forward(self, x):\n return self.model(x)" }, { "identifier": "Decoder", "path": "models/encdec.py", "snippet": "class Decoder(nn.Module):\n # Each argument name corresponds to that of EncoderBlock\n def __init__(self, in_ch, width, depth, num_down, stride, **kwargs):\n super().__init__()\n kernel_size = stride * 2 if stride != 1 else 3\n padding = stride // 2 if stride != 1 else 1\n\n blocks = [nn.Conv2d(width, width, 3, 1, 1)]\n for ii in range(num_down):\n # ResNet\n resnet = Resnet2D(width, depth, **kwargs)\n # Up-sampling\n up = nn.ConvTranspose2d(width, in_ch if ii == (num_down - 1) else width,\n kernel_size, stride, padding)\n\n blocks.extend([resnet, up])\n\n # output layer for 0-1 image date\n out = nn.Sigmoid()\n\n blocks.append(out)\n self.model = nn.Sequential(*blocks)\n\n def forward(self, x):\n return self.model(x)" }, { "identifier": "SQuantizer", "path": "models/stochastic_quantizer.py", "snippet": "class SQuantizer(nn.Module):\n # Type-I of the Gaussian SQ-VAE in the paper\n def __init__(self, size_dict: int, dim_dict: int, var_q_init: float):\n super().__init__()\n self.size_dict = size_dict\n self.dim_dict = dim_dict\n\n # Codebook\n self.codebook = nn.Parameter(torch.randn(self.size_dict, self.dim_dict))\n self.var_q = nn.Parameter(torch.zeros(1))\n self.temperature = 1.0\n\n # self.init\n self.register_buffer('init', torch.zeros(1, dtype=torch.bool))\n\n # Dimension of input data\n # 1 -> 1d (e.g. audio), 2 -> 2d (e.g. image)\n self.dim_data = 2\n\n # Initial variance\n self.var_q_init = var_q_init\n self.register_buffer('var_init', torch.tensor([var_q_init]))\n\n def set_temperature(self, value: float):\n self.temperature = value\n\n def set_data_dimension(self, dim: int):\n self.dim_data = dim\n\n def forward(self, z):\n # Pre-reshape\n z = self._pre_reshape(z)\n\n # Initilize codebook\n if self.training and not self.init[0]:\n self._init_codebook(z)\n\n # limit variance range using sigmoid\n var_q = torch.sigmoid(self.var_q) * 2. * self.var_init\n\n # Quantize\n z_quantize, loss, metrics = self.quantize(z, var_q)\n\n # Post-reshape\n z_quantize = self._post_reshape(z_quantize)\n\n metrics['variance_q'] = float(var_q.mean())\n\n return z_quantize, loss, metrics\n\n def quantize(self, z: torch.Tensor, var_q: torch.Tensor):\n # Posterior distance\n weight_q = 0.5 / torch.clamp(var_q, min=1e-10)\n logit = -self._calc_distance_bw_enc_codes(z, weight_q)\n probs = torch.softmax(logit, dim=-1)\n log_probs = torch.log_softmax(logit, dim=-1)\n\n # Quantization\n if self.training:\n encodings = F.gumbel_softmax(logit, tau=self.temperature) # [L, size_dict]\n z_quantized = torch.mm(encodings, self.codebook)\n else:\n idxs_enc = torch.argmax(logit, dim=1) # [L]\n z_quantized = F.embedding(idxs_enc, self.codebook)\n\n # Latent loss\n\n # KLD regularization\n loss_kld_reg = torch.sum(probs * log_probs) / self.bs\n\n # commitment loss\n loss_commit = self._calc_distance_bw_enc_dec(z, z_quantized, weight_q) / self.bs\n\n loss_latent = loss_kld_reg + loss_commit\n\n metrics = {} # logging\n metrics['loss_commit'] = loss_commit.detach()\n metrics['loss_kld_reg'] = loss_kld_reg.detach()\n metrics['loss_latent'] = loss_latent.detach()\n\n return z_quantized, loss_latent, metrics\n\n def _calc_distance_bw_enc_codes(self, z, weight_q):\n distances = weight_q * self._se_codebook(z)\n return distances\n\n def _se_codebook(self, z):\n # z : [L, dim_z]\n # codebook : [size_dict, dim_z]\n # distances : [L, size_dict]\n\n distances = torch.sum(z**2, dim=1, keepdim=True)\\\n + torch.sum(self.codebook**2, dim=1) - 2 * torch.mm(z, self.codebook.t())\n return distances\n\n def _calc_distance_bw_enc_dec(self, z1, z2, weight_q):\n return torch.sum((z1 - z2)**2 * weight_q)\n\n def _init_codebook(self, z):\n def _tile(z_, scale_rand=0.2):\n L, dim = z_.shape\n if L < self.size_dict:\n n_repeats = (self.size_dict - 1) // L + 1\n z_ = z_.repeat(n_repeats, 1)\n z_ = z_ + torch.randn_like(z_, requires_grad=False) * scale_rand * var_z\n return z_\n\n var_z = torch.var(z, dim=0).mean()\n y = _tile(z)\n _k_rand = y[torch.randperm(y.shape[0])][:self.size_dict]\n\n # if dist.is_available():\n # dist.broadcast(_k_rand, 0)\n\n self.codebook.data[:, :] = _k_rand.clone()\n\n var_init = torch.var(y, dim=0).mean().clone().detach() * self.var_q_init\n self.var_init[:] = var_init\n self.init[0] = True\n\n print(f'Variance was initialized to {var_init}')\n\n def _pre_reshape(self, z):\n # (bs, dim_z, *in_shape) -> (bs * prod(in_shape), dim_z)\n\n if self.dim_data == 1:\n self.bs, self.dim_z, self.num_d1 = z.shape\n self.num_d2 = 1\n elif self.dim_data == 2:\n self.bs, self.dim_z, self.num_d1, self.num_d2 = z.shape\n else:\n raise Exception(\"Undefined dimension size.\")\n\n dim_z = z.shape[1]\n\n if self.dim_data == 1:\n z = z.permute(0, 2, 1).contiguous()\n z = z.view(-1, dim_z)\n elif self.dim_data == 2:\n z = z.permute(0, 2, 3, 1).contiguous()\n z = z.view(-1, dim_z)\n else:\n raise Exception(\"Undefined dimension size.\")\n\n return z\n\n def _post_reshape(self, z):\n # (bs * prod(in_shape), dim_z) -> (bs, dim_z, *in_shape)\n\n if self.dim_data == 1:\n z = z.view(self.bs, self.num_d1, -1).permute(0, -1, 1).contiguous()\n elif self.dim_data == 2:\n z = z.view(self.bs, self.num_d1, self.num_d2, -1).permute(0, -1, 1, 2).contiguous()\n else:\n raise Exception(\"Undefined dimension size.\")\n\n return z" } ]
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from .encdec import Encoder, Decoder from .stochastic_quantizer import SQuantizer
2,202
""" Copyright (C) 2023 Yukara Ikemiya """ class SQVAE(nn.Module): def __init__(self, kwargs_encdec: dict, kwargs_quantizer: dict): super().__init__() assert (kwargs_encdec['width'] == kwargs_quantizer['dim_dict']) self.encoder = Encoder(**kwargs_encdec)
""" Copyright (C) 2023 Yukara Ikemiya """ class SQVAE(nn.Module): def __init__(self, kwargs_encdec: dict, kwargs_quantizer: dict): super().__init__() assert (kwargs_encdec['width'] == kwargs_quantizer['dim_dict']) self.encoder = Encoder(**kwargs_encdec)
self.decoder = Decoder(**kwargs_encdec)
1
2023-10-15 14:48:55+00:00
4k
colour-science/colour-visuals
colour_visuals/planckian_locus.py
[ { "identifier": "DEFAULT_FLOAT_DTYPE_WGPU", "path": "colour_visuals/common.py", "snippet": "DEFAULT_FLOAT_DTYPE_WGPU = np.float32" }, { "identifier": "append_channel", "path": "colour_visuals/common.py", "snippet": "def append_channel(a: ArrayLike, value: float = 1) -> NDArray:\n \"\"\"\n Append a channel to given variable :math:`a`.\n\n Parameters\n ----------\n a\n Variable :math:`a` to append a channel to.\n value\n Channel value.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Variable :math:`a` with appended channel.\n \"\"\"\n\n a = np.copy(a)\n\n return np.hstack( # pyright: ignore\n [\n a,\n full(\n (*list(a.shape[:-1]), 1),\n value,\n dtype=a.dtype, # pyright: ignore\n ),\n ]\n )" }, { "identifier": "as_contiguous_array", "path": "colour_visuals/common.py", "snippet": "def as_contiguous_array(\n a: NDArray, dtype: Type[DType] = DEFAULT_FLOAT_DTYPE_WGPU\n) -> NDArray:\n \"\"\"\n Convert given array to a contiguous array (ndim >= 1) in memory (C order).\n\n Parameters\n ----------\n a\n Variable :math:`a` to convert.\n dtype\n :class:`numpy.dtype` to use for conversion, default to the\n :class:`numpy.dtype` defined by the\n :attr:`colour.constant.DEFAULT_FLOAT_DTYPE_WGPU` attribute.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Converted variable :math:`a`.\n \"\"\"\n\n return np.ascontiguousarray(a.astype(dtype))" }, { "identifier": "MixinPropertyColour", "path": "colour_visuals/visual.py", "snippet": "class MixinPropertyColour:\n \"\"\"\n Define a mixin for a colour.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyColour.colour`\n \"\"\"\n\n def __init__(self):\n self._colour = None\n\n super().__init__()\n\n @visual_property\n def colour(self) -> ArrayLike | None:\n \"\"\"\n Getter and setter property for the colour.\n\n Parameters\n ----------\n value\n Value to set the colour with.\n\n Returns\n -------\n ArrayLike or None\n Visual colour.\n \"\"\"\n\n return self._colour\n\n @colour.setter\n def colour(self, value: ArrayLike | None):\n \"\"\"Setter for the **self.colour** property.\"\"\"\n\n self._colour = value" }, { "identifier": "MixinPropertyMethod", "path": "colour_visuals/visual.py", "snippet": "class MixinPropertyMethod:\n \"\"\"\n Define a mixin for a *Chromaticity Diagram* method.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyMethod.method`\n \"\"\"\n\n def __init__(self):\n self._method = \"CIE 1931\"\n\n super().__init__()\n\n @visual_property\n def method(\n self,\n ) -> Literal[\"CIE 1931\", \"CIE 1960 UCS\", \"CIE 1976 UCS\"] | str:\n \"\"\"\n Getter and setter property for the *Chromaticity Diagram* method.\n\n Parameters\n ----------\n value\n Value to set the *Chromaticity Diagram* method with.\n\n Returns\n -------\n :class:`str`\n *Chromaticity Diagram* method.\n \"\"\"\n\n return self._method\n\n @method.setter\n def method(\n self, value: Literal[\"CIE 1931\", \"CIE 1960 UCS\", \"CIE 1976 UCS\"] | str\n ):\n \"\"\"Setter for the **self.method** property.\"\"\"\n\n self._method = validate_method(\n value, tuple(METHODS_CHROMATICITY_DIAGRAM)\n )" }, { "identifier": "MixinPropertyOpacity", "path": "colour_visuals/visual.py", "snippet": "class MixinPropertyOpacity:\n \"\"\"\n Define a mixin for an opacity value.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyOpacity.opacity`\n \"\"\"\n\n def __init__(self):\n self._opacity = 1\n\n super().__init__()\n\n @visual_property\n def opacity(self) -> float:\n \"\"\"\n Getter and setter property for the opacity value.\n\n Parameters\n ----------\n value\n Value to set the opacity value with.\n\n Returns\n -------\n :class:`float`\n Visual opacity.\n \"\"\"\n\n return self._opacity\n\n @opacity.setter\n def opacity(self, value: float):\n \"\"\"Setter for the **self.opacity** property.\"\"\"\n\n self._opacity = value" }, { "identifier": "MixinPropertyThickness", "path": "colour_visuals/visual.py", "snippet": "class MixinPropertyThickness:\n \"\"\"\n Define a mixin for a thickness value.\n\n Attributes\n ----------\n - :attr:`~colour_visuals.visual.MixinPropertyThickness.thickness`\n \"\"\"\n\n def __init__(self):\n self._thickness = 1\n\n super().__init__()\n\n @visual_property\n def thickness(self) -> float:\n \"\"\"\n Getter and setter property for the thickness value.\n\n Parameters\n ----------\n value\n Value to set the thickness value with.\n\n Returns\n -------\n :class:`float`\n Thickness value.\n \"\"\"\n\n return self._thickness\n\n @thickness.setter\n def thickness(self, value: float):\n \"\"\"Setter for the **self.thickness** property.\"\"\"\n\n self._thickness = value" }, { "identifier": "Visual", "path": "colour_visuals/visual.py", "snippet": "class Visual(gfx.Group, metaclass=ABCMeta):\n \"\"\"Define the base class for the visuals.\"\"\"\n\n def __init__(self):\n self._is_update_blocked = False\n\n super().__init__()\n\n @contextmanager\n def block_update(self) -> Generator:\n \"\"\"Define a context manager that blocks the visual updates.\"\"\"\n self._is_update_blocked = True\n\n yield\n\n self._is_update_blocked = False\n\n @abstractmethod\n def update(self):\n \"\"\"\n Update the visual.\n\n Notes\n -----\n - Must be reimplemented by sub-classes.\n \"\"\"" }, { "identifier": "visual_property", "path": "colour_visuals/visual.py", "snippet": "class visual_property(property):\n \"\"\"\n Define a :class:`property` sub-class calling the\n :class:`colour_visuals.Visual.update` method.\n \"\"\"\n\n def __set__(self, obj, value):\n \"\"\"Reimplement the :class:`property.__set__` method.\"\"\"\n super().__set__(obj, value)\n\n obj.update()" } ]
import numpy as np import pygfx as gfx from colour.hints import ( ArrayLike, Literal, Sequence, cast, ) from colour.plotting import ( CONSTANTS_COLOUR_STYLE, LABELS_PLANCKIAN_LOCUS_DEFAULT, lines_planckian_locus, ) from colour.utilities import ( as_int_scalar, optional, ) from colour_visuals.common import ( DEFAULT_FLOAT_DTYPE_WGPU, append_channel, as_contiguous_array, ) from colour_visuals.visual import ( MixinPropertyColour, MixinPropertyMethod, MixinPropertyOpacity, MixinPropertyThickness, Visual, visual_property, )
3,181
colour: ArrayLike | None = None, opacity: float = 1, thickness: float = 1, ): super().__init__() self._planckian_locus = None self._iso_temperature_lines = [] self._texts = [] self._labels = None self._mireds = False with self.block_update(): self.method = method self.labels = labels self.mireds = mireds self.colour = colour self.opacity = opacity self.thickness = thickness self.update() @visual_property def labels( self, ) -> Sequence | None: """ Getter and setter property for the labels. Parameters ---------- value Value to set the labels with. Returns ------- :class:`str` Labels. """ return self._labels @labels.setter def labels(self, value: Sequence | None): """Setter for the **self.labels** property.""" self._labels = cast( Sequence, optional( value, LABELS_PLANCKIAN_LOCUS_DEFAULT[ "Mireds" if self._mireds else "Default" ], ), ) @visual_property def mireds( self, ) -> bool: """ Getter and setter property for the mireds state. Parameters ---------- value Value to set the mireds state with. Returns ------- :class:`bool` Mireds state. """ return self._mireds @mireds.setter def mireds(self, value: bool): """Setter for the **self.mireds** property.""" self._mireds = value def update(self): """Update the visual.""" if self._is_update_blocked: return self.clear() lines_pl, lines_l = lines_planckian_locus( self._labels, self._mireds, method=self._method, ) # Planckian Locus positions = np.concatenate( [lines_pl["position"][:-1], lines_pl["position"][1:]], axis=1 ).reshape([-1, 2]) positions = np.hstack( [ positions, np.full((positions.shape[0], 1), 0, DEFAULT_FLOAT_DTYPE_WGPU), ] ) if self._colour is None: colour_sl = np.concatenate( [lines_pl["colour"][:-1], lines_pl["colour"][1:]], axis=1 ).reshape([-1, 3]) else: colour_sl = np.tile(self._colour, (positions.shape[0], 1)) self._planckian_locus = gfx.Line( gfx.Geometry( positions=as_contiguous_array(positions), colors=as_contiguous_array(
# !/usr/bin/env python """ Planckian Locus Visuals ======================= Defines the *Planckian Locus* visuals: - :class:`colour_visuals.VisualPlanckianLocus` """ from __future__ import annotations __author__ = "Colour Developers" __copyright__ = "Copyright 2023 Colour Developers" __license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "[email protected]" __status__ = "Production" __all__ = [ "VisualPlanckianLocus", ] class VisualPlanckianLocus( MixinPropertyColour, MixinPropertyMethod, MixinPropertyOpacity, MixinPropertyThickness, Visual, ): """ Create a *Planckian Locus* visual. Parameters ---------- method *Planckian Locus* method. labels Array of labels used to customise which iso-temperature lines will be drawn along the *Planckian Locus*. Passing an empty array will result in no iso-temperature lines being drawn. mireds Whether to use micro reciprocal degrees for the iso-temperature lines. colour Colour of the visual, if *None*, the colour is computed from the visual geometry. opacity Opacity of the visual. thickness Thickness of the visual lines. Attributes ---------- - :attr:`~colour_visuals.VisualPlanckianLocus.method` - :attr:`~colour_visuals.VisualPlanckianLocus.labels` - :attr:`~colour_visuals.VisualPlanckianLocus.mireds` - :attr:`~colour_visuals.VisualPlanckianLocus.colour` - :attr:`~colour_visuals.VisualPlanckianLocus.opacity` - :attr:`~colour_visuals.VisualPlanckianLocus.thickness` Methods ------- - :meth:`~colour_visuals.VisualPlanckianLocus.__init__` - :meth:`~colour_visuals.VisualPlanckianLocus.update` Examples -------- >>> import os >>> from colour.utilities import suppress_stdout >>> from wgpu.gui.auto import WgpuCanvas >>> with suppress_stdout(): ... canvas = WgpuCanvas(size=(960, 540)) ... scene = gfx.Scene() ... scene.add( ... gfx.Background( ... None, gfx.BackgroundMaterial(np.array([0.18, 0.18, 0.18])) ... ) ... ) ... visual = VisualPlanckianLocus() ... camera = gfx.PerspectiveCamera(50, 16 / 9) ... camera.show_object(visual, up=np.array([0, 0, 1]), scale=1.25) ... scene.add(visual) ... if os.environ.get("CI") is None: ... gfx.show(scene, camera=camera, canvas=canvas) ... .. image:: ../_static/Plotting_VisualPlanckianLocus.png :align: center :alt: visual-planckian-locus """ def __init__( self, method: Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"] | str = "CIE 1931", labels: Sequence | None = None, mireds: bool = False, colour: ArrayLike | None = None, opacity: float = 1, thickness: float = 1, ): super().__init__() self._planckian_locus = None self._iso_temperature_lines = [] self._texts = [] self._labels = None self._mireds = False with self.block_update(): self.method = method self.labels = labels self.mireds = mireds self.colour = colour self.opacity = opacity self.thickness = thickness self.update() @visual_property def labels( self, ) -> Sequence | None: """ Getter and setter property for the labels. Parameters ---------- value Value to set the labels with. Returns ------- :class:`str` Labels. """ return self._labels @labels.setter def labels(self, value: Sequence | None): """Setter for the **self.labels** property.""" self._labels = cast( Sequence, optional( value, LABELS_PLANCKIAN_LOCUS_DEFAULT[ "Mireds" if self._mireds else "Default" ], ), ) @visual_property def mireds( self, ) -> bool: """ Getter and setter property for the mireds state. Parameters ---------- value Value to set the mireds state with. Returns ------- :class:`bool` Mireds state. """ return self._mireds @mireds.setter def mireds(self, value: bool): """Setter for the **self.mireds** property.""" self._mireds = value def update(self): """Update the visual.""" if self._is_update_blocked: return self.clear() lines_pl, lines_l = lines_planckian_locus( self._labels, self._mireds, method=self._method, ) # Planckian Locus positions = np.concatenate( [lines_pl["position"][:-1], lines_pl["position"][1:]], axis=1 ).reshape([-1, 2]) positions = np.hstack( [ positions, np.full((positions.shape[0], 1), 0, DEFAULT_FLOAT_DTYPE_WGPU), ] ) if self._colour is None: colour_sl = np.concatenate( [lines_pl["colour"][:-1], lines_pl["colour"][1:]], axis=1 ).reshape([-1, 3]) else: colour_sl = np.tile(self._colour, (positions.shape[0], 1)) self._planckian_locus = gfx.Line( gfx.Geometry( positions=as_contiguous_array(positions), colors=as_contiguous_array(
append_channel(colour_sl, self._opacity)
1
2023-10-15 04:30:47+00:00
4k
JiahuiLei/NAP
core/models/utils/occnet_utils/utils/voxels.py
[ { "identifier": "check_mesh_contains", "path": "core/models/utils/occnet_utils/utils/libmesh/inside_mesh.py", "snippet": "def check_mesh_contains(mesh, points, hash_resolution=512):\n intersector = MeshIntersector(mesh, hash_resolution)\n contains = intersector.query(points)\n return contains" }, { "identifier": "make_3d_grid", "path": "core/models/utils/occnet_utils/utils/common.py", "snippet": "def make_3d_grid(bb_min, bb_max, shape):\n ''' Makes a 3D grid.\n\n Args:\n bb_min (tuple): bounding box minimum\n bb_max (tuple): bounding box maximum\n shape (tuple): output shape\n '''\n size = shape[0] * shape[1] * shape[2]\n\n pxs = torch.linspace(bb_min[0], bb_max[0], shape[0])\n pys = torch.linspace(bb_min[1], bb_max[1], shape[1])\n pzs = torch.linspace(bb_min[2], bb_max[2], shape[2])\n\n pxs = pxs.view(-1, 1, 1).expand(*shape).contiguous().view(size)\n pys = pys.view(1, -1, 1).expand(*shape).contiguous().view(size)\n pzs = pzs.view(1, 1, -1).expand(*shape).contiguous().view(size)\n p = torch.stack([pxs, pys, pzs], dim=1)\n\n return p" } ]
import numpy as np import trimesh from scipy import ndimage from skimage.measure import block_reduce from .libvoxelize.voxelize import voxelize_mesh_ from .libmesh import check_mesh_contains from .common import make_3d_grid
2,850
v_idx[f1_l_x, f1_l_y, f1_l_z + 1], v_idx[f1_l_x, f1_l_y + 1, f1_l_z + 1], v_idx[f1_l_x, f1_l_y + 1, f1_l_z], ], axis=1) faces_1_r = np.stack([ v_idx[f1_r_x, f1_r_y, f1_r_z], v_idx[f1_r_x, f1_r_y + 1, f1_r_z], v_idx[f1_r_x, f1_r_y + 1, f1_r_z + 1], v_idx[f1_r_x, f1_r_y, f1_r_z + 1], ], axis=1) faces_2_l = np.stack([ v_idx[f2_l_x, f2_l_y, f2_l_z], v_idx[f2_l_x + 1, f2_l_y, f2_l_z], v_idx[f2_l_x + 1, f2_l_y, f2_l_z + 1], v_idx[f2_l_x, f2_l_y, f2_l_z + 1], ], axis=1) faces_2_r = np.stack([ v_idx[f2_r_x, f2_r_y, f2_r_z], v_idx[f2_r_x, f2_r_y, f2_r_z + 1], v_idx[f2_r_x + 1, f2_r_y, f2_r_z + 1], v_idx[f2_r_x + 1, f2_r_y, f2_r_z], ], axis=1) faces_3_l = np.stack([ v_idx[f3_l_x, f3_l_y, f3_l_z], v_idx[f3_l_x, f3_l_y + 1, f3_l_z], v_idx[f3_l_x + 1, f3_l_y + 1, f3_l_z], v_idx[f3_l_x + 1, f3_l_y, f3_l_z], ], axis=1) faces_3_r = np.stack([ v_idx[f3_r_x, f3_r_y, f3_r_z], v_idx[f3_r_x + 1, f3_r_y, f3_r_z], v_idx[f3_r_x + 1, f3_r_y + 1, f3_r_z], v_idx[f3_r_x, f3_r_y + 1, f3_r_z], ], axis=1) faces = np.concatenate([ faces_1_l, faces_1_r, faces_2_l, faces_2_r, faces_3_l, faces_3_r, ], axis=0) vertices = self.loc + self.scale * vertices mesh = trimesh.Trimesh(vertices, faces, process=False) return mesh @property def resolution(self): assert (self.data.shape[0] == self.data.shape[1] == self.data.shape[2]) return self.data.shape[0] def contains(self, points): nx = self.resolution # Rescale bounding box to [-0.5, 0.5]^3 points = (points - self.loc) / self.scale # Discretize points to [0, nx-1]^3 points_i = ((points + 0.5) * nx).astype(np.int32) # i1, i2, i3 have sizes (batch_size, T) i1, i2, i3 = points_i[..., 0], points_i[..., 1], points_i[..., 2] # Only use indices inside bounding box mask = ( (i1 >= 0) & (i2 >= 0) & (i3 >= 0) & (nx > i1) & (nx > i2) & (nx > i3) ) # Prevent out of bounds error i1 = i1[mask] i2 = i2[mask] i3 = i3[mask] # Compute values, default value outside box is 0 occ = np.zeros(points.shape[:-1], dtype=np.bool) occ[mask] = self.data[i1, i2, i3] return occ def voxelize_ray(mesh, resolution): occ_surface = voxelize_surface(mesh, resolution) # TODO: use surface voxels here? occ_interior = voxelize_interior(mesh, resolution) occ = (occ_interior | occ_surface) return occ def voxelize_fill(mesh, resolution): bounds = mesh.bounds if (np.abs(bounds) >= 0.5).any(): raise ValueError('voxelize fill is only supported if mesh is inside [-0.5, 0.5]^3/') occ = voxelize_surface(mesh, resolution) occ = ndimage.morphology.binary_fill_holes(occ) return occ def voxelize_surface(mesh, resolution): vertices = mesh.vertices faces = mesh.faces vertices = (vertices + 0.5) * resolution face_loc = vertices[faces] occ = np.full((resolution,) * 3, 0, dtype=np.int32) face_loc = face_loc.astype(np.float32) voxelize_mesh_(occ, face_loc) occ = (occ != 0) return occ def voxelize_interior(mesh, resolution): shape = (resolution,) * 3 bb_min = (0.5,) * 3 bb_max = (resolution - 0.5,) * 3 # Create points. Add noise to break symmetry
class VoxelGrid: def __init__(self, data, loc=(0., 0., 0.), scale=1): assert (data.shape[0] == data.shape[1] == data.shape[2]) data = np.asarray(data, dtype=np.bool) loc = np.asarray(loc) self.data = data self.loc = loc self.scale = scale @classmethod def from_mesh(cls, mesh, resolution, loc=None, scale=None, method='ray'): bounds = mesh.bounds # Default location is center if loc is None: loc = (bounds[0] + bounds[1]) / 2 # Default scale, scales the mesh to [-0.45, 0.45]^3 if scale is None: scale = (bounds[1] - bounds[0]).max() / 0.9 loc = np.asarray(loc) scale = float(scale) # Transform mesh mesh = mesh.copy() mesh.apply_translation(-loc) mesh.apply_scale(1 / scale) # Apply method if method == 'ray': voxel_data = voxelize_ray(mesh, resolution) elif method == 'fill': voxel_data = voxelize_fill(mesh, resolution) voxels = cls(voxel_data, loc, scale) return voxels def down_sample(self, factor=2): if not (self.resolution % factor) == 0: raise ValueError('Resolution must be divisible by factor.') new_data = block_reduce(self.data, (factor,) * 3, np.max) return VoxelGrid(new_data, self.loc, self.scale) def to_mesh(self): # Shorthand occ = self.data # Shape of voxel grid nx, ny, nz = occ.shape # Shape of corresponding occupancy grid grid_shape = (nx + 1, ny + 1, nz + 1) # Convert values to occupancies occ = np.pad(occ, 1, 'constant') # Determine if face present f1_r = (occ[:-1, 1:-1, 1:-1] & ~occ[1:, 1:-1, 1:-1]) f2_r = (occ[1:-1, :-1, 1:-1] & ~occ[1:-1, 1:, 1:-1]) f3_r = (occ[1:-1, 1:-1, :-1] & ~occ[1:-1, 1:-1, 1:]) f1_l = (~occ[:-1, 1:-1, 1:-1] & occ[1:, 1:-1, 1:-1]) f2_l = (~occ[1:-1, :-1, 1:-1] & occ[1:-1, 1:, 1:-1]) f3_l = (~occ[1:-1, 1:-1, :-1] & occ[1:-1, 1:-1, 1:]) f1 = f1_r | f1_l f2 = f2_r | f2_l f3 = f3_r | f3_l assert (f1.shape == (nx + 1, ny, nz)) assert (f2.shape == (nx, ny + 1, nz)) assert (f3.shape == (nx, ny, nz + 1)) # Determine if vertex present v = np.full(grid_shape, False) v[:, :-1, :-1] |= f1 v[:, :-1, 1:] |= f1 v[:, 1:, :-1] |= f1 v[:, 1:, 1:] |= f1 v[:-1, :, :-1] |= f2 v[:-1, :, 1:] |= f2 v[1:, :, :-1] |= f2 v[1:, :, 1:] |= f2 v[:-1, :-1, :] |= f3 v[:-1, 1:, :] |= f3 v[1:, :-1, :] |= f3 v[1:, 1:, :] |= f3 # Calculate indices for vertices n_vertices = v.sum() v_idx = np.full(grid_shape, -1) v_idx[v] = np.arange(n_vertices) # Vertices v_x, v_y, v_z = np.where(v) v_x = v_x / nx - 0.5 v_y = v_y / ny - 0.5 v_z = v_z / nz - 0.5 vertices = np.stack([v_x, v_y, v_z], axis=1) # Face indices f1_l_x, f1_l_y, f1_l_z = np.where(f1_l) f2_l_x, f2_l_y, f2_l_z = np.where(f2_l) f3_l_x, f3_l_y, f3_l_z = np.where(f3_l) f1_r_x, f1_r_y, f1_r_z = np.where(f1_r) f2_r_x, f2_r_y, f2_r_z = np.where(f2_r) f3_r_x, f3_r_y, f3_r_z = np.where(f3_r) faces_1_l = np.stack([ v_idx[f1_l_x, f1_l_y, f1_l_z], v_idx[f1_l_x, f1_l_y, f1_l_z + 1], v_idx[f1_l_x, f1_l_y + 1, f1_l_z + 1], v_idx[f1_l_x, f1_l_y + 1, f1_l_z], ], axis=1) faces_1_r = np.stack([ v_idx[f1_r_x, f1_r_y, f1_r_z], v_idx[f1_r_x, f1_r_y + 1, f1_r_z], v_idx[f1_r_x, f1_r_y + 1, f1_r_z + 1], v_idx[f1_r_x, f1_r_y, f1_r_z + 1], ], axis=1) faces_2_l = np.stack([ v_idx[f2_l_x, f2_l_y, f2_l_z], v_idx[f2_l_x + 1, f2_l_y, f2_l_z], v_idx[f2_l_x + 1, f2_l_y, f2_l_z + 1], v_idx[f2_l_x, f2_l_y, f2_l_z + 1], ], axis=1) faces_2_r = np.stack([ v_idx[f2_r_x, f2_r_y, f2_r_z], v_idx[f2_r_x, f2_r_y, f2_r_z + 1], v_idx[f2_r_x + 1, f2_r_y, f2_r_z + 1], v_idx[f2_r_x + 1, f2_r_y, f2_r_z], ], axis=1) faces_3_l = np.stack([ v_idx[f3_l_x, f3_l_y, f3_l_z], v_idx[f3_l_x, f3_l_y + 1, f3_l_z], v_idx[f3_l_x + 1, f3_l_y + 1, f3_l_z], v_idx[f3_l_x + 1, f3_l_y, f3_l_z], ], axis=1) faces_3_r = np.stack([ v_idx[f3_r_x, f3_r_y, f3_r_z], v_idx[f3_r_x + 1, f3_r_y, f3_r_z], v_idx[f3_r_x + 1, f3_r_y + 1, f3_r_z], v_idx[f3_r_x, f3_r_y + 1, f3_r_z], ], axis=1) faces = np.concatenate([ faces_1_l, faces_1_r, faces_2_l, faces_2_r, faces_3_l, faces_3_r, ], axis=0) vertices = self.loc + self.scale * vertices mesh = trimesh.Trimesh(vertices, faces, process=False) return mesh @property def resolution(self): assert (self.data.shape[0] == self.data.shape[1] == self.data.shape[2]) return self.data.shape[0] def contains(self, points): nx = self.resolution # Rescale bounding box to [-0.5, 0.5]^3 points = (points - self.loc) / self.scale # Discretize points to [0, nx-1]^3 points_i = ((points + 0.5) * nx).astype(np.int32) # i1, i2, i3 have sizes (batch_size, T) i1, i2, i3 = points_i[..., 0], points_i[..., 1], points_i[..., 2] # Only use indices inside bounding box mask = ( (i1 >= 0) & (i2 >= 0) & (i3 >= 0) & (nx > i1) & (nx > i2) & (nx > i3) ) # Prevent out of bounds error i1 = i1[mask] i2 = i2[mask] i3 = i3[mask] # Compute values, default value outside box is 0 occ = np.zeros(points.shape[:-1], dtype=np.bool) occ[mask] = self.data[i1, i2, i3] return occ def voxelize_ray(mesh, resolution): occ_surface = voxelize_surface(mesh, resolution) # TODO: use surface voxels here? occ_interior = voxelize_interior(mesh, resolution) occ = (occ_interior | occ_surface) return occ def voxelize_fill(mesh, resolution): bounds = mesh.bounds if (np.abs(bounds) >= 0.5).any(): raise ValueError('voxelize fill is only supported if mesh is inside [-0.5, 0.5]^3/') occ = voxelize_surface(mesh, resolution) occ = ndimage.morphology.binary_fill_holes(occ) return occ def voxelize_surface(mesh, resolution): vertices = mesh.vertices faces = mesh.faces vertices = (vertices + 0.5) * resolution face_loc = vertices[faces] occ = np.full((resolution,) * 3, 0, dtype=np.int32) face_loc = face_loc.astype(np.float32) voxelize_mesh_(occ, face_loc) occ = (occ != 0) return occ def voxelize_interior(mesh, resolution): shape = (resolution,) * 3 bb_min = (0.5,) * 3 bb_max = (resolution - 0.5,) * 3 # Create points. Add noise to break symmetry
points = make_3d_grid(bb_min, bb_max, shape=shape).numpy()
1
2023-10-22 03:46:35+00:00
4k
Th3Tr1ckst3r/GReverse
greverse.py
[ { "identifier": "requestData", "path": "utils/imageSearch.py", "snippet": "def requestData(image_input, max_results=10, titles_to_urls=None):\n client = vision_v1.ImageAnnotatorClient()\n if image_input.startswith('http') or image_input.startswith('https'):\n response = requests.get(image_input)\n image = types.Image(content=response.content)\n else:\n with open(image_input, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n combined_results = {\n \"pages_with_matching_images\": [],\n \"full_matching_images\": [],\n \"partial_matching_images\": [],\n \"visually_similar_images\": [],\n \"json\": None\n }\n num_results = 0\n while num_results < max_results:\n batch_max_results = min(max_results - num_results, 10)\n request = vision_v1.AnnotateImageRequest(\n image=image,\n features=[\n vision_v1.Feature(\n type_=vision_v1.Feature.Type.WEB_DETECTION,\n max_results=batch_max_results\n )\n ]\n )\n response = client.annotate_image(request)\n if response.error.message:\n raise Exception(f'Error: {response.error.message}')\n web_detection = response.web_detection\n for page in web_detection.pages_with_matching_images:\n page_title = page.page_title\n url = page.url\n combined_results[\"pages_with_matching_images\"].append({\"url\": url, \"title\": page_title})\n num_results += 1\n for full_matching_image in web_detection.full_matching_images:\n full_matching_image_url = full_matching_image.url\n combined_results[\"full_matching_images\"].append(full_matching_image_url)\n num_results += 1\n for partial_matching_image in web_detection.partial_matching_images:\n partial_matching_image_url = partial_matching_image.url\n combined_results[\"partial_matching_images\"].append({\"url\": partial_matching_image_url})\n num_results += 1\n visually_similar_images = web_detection.visually_similar_images\n combined_results[\"visually_similar_images\"].extend(visually_similar_images)\n if num_results >= max_results:\n combined_results[\"visually_similar_images\"] = combined_results[\"visually_similar_images\"][:max_results]\n break\n json_data = MessageToDict(response._pb)\n if json_data['webDetection']['webEntities']:\n del json_data['webDetection']['webEntities']\n if json_data['webDetection']['bestGuessLabels']:\n del json_data['webDetection']['bestGuessLabels']\n combined_results[\"json\"] = json_data\n return combined_results" }, { "identifier": "requestData", "path": "utils/querySearch.py", "snippet": "def requestData(query, max_limit, searchtype, api_key, cx):\n service = build(\"customsearch\", \"v1\", developerKey=api_key)\n try:\n image_data = {}\n start_index = 1\n if searchtype:\n searchType = None\n else:\n searchType = 'image'\n while len(image_data) < max_limit:\n batch_size = min(10, max_limit - len(image_data))\n results = service.cse().list(\n q=query,\n cx=cx,\n searchType=searchType,\n num=batch_size,\n start=start_index\n ).execute()\n for item in results.get(\"items\", []):\n image_link = item[\"link\"]\n image_title = item.get(\"title\", \"\")\n image_data[image_title] = image_link\n if len(results.get(\"items\", [])) < batch_size:\n break\n start_index += batch_size\n return image_data\n except Exception as e:\n print(f\"Error occurred: {e}\")\n return None" }, { "identifier": "googleCreds", "path": "api_creds/creds.py", "snippet": "" } ]
import sys import argparse from utils.imageSearch import requestData as imageSearch from utils.querySearch import requestData as querySearch from utils.dataUtils import * from api_creds.creds import googleCreds
1,654
""" GReverse - A tool for OSINT(Open Source Intelligence) gathering & facial recognition via Google Custom Search & Google Vision API's. Created by Adrian Tarver(Th3Tr1ckst3r) @ https://github.com/Th3Tr1ckst3r/ //////////////////////////////////////////////////////////////////////////////////////// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. By downloading, copying, installing, or using the software you agree to this license. If you do not agree to this license, do not download, install, copy, or use the software. GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow here: https://raw.githubusercontent.com/Th3Tr1ckst3r/GReverse/main/LICENSE """
""" GReverse - A tool for OSINT(Open Source Intelligence) gathering & facial recognition via Google Custom Search & Google Vision API's. Created by Adrian Tarver(Th3Tr1ckst3r) @ https://github.com/Th3Tr1ckst3r/ //////////////////////////////////////////////////////////////////////////////////////// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. By downloading, copying, installing, or using the software you agree to this license. If you do not agree to this license, do not download, install, copy, or use the software. GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow here: https://raw.githubusercontent.com/Th3Tr1ckst3r/GReverse/main/LICENSE """
from utils.imageSearch import requestData as imageSearch
0
2023-10-20 03:48:16+00:00
4k
yongliang-wu/ExploreCfg
open_flamingo/src/factory.py
[ { "identifier": "Flamingo", "path": "open_flamingo/src/flamingo.py", "snippet": "class Flamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\n cross_attn_every_n_layers: int = 1,\n use_media_placement_augmentation: bool = False,\n ):\n \"\"\"\n Args:\n vision_encoder (nn.Module): HF CLIPModel\n lang_encoder (nn.Module): HF causal language model\n eoc_token_id (int): Token id for <|endofchunk|>\n media_token_id (int): Token id for <image>\n vis_dim (int): Dimension of the visual features.\n Visual features are projected to match this shape along the last dimension.\n cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.\n use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.\n \"\"\"\n super().__init__()\n self.eoc_token_id = eoc_token_id\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.vis_dim = vis_dim\n self.vision_encoder = vision_encoder\n self.perceiver = PerceiverResampler(dim=self.vis_dim)\n self.lang_encoder = lang_encoder\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n use_media_placement_augmentation=self.use_media_placement_augmentation,\n )\n\n def forward(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n labels: torch.Tensor = None,\n use_cached_vision_x: bool = False,\n clear_conditioned_layers: bool = True,\n past_key_values=None,\n use_cache: bool = False,\n ):\n \"\"\"\n Forward pass of Flamingo.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W) with F=1\n lang_x (torch.Tensor): Language input ids\n shape (B, T_txt)\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n labels (torch.Tensor, optional): Labels. Defaults to None.\n clear_conditioned_layers: if True, clear the conditioned layers\n once the foward pass is completed. Set this to false if the\n same set of images will be reused in another subsequent\n forward pass.\n past_key_values: pre-computed values to pass to language model.\n See past_key_values documentation in Hugging Face\n CausalLM models.\n use_cache: whether to use cached key values. See use_cache\n documentation in Hugging Face CausalLM models.\n \"\"\"\n assert (\n vision_x is not None\n ) or use_cached_vision_x, (\n \"Must provide either vision_x or use_cached_vision_x to True.\"\n )\n\n if use_cached_vision_x:\n # Case: use cached; vision_x should be cached and other\n # vision-related inputs should not be provided.\n assert (\n vision_x is None\n ), \"Expect vision_x to be None when use_cached_vision_x is True.\"\n assert self.lang_encoder.is_conditioned()\n\n else:\n # Case: do not use caching (i.e. this is a standard forward pass);\n self._encode_vision_x(vision_x=vision_x)\n\n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask,\n labels=labels,\n past_key_values=past_key_values,\n use_cache=use_cache,\n )\n\n if clear_conditioned_layers:\n self.lang_encoder.clear_conditioned_layers()\n\n return output\n\n def generate(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n num_beams=1,\n max_new_tokens=None,\n temperature=1.0,\n top_k=0,\n top_p=1.0,\n no_repeat_ngram_size=0,\n prefix_allowed_tokens_fn=None,\n length_penalty=1.0,\n num_return_sequences=1,\n do_sample=False,\n early_stopping=False,\n ):\n \"\"\"\n Generate text conditioned on vision and language inputs.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n images in the same chunk are collated along T_img, and frames are collated along F\n currently only F=1 is supported (single-frame videos)\n lang_x (torch.Tensor): Language input\n shape (B, T_txt)\n max_length (int, optional): Maximum length of the output. Defaults to None.\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n num_beams (int, optional): Number of beams. Defaults to 1.\n max_new_tokens (int, optional): Maximum new tokens. Defaults to None.\n temperature (float, optional): Temperature. Defaults to 1.0.\n top_k (int, optional): Top k. Defaults to 0.\n top_p (float, optional): Top p. Defaults to 1.0.\n no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.\n length_penalty (float, optional): Length penalty. Defaults to 1.0.\n num_return_sequences (int, optional): Number of return sequences. Defaults to 1.\n do_sample (bool, optional): Do sample. Defaults to False.\n early_stopping (bool, optional): Early stopping. Defaults to False.\n Returns:\n torch.Tensor: lang_x with generated tokens appended to it\n \"\"\"\n if num_beams > 1:\n vision_x = vision_x.repeat_interleave(num_beams, dim=0)\n\n self._encode_vision_x(vision_x=vision_x)\n\n output = self.lang_encoder.generate(\n lang_x,\n attention_mask=attention_mask,\n eos_token_id=self.eoc_token_id,\n num_beams=num_beams,\n max_new_tokens=max_new_tokens,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n no_repeat_ngram_size=no_repeat_ngram_size,\n length_penalty=length_penalty,\n num_return_sequences=num_return_sequences,\n do_sample=do_sample,\n early_stopping=early_stopping,\n )\n\n self.lang_encoder.clear_conditioned_layers()\n return output\n\n def _encode_vision_x(self, vision_x: torch.Tensor):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)" }, { "identifier": "FlamingoLMMixin", "path": "open_flamingo/src/flamingo_lm.py", "snippet": "class FlamingoLMMixin(nn.Module):\n \"\"\"\n Mixin to add cross-attention layers to a language model.\n \"\"\"\n\n def set_decoder_layers_attr_name(self, decoder_layers_attr_name):\n self.decoder_layers_attr_name = decoder_layers_attr_name\n\n def _get_decoder_layers(self):\n return getattr_recursive(self, self.decoder_layers_attr_name)\n\n def _set_decoder_layers(self, value):\n setattr_recursive(self, self.decoder_layers_attr_name, value)\n\n def init_flamingo(\n self,\n media_token_id,\n vis_hidden_size,\n cross_attn_every_n_layers,\n use_media_placement_augmentation,\n ):\n \"\"\"\n Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.\n \"\"\"\n\n self.gated_cross_attn_layers = nn.ModuleList(\n [\n GatedCrossAttentionBlock(\n dim=self.config.hidden_size, dim_visual=vis_hidden_size\n )\n if (layer_idx + 1) % cross_attn_every_n_layers == 0\n else None\n for layer_idx, _ in enumerate(self._get_decoder_layers())\n ]\n )\n self._set_decoder_layers(\n nn.ModuleList(\n [\n FlamingoLayer(gated_cross_attn_layer, decoder_layer)\n for gated_cross_attn_layer, decoder_layer in zip(\n self.gated_cross_attn_layers, self._get_decoder_layers()\n )\n ]\n )\n )\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.initialized_flamingo = True\n\n def forward(self, *input, **kwargs):\n \"\"\"Condition the Flamingo layers on the media locations before forward()\"\"\"\n if not self.initialized_flamingo:\n raise ValueError(\n \"Flamingo layers are not initialized. Please call `init_flamingo` first.\"\n )\n\n input_ids = kwargs[\"input_ids\"] if \"input_ids\" in kwargs else input[0]\n media_locations = input_ids == self.media_token_id\n attend_previous = (\n (random.random() < 0.5) if self.use_media_placement_augmentation else False\n )\n\n for layer in self.get_decoder().layers:\n layer.condition_media_locations(media_locations)\n layer.condition_attend_previous(attend_previous)\n\n return super().forward(\n *input, **kwargs\n ) # Call the other parent's forward method\n\n def is_conditioned(self) -> bool:\n \"\"\"Check whether all decoder layers are already conditioned.\"\"\"\n return all(l.is_conditioned() for l in self._get_decoder_layers())\n\n def clear_conditioned_layers(self):\n for layer in self._get_decoder_layers():\n layer.condition_vis_x(None)\n layer.condition_media_locations(None)\n layer.condition_attend_previous(None)" }, { "identifier": "extend_instance", "path": "open_flamingo/src/utils.py", "snippet": "def extend_instance(obj, mixin):\n \"\"\"Apply mixins to a class instance after creation\"\"\"\n base_cls = obj.__class__\n base_cls_name = obj.__class__.__name__\n obj.__class__ = type(\n base_cls_name, (mixin, base_cls), {}\n ) # mixin needs to go first for our forward() logic to work" } ]
from transformers import AutoModelForCausalLM, AutoTokenizer from typing import Literal, Optional from .flamingo import Flamingo from .flamingo_lm import FlamingoLMMixin from .utils import extend_instance from open_clip import transformer from torch.nn import functional as F import open_clip import torch
3,579
def LNormforward(self, x: torch.Tensor): #x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps) return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) transformer.LayerNormFp32.forward = LNormforward def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: Optional[str] = None, inference: bool = False, precision: Literal["fp16","fp32"] = "fp32", device: str = "cpu", checkpoint_path: Optional[str] = None, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. inference (bool, optional): whether to use inference mode. Defaults to True. precision (str, optional): precision to use. Defaults to "fp16". device (str, optional): device to use. Defaults to "cuda". checkpoint_path (str, optional): path to flamingo checkpoint. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained, precision=precision, device=device ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) dtype = torch.float16 if precision == "fp16" else torch.float32 lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, torch_dtype=dtype, # DO NOT EVER USE device_map HERE IT WILL CAUSE HORROR ).to(device)
def LNormforward(self, x: torch.Tensor): #x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps) return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) transformer.LayerNormFp32.forward = LNormforward def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: Optional[str] = None, inference: bool = False, precision: Literal["fp16","fp32"] = "fp32", device: str = "cpu", checkpoint_path: Optional[str] = None, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. inference (bool, optional): whether to use inference mode. Defaults to True. precision (str, optional): precision to use. Defaults to "fp16". device (str, optional): device to use. Defaults to "cuda". checkpoint_path (str, optional): path to flamingo checkpoint. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained, precision=precision, device=device ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) dtype = torch.float16 if precision == "fp16" else torch.float32 lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, torch_dtype=dtype, # DO NOT EVER USE device_map HERE IT WILL CAUSE HORROR ).to(device)
extend_instance(lang_encoder, FlamingoLMMixin)
1
2023-10-18 02:38:00+00:00
4k
mimo-x/Code-Review-GPT-Gitlab
app/gitlab_webhook.py
[ { "identifier": "WEBHOOK_VERIFY_TOKEN", "path": "config/config.py", "snippet": "" }, { "identifier": "review_code", "path": "service/chat_review.py", "snippet": "@retry(stop_max_attempt_number=3, wait_fixed=2000)\ndef review_code(project_id, project_commit_id, merge_id, context):\n review_info = \"\"\n index = 0\n for commit_id in project_commit_id:\n index += 1\n url = f'{gitlab_server_url}/api/v4/projects/{project_id}/repository/commits/{commit_id}/diff'\n log.info(f\"开始请求gitlab的{url} ,commit: {commit_id}的diff内容\")\n\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n content = response.json()\n # 开始处理请求的类容\n log.info(f\"开始处理All请求的类容: {content}\")\n review_info += chat_review(index, project_id, commit_id, content, context, \"\")\n\n else:\n log.error(f\"请求gitlab的{url}commit失败,状态码:{response.status_code}\")\n raise Exception(f\"请求gitlab的{url}commit失败,状态码:{response.status_code}\")\n add_comment_to_mr(project_id, merge_id, review_info)" }, { "identifier": "review_code_for_mr", "path": "service/chat_review.py", "snippet": "@retry(stop_max_attempt_number=3, wait_fixed=2000)\ndef review_code_for_mr(project_id, merge_id, gitlab_message):\n # 获取diff分支的修改文件列表\n changes = get_merge_request_changes(project_id, merge_id)\n\n if changes and len(changes) <= maximum_files:\n # Code Review 信息\n review_info = chat_review(\"\", project_id, \"\", changes, \"\", \"\")\n if review_info:\n add_comment_to_mr(project_id, merge_id, review_info)\n send_dingtalk_message_by_sign(\n f\"project_name:{gitlab_message['project']['name']}\\nmr_url:{gitlab_message['object_attributes']['url']}\\nfrom:{gitlab_message['object_attributes']['source_branch']} to:{gitlab_message['object_attributes']['target_branch']} \\n修改文件个数:{len(changes)}\\ncodereview状态:✅\")\n else:\n send_dingtalk_message_by_sign(\n f\"project_name:{gitlab_message['project']['name']}\\nmr_url:{gitlab_message['object_attributes']['url']}\\nfrom:{gitlab_message['object_attributes']['source_branch']} to:{gitlab_message['object_attributes']['target_branch']} \\n修改文件个数:{len(changes)} 存在已经提交mr,所有文件已进行mr \\ncodereview状态:pass✅\")\n\n elif changes and len(changes) > maximum_files:\n send_dingtalk_message_by_sign(\n f\"project_name:{gitlab_message['project']['name']}\\n备注:修改{len(changes)} > 50个文件不进行codereview ⚠️ \\nmr_url:{gitlab_message['object_attributes']['url']}\\nfrom:{gitlab_message['object_attributes']['source_branch']} to:{gitlab_message['object_attributes']['target_branch']}\")\n else:\n send_dingtalk_message_by_sign(\n f\"project_name:{gitlab_message['project']['name']}\\n获取merge_request信息失败❌,project_id:{project_id} | merge_id{merge_id} | mr:{gitlab_message}\")\n log.error(f\"获取merge_request信息失败,project_id:{project_id} | merge_id{merge_id}\")\n raise Exception(f\"获取merge_request信息失败,project_id:{project_id} | merge_id{merge_id}\")" }, { "identifier": "review_code_for_add_commit", "path": "service/chat_review.py", "snippet": "@retry(stop_max_attempt_number=3, wait_fixed=2000)\ndef review_code_for_add_commit(project_id, merge_id, commit_change_files, gitlab_message):\n \"\"\"\n code review for gitlab commit\n :param project_id:\n :param merge_id:\n :param commit_change_files:\n :param gitlab_message:\n :return: \n \"\"\"\n if len(commit_change_files) > 50:\n send_dingtalk_message_by_sign(\n f\"project_name:{gitlab_message['project']['name']}\\n备注:(增量commit)修改文件{len(commit_change_files)}个 > 50个 不进行codereview ⚠️ \\n分支名:{gitlab_message.get('ref')}\")\n\n # 获取diff分支的修改文件列表\n merge_change_files = get_merge_request_changes(project_id, merge_id)\n\n # 根据增量commit 修改文件列表过滤merge request二次修改的文件\n change_files = [file_content for file_content in merge_change_files if\n file_content[\"new_path\"] in commit_change_files]\n\n print(\"😊增量commit 修改文件列表\", change_files)\n if len(change_files) <= 50:\n review_info = chat_review(\"\", project_id, \"\", change_files, \"\", \"\")\n if review_info:\n add_comment_to_mr(project_id, merge_id, review_info)\n send_dingtalk_message_by_sign(\n f\"project_name:{gitlab_message['project']['name']}\\n增量修改文件个数:{len(change_files)}\\ncodereview状态:✅\")\n\n else:\n send_dingtalk_message_by_sign(\n f\"project_name:{gitlab_message['project']['name']}\\n备注:增量commit 修改{len(change_files)} > 50个文件不进行codereview ⚠️ \\n\")" }, { "identifier": "log", "path": "utils/logger.py", "snippet": "CRITICAL = 50\nFATAL = CRITICAL\nERROR = 40\nWARNING = 30\nWARN = WARNING\nINFO = 20\nDEBUG = 10\nNOTSET = 0\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\nROOT_PATH = os.path.join(CURRENT_PATH, os.pardir)\nLOG_PATH = os.path.join(parent_dir, 'logs')\nclass LogHandler(logging.Logger):\n def __init__(self, name, level=INFO, stream=True, file=True):\n def __setFileHandler__(self, level=None):\n def __setStreamHandler__(self, level=None):\n def resetName(self, name):" }, { "identifier": "get_commit_list", "path": "app/gitlab_utils.py", "snippet": "@retry(stop_max_attempt_number=3, wait_fixed=2000)\ndef get_commit_list(merge_request_iid, project_id):\n # Create API URL for the merge request commits\n api_url = f\"{gitlab_server_url}/api/v4/projects/{project_id}/merge_requests/{merge_request_iid}/commits\"\n # Set the private token in the header\n headers = {\"PRIVATE-TOKEN\": gitlab_private_token}\n\n # Make a GET request to the API URL\n response = requests.get(api_url, headers=headers)\n commit_list = []\n # If the response code is 200, the API call was successful\n if response.status_code == 200:\n # Get the commits from the response\n commits = response.json()\n # Iterate through the commits and print the commit ID and message\n for commit in commits:\n print(f\"Commit ID: {commit['id']}, Message: {commit['message']}\")\n # Append the commit ID to the list\n commit_list.append(commit['id'])\n else:\n # Log an error if the API call was unsuccessful\n log.error(f\"Failed to fetch commits. Status code: {response.status_code}\")\n # Return the list of commit IDs\n return commit_list" }, { "identifier": "get_merge_request_id", "path": "app/gitlab_utils.py", "snippet": "@retry(stop_max_attempt_number=3, wait_fixed=2000)\ndef get_merge_request_id(branch_name, project_id):\n \"\"\"\n 根据分支名,获取mr_id\n :param branch_name: 分支名\n :param project_id: 项目id\n :return: 如果分支存在 mr 则返回mrid / 如果不存在mr 则返回 \"\"\n \"\"\"\n # 构建API请求URL\n url = f\"{gitlab_server_url}/api/v4/projects/{project_id}/merge_requests\"\n\n # 发送API请求,检查是否有与分支相关的Merge Request\n params = {\n \"source_branch\": branch_name,\n \"state\": \"opened\" # 可以根据需求选择合适的状态(opened、closed、merged等)\n }\n headers = {\"Private-Token\": gitlab_private_token}\n response = requests.get(url, params=params, headers=headers)\n\n # 解析JSON响应并检查是否有相关的Merge Request\n if response.status_code == 200:\n merge_requests = response.json()\n if len(merge_requests) > 0:\n log.info(f\"分支 '{branch_name}' 存在mr记录.{merge_requests}\")\n return merge_requests[0].get('iid')\n else:\n log.info(f\"分支 '{branch_name}' 没有未关闭的mr.\")\n else:\n log.error(f\"获取分支'{branch_name}' 失败!. Status code: {response.status_code}\")\n return None" }, { "identifier": "get_commit_change_file", "path": "app/gitlab_utils.py", "snippet": "@retry(stop_max_attempt_number=3, wait_fixed=2000)\ndef get_commit_change_file(push_info):\n # 获取提交列表\n commits = push_info['commits']\n add_file = []\n modify_file = []\n # 遍历提交\n for commit in commits:\n added_files = commit.get('added', [])\n modified_files = commit.get('modified', [])\n add_file += added_files\n modify_file += modified_files\n\n return add_file + modify_file" }, { "identifier": "send_dingtalk_message_by_sign", "path": "utils/dingding.py", "snippet": "@message_error_handler\ndef send_dingtalk_message_by_sign(message_text):\n \"\"\"\n 使用签名方式发送消息通知到钉钉群\n\n Args:\n webhook_url (str): 钉钉群聊机器人的Webhook地址\n secret (str): 机器人的安全设置中的密钥\n message_text (str): 消息文本内容\n\n Returns:\n bool: 消息是否发送成功\n \"\"\"\n timestamp = str(round(time.time() * 1000))\n sign = get_sign(timestamp)\n webhookurl = f\"{dingding_bot_webhook}&timestamp={timestamp}&sign={sign}\"\n # 构建请求头\n headers = {\n \"Content-Type\": \"application/json\",\n }\n\n # 构建请求体\n message = {\n \"msgtype\": \"text\",\n \"text\": {\n \"content\": message_text\n },\n \"timestamp\": timestamp,\n \"sign\": sign\n }\n\n # 发送HTTP POST请求\n response = requests.post(\n webhookurl,\n headers=headers,\n data=json.dumps(message)\n )\n\n # 检查响应\n if response.status_code == 200:\n print(\"消息已发送成功。\")\n return True\n else:\n print(\"消息发送失败,HTTP状态码:\", response.status_code)\n return False" } ]
import json import threading from os import abort from flask import Blueprint, request, jsonify from config.config import WEBHOOK_VERIFY_TOKEN from service.chat_review import review_code, review_code_for_mr, review_code_for_add_commit from utils.logger import log from app.gitlab_utils import get_commit_list, get_merge_request_id, get_commit_change_file from utils.dingding import send_dingtalk_message_by_sign
2,757
git = Blueprint('git', __name__) @git.route('/api') def question(): return 'hello world' @git.route('/webhook', methods=['GET', 'POST']) def webhook(): if request.method == 'GET': # 获取gitlab的webhook的token verify_token = request.headers.get('X-Gitlab-Token') # gitlab的webhook的token验证
git = Blueprint('git', __name__) @git.route('/api') def question(): return 'hello world' @git.route('/webhook', methods=['GET', 'POST']) def webhook(): if request.method == 'GET': # 获取gitlab的webhook的token verify_token = request.headers.get('X-Gitlab-Token') # gitlab的webhook的token验证
if verify_token == WEBHOOK_VERIFY_TOKEN:
0
2023-10-19 14:10:10+00:00
4k
vorausrobotik/voraus-ad-dataset
tests/test_normalizing_flow.py
[ { "identifier": "Configuration", "path": "configuration.py", "snippet": "class Configuration(BaseModel):\n \"\"\"Describes the configuration parameters.\"\"\"\n\n seed: int\n epochs: int\n batchsize: int\n n_hidden_layers: int = Field(alias=\"nHiddenLayers\")\n n_coupling_blocks: int = Field(alias=\"nCouplingBlocks\")\n scale: int\n columns: Literal[\"machine\", \"mechanical\", \"electrical\", \"computed\", \"measured\"]\n clamp: float\n pad: bool\n frequency_divider: int = Field(alias=\"frequencyDivider\")\n train_gain: float = Field(alias=\"trainGain\")\n normalize: bool\n kernel_size_1: int = Field(alias=\"kernelSize1\")\n dilation_1: int = Field(alias=\"dilation1\")\n kernel_size_2: int = Field(alias=\"kernelSize2\")\n dilation_2: int = Field(alias=\"dilation2\")\n kernel_size_3: int = Field(alias=\"kernelSize3\")\n dilation_3: int = Field(alias=\"dilation3\")\n milestones: list[int]\n gamma: float\n learning_rate: float = Field(alias=\"learningRate\")" }, { "identifier": "InternalNetwork", "path": "normalizing_flow.py", "snippet": "class InternalNetwork(torch.nn.Module):\r\n \"\"\"Describes the internal network used for the normalizing flow.\"\"\"\r\n\r\n args: Tuple = tuple()\r\n kwargs: Dict[str, Any] = {}\r\n\r\n def __init__( # pylint: disable=too-many-locals\r\n self,\r\n dims_in: int,\r\n dims_out: int,\r\n number_of_time_steps: int,\r\n number_of_signals: int,\r\n n_hidden_layers: int,\r\n scale: int,\r\n kernel_size_1: int,\r\n dilation_1: int,\r\n kernel_size_2: int,\r\n dilation_2: int,\r\n kernel_size_3: int,\r\n dilation_3: int,\r\n ):\r\n \"\"\"Initializes a new internal network.\r\n\r\n Args:\r\n dims_in: The input dimensions.\r\n dims_out: The output dimensions.\r\n number_of_time_steps: The number of timestamps.\r\n number_of_signals: The number of signals.\r\n n_hidden_layers: The number of hidden layers.\r\n scale: The scale of the network.\r\n kernel_size_1: The kernel size of the first convolution.\r\n dilation_1: The dilation of the first convolution.\r\n kernel_size_2: The kernel size of the hidden convolutions.\r\n dilation_2: The dilation of the hidden convolutions.\r\n kernel_size_3: The kernal size of the last convolution.\r\n dilation_3: The dilation of the last convolution.\r\n \"\"\"\r\n super().__init__()\r\n self.dims_in = dims_in\r\n self.dims_out = dims_out\r\n\r\n self.T = number_of_time_steps # pylint: disable=invalid-name\r\n self.dx = number_of_signals # pylint: disable=invalid-name\r\n\r\n hidden_layers = torch.nn.ModuleList()\r\n for _ in range(n_hidden_layers):\r\n hidden_layers.extend(\r\n [\r\n nn.Conv1d(\r\n self.dx * scale,\r\n self.dx * scale,\r\n kernel_size=kernel_size_2,\r\n dilation=dilation_2,\r\n padding=\"same\",\r\n padding_mode=\"replicate\",\r\n ),\r\n nn.ReLU(),\r\n ]\r\n )\r\n\r\n chn_in = self.dx // 2\r\n chn_out = self.dx\r\n\r\n self.layer1 = nn.Sequential(\r\n nn.Conv1d(\r\n chn_in,\r\n self.dx * scale,\r\n kernel_size=kernel_size_1,\r\n dilation=dilation_1,\r\n padding=\"same\",\r\n padding_mode=\"replicate\",\r\n ),\r\n nn.ReLU(),\r\n *hidden_layers,\r\n nn.Conv1d(\r\n self.dx * scale,\r\n chn_out,\r\n kernel_size=kernel_size_3,\r\n dilation=dilation_3,\r\n padding=\"same\",\r\n padding_mode=\"replicate\",\r\n ),\r\n )\r\n\r\n @classmethod\r\n def setup(cls, *args: Any, **kwargs: Any) -> Type[\"InternalNetwork\"]:\r\n \"\"\"This method is used to create a new instance with the given parameters.\r\n\r\n Args:\r\n *args: The arguments for the TS network.\r\n **kwargs: The keyword arguments for the TS network.\r\n\r\n Returns:\r\n A new initialized TS network.\r\n \"\"\"\r\n cls.args = args\r\n cls.kwargs = kwargs\r\n return cls\r\n\r\n @classmethod\r\n def constructor(cls, dims_in: int, dims_out: int) -> \"InternalNetwork\":\r\n \"\"\"The abstract subnet constructor for the FrEYA coupling blocks.\r\n\r\n This method must be overriden by the inheriting class.\r\n\r\n Args:\r\n dims_in: The input dimensions.\r\n dims_out: The output dimensions.\r\n\r\n Returns:\r\n The initialized TS network.\r\n \"\"\"\r\n return cls(dims_in, dims_out, *cls.args, **cls.kwargs)\r\n\r\n def forward(self, x: torch.Tensor) -> Any: # pylint: disable=invalid-name\r\n \"\"\"Forward computation of the internal network.\r\n\r\n Args:\r\n x: The batch.\r\n\r\n Returns:\r\n The latent space.\r\n \"\"\"\r\n outputs = self.layer1(x)\r\n return outputs\r" }, { "identifier": "NormalizingFlow", "path": "normalizing_flow.py", "snippet": "class NormalizingFlow(GraphINN):\r\n \"\"\"Describes the normalizing flow model.\"\"\"\r\n\r\n def __init__(self, input_dimension: Tuple[int, ...], config: Configuration) -> None:\r\n \"\"\"Initializes the normalizing flow model.\r\n\r\n Args:\r\n input_dimension: The input dimensions.\r\n config: The configuration of the model.\r\n \"\"\"\r\n nodes = [InputNode(*input_dimension, name=\"input\")]\r\n\r\n int_network = InternalNetwork.setup(\r\n input_dimension[1],\r\n input_dimension[0],\r\n n_hidden_layers=config.n_hidden_layers,\r\n scale=config.scale,\r\n kernel_size_1=config.kernel_size_1,\r\n dilation_1=config.dilation_1,\r\n kernel_size_2=config.kernel_size_2,\r\n dilation_2=config.dilation_2,\r\n kernel_size_3=config.kernel_size_3,\r\n dilation_3=config.dilation_3,\r\n )\r\n\r\n for cbi in range(config.n_coupling_blocks):\r\n kwargs: Dict[Any, Any] = {}\r\n\r\n nodes.append(\r\n Node(nodes[-1], PermuteRandom, kwargs, name=f\"permute{cbi}\"),\r\n )\r\n nodes.append(\r\n Node(\r\n nodes[-1],\r\n CouplingBlock,\r\n {\r\n \"subnet_constructor\": int_network.constructor,\r\n \"clamp\": config.clamp,\r\n },\r\n name=f\"cb{cbi}\",\r\n )\r\n )\r\n\r\n output_node = OutputNode(nodes[-1], name=\"output\")\r\n nodes.append(output_node)\r\n\r\n super().__init__(nodes)\r" }, { "identifier": "get_loss", "path": "normalizing_flow.py", "snippet": "def get_loss(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculate the loss of a batch.\r\n\r\n Computes the negative log likelihood loss (per dimension) assuming z should be Gaussian.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss of the batch.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n number = numpy.prod(z_space.shape[1:])\r\n return torch.mean(torch.sum(z_space**2, dim=sum_dimension) - jac) / number\r" }, { "identifier": "get_loss_per_sample", "path": "normalizing_flow.py", "snippet": "def get_loss_per_sample(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculates the loss per sample.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss per sample.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n loss = 0.5 * torch.sum(z_space**2, dim=sum_dimension) - jac\r\n return loss\r" } ]
from typing import List from configuration import Configuration from normalizing_flow import InternalNetwork, NormalizingFlow, get_loss, get_loss_per_sample import pytest import torch
2,602
"""Contains tests for the normalizing flow module.""" @pytest.mark.parametrize( ("z_tensor", "jacobian", "expected_loss"), ( ([[0, 1, 2, 3], [2, 3, 4, 5]], [[1, 3], [1, 3]], 8.0), ([[1, 2, 3, 0], [4, 3, 2, 5]], [[1, 3], [1, 3]], 8.0), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [1, 3]], 10.8750), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [3, 1]], 10.8750), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[0, 2], [3, 5]], 10.75), ), ) def test_get_loss(z_tensor: List[float], jacobian: List[float], expected_loss: float) -> None: loss = get_loss(torch.Tensor(z_tensor), torch.Tensor(jacobian)) assert loss.item() == pytest.approx(expected_loss, abs=1e-3) @pytest.mark.parametrize( ("z_tensor", "jacobian", "expected_loss"), ( ([[0, 1, 2, 3], [2, 3, 4, 5]], [[3, 4], [1, 3]], [[4, 23], [6, 24]]), ([[1, 2, 3, 0], [4, 3, 2, 5]], [[1, 3], [1, 3]], [[6, 24], [6, 24]]), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [1, 3]], [[19.5, 22], [19.5, 22]]), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [3, 1]], [[19.5, 22], [17.5, 24]]), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[0, 2], [3, 5]], [[20.5, 23], [17.5, 20]]), ), ) def test_get_loss_per_sample(z_tensor: List[float], jacobian: List[float], expected_loss: List[float]) -> None: loss = get_loss_per_sample(torch.Tensor(z_tensor), torch.Tensor(jacobian)) for sample_i in range(len(z_tensor)): assert loss[sample_i].detach() == pytest.approx(expected_loss[sample_i], abs=1e-3) def test_internal_network() -> None:
"""Contains tests for the normalizing flow module.""" @pytest.mark.parametrize( ("z_tensor", "jacobian", "expected_loss"), ( ([[0, 1, 2, 3], [2, 3, 4, 5]], [[1, 3], [1, 3]], 8.0), ([[1, 2, 3, 0], [4, 3, 2, 5]], [[1, 3], [1, 3]], 8.0), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [1, 3]], 10.8750), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [3, 1]], 10.8750), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[0, 2], [3, 5]], 10.75), ), ) def test_get_loss(z_tensor: List[float], jacobian: List[float], expected_loss: float) -> None: loss = get_loss(torch.Tensor(z_tensor), torch.Tensor(jacobian)) assert loss.item() == pytest.approx(expected_loss, abs=1e-3) @pytest.mark.parametrize( ("z_tensor", "jacobian", "expected_loss"), ( ([[0, 1, 2, 3], [2, 3, 4, 5]], [[3, 4], [1, 3]], [[4, 23], [6, 24]]), ([[1, 2, 3, 0], [4, 3, 2, 5]], [[1, 3], [1, 3]], [[6, 24], [6, 24]]), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [1, 3]], [[19.5, 22], [19.5, 22]]), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[1, 3], [3, 1]], [[19.5, 22], [17.5, 24]]), ([[6, 0, 1, 2], [7, 0, 0, 1]], [[0, 2], [3, 5]], [[20.5, 23], [17.5, 20]]), ), ) def test_get_loss_per_sample(z_tensor: List[float], jacobian: List[float], expected_loss: List[float]) -> None: loss = get_loss_per_sample(torch.Tensor(z_tensor), torch.Tensor(jacobian)) for sample_i in range(len(z_tensor)): assert loss[sample_i].detach() == pytest.approx(expected_loss[sample_i], abs=1e-3) def test_internal_network() -> None:
internal_network_factory = InternalNetwork.setup(
1
2023-10-18 15:09:24+00:00
4k
invictus717/UniDG
domainbed/algorithms.py
[ { "identifier": "networks", "path": "domainbed/networks.py", "snippet": "def remove_batch_norm_from_resnet(model):\n def __init__(self):\n def forward(self, x):\n def __init__(self):\n def forward(self, x):\n def __init__(self, n_inputs, n_outputs, hparams):\n def forward(self, x):\n def __init__(self, input_shape, hparams):\n def forward(self, x):\n def train(self, mode=True):\n def freeze_bn(self):\n def __init__(self, input_shape):\n def forward(self, x):\n def __init__(self, input_shape):\n def forward(self, x):\ndef Featurizer(input_shape, hparams):\ndef Classifier(in_features, out_features, is_nonlinear=False):\ndef get_module(module, name):\ndef build_blocks(model, block_name_dict):\ndef freeze_(model):\n def __init__(self, input_shape, hparams, preserve_readout=False, freeze=None, feat_layers=None):\n def freeze(self, freeze):\n def hook(self, module, input, output):\n def build_feature_hooks(self, feats, block_names):\n def forward(self, x, ret_feats=False):\n def clear_features(self):\n def train(self, mode=True):\n def freeze_bn(self):\ndef URFeaturizer(input_shape, hparams, **kwargs):\nclass Identity(nn.Module):\nclass SqueezeLastTwo(nn.Module):\nclass MLP(nn.Module):\nclass ResNet(torch.nn.Module):\nclass MNIST_CNN(nn.Module):\nclass ContextNet(nn.Module):\nclass URResNet(torch.nn.Module):\nBLOCKNAMES = {\n \"resnet\": {\n \"stem\": [\"conv1\", \"bn1\", \"relu\", \"maxpool\"],\n \"block1\": [\"layer1\"],\n \"block2\": [\"layer2\"],\n \"block3\": [\"layer3\"],\n \"block4\": [\"layer4\"],\n },\n \"clipresnet\": {\n \"stem\": [\"conv1\", \"bn1\", \"conv2\", \"bn2\", \"conv3\", \"bn3\", \"relu\", \"avgpool\"],\n \"block1\": [\"layer1\"],\n \"block2\": [\"layer2\"],\n \"block3\": [\"layer3\"],\n \"block4\": [\"layer4\"],\n },\n \"clipvit\": { # vit-base\n \"stem\": [\"conv1\"],\n \"block1\": [\"transformer.resblocks.0\", \"transformer.resblocks.1\", \"transformer.resblocks.2\"],\n \"block2\": [\"transformer.resblocks.3\", \"transformer.resblocks.4\", \"transformer.resblocks.5\"],\n \"block3\": [\"transformer.resblocks.6\", \"transformer.resblocks.7\", \"transformer.resblocks.8\"],\n \"block4\": [\"transformer.resblocks.9\", \"transformer.resblocks.10\", \"transformer.resblocks.11\"],\n },\n \"regnety\": {\n \"stem\": [\"stem\"],\n \"block1\": [\"trunk_output.block1\"],\n \"block2\": [\"trunk_output.block2\"],\n \"block3\": [\"trunk_output.block3\"],\n \"block4\": [\"trunk_output.block4\"]\n },\n}" }, { "identifier": "random_pairs_of_minibatches", "path": "domainbed/lib/misc.py", "snippet": "def random_pairs_of_minibatches(minibatches):\n perm = torch.randperm(len(minibatches)).tolist()\n pairs = []\n\n for i in range(len(minibatches)):\n j = i + 1 if i < (len(minibatches) - 1) else 0\n\n xi, yi = minibatches[perm[i]][0], minibatches[perm[i]][1]\n xj, yj = minibatches[perm[j]][0], minibatches[perm[j]][1]\n\n min_n = min(len(xi), len(xj))\n\n pairs.append(((xi[:min_n], yi[:min_n]), (xj[:min_n], yj[:min_n])))\n\n return pairs" }, { "identifier": "get_optimizer", "path": "domainbed/optimizers.py", "snippet": "def get_optimizer(name, params, **kwargs):\n name = name.lower()\n optimizers = {\"adam\": torch.optim.Adam, \"sgd\": torch.optim.SGD, \"adamw\": torch.optim.AdamW}\n optim_cls = optimizers[name]\n\n return optim_cls(params, **kwargs)" }, { "identifier": "URFeaturizer", "path": "domainbed/networks.py", "snippet": "def URFeaturizer(input_shape, hparams, **kwargs):\n \"\"\"Auto-select an appropriate featurizer for the given input shape.\"\"\"\n if input_shape[1:3] == (224, 224):\n return URResNet(input_shape, hparams, **kwargs)\n else:\n raise NotImplementedError(f\"Input shape {input_shape} is not supported\")" }, { "identifier": "misc", "path": "domainbed/lib/misc.py", "snippet": "def make_weights_for_balanced_classes(dataset):\ndef pdb():\ndef seed_hash(*args):\ndef print_separator():\ndef print_row(row, colwidth=10, latex=False):\n def format_val(x):\n def __init__(self, underlying_dataset, keys):\n def __getitem__(self, key):\n def __len__(self):\ndef split_dataset(dataset, n, seed=0):\ndef random_pairs_of_minibatches(minibatches):\ndef accuracy(network, loader, weights, device):\ndef softmax_entropy(x: torch.Tensor) -> torch.Tensor:\ndef accuracy_ent(network, loader, weights, device, adapt=False):\n def __init__(self, fname, mode=\"a\"):\n def write(self, message):\n def flush(self):\nclass _SplitDataset(torch.utils.data.Dataset):\nclass Tee:" }, { "identifier": "Algorithm", "path": "domainbed/algorithms.py", "snippet": "class Algorithm(torch.nn.Module):\n \"\"\"\n A subclass of Algorithm implements a domain generalization algorithm.\n Subclasses should implement the following:\n - update()\n - predict()\n \"\"\"\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n super(Algorithm, self).__init__()\n self.hparams = hparams\n\n def update(self, minibatches, unlabeled=None):\n \"\"\"\n Perform one update step, given a list of (x, y) tuples for all\n environments.\n\n Admits an optional list of unlabeled minibatches from the test domains,\n when task is domain_adaptation.\n \"\"\"\n raise NotImplementedError\n\n def predict(self, x):\n raise NotImplementedError" } ]
import torch import torch.nn as nn import torch.nn.functional as F import torch.autograd as autograd import copy import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from domainbed import networks from domainbed.lib.misc import random_pairs_of_minibatches from domainbed.optimizers import get_optimizer from domainbed.networks import URFeaturizer from domainbed.lib import misc from domainbed.algorithms import Algorithm
1,944
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved ALGORITHMS = [ 'ERM', 'IRM', 'GroupDRO', 'Mixup', 'MLDG', 'CORAL', 'MMD', 'DANN', 'CDANN', 'MTL', 'SagNet', 'ARM', 'VREx', 'RSC', 'SD', 'MIRO' ] def get_algorithm_class(algorithm_name): """Return the algorithm class with the given name.""" if algorithm_name not in globals(): raise NotImplementedError("Algorithm not found: {}".format(algorithm_name)) return globals()[algorithm_name] class Algorithm(torch.nn.Module): """ A subclass of Algorithm implements a domain generalization algorithm. Subclasses should implement the following: - update() - predict() """ def __init__(self, input_shape, num_classes, num_domains, hparams): super(Algorithm, self).__init__() self.hparams = hparams def update(self, minibatches, unlabeled=None): """ Perform one update step, given a list of (x, y) tuples for all environments. Admits an optional list of unlabeled minibatches from the test domains, when task is domain_adaptation. """ raise NotImplementedError def predict(self, x): raise NotImplementedError class ERM(Algorithm): """ Empirical Risk Minimization (ERM) """ def __init__(self, input_shape, num_classes, num_domains, hparams): super(ERM, self).__init__(input_shape, num_classes, num_domains, hparams)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved ALGORITHMS = [ 'ERM', 'IRM', 'GroupDRO', 'Mixup', 'MLDG', 'CORAL', 'MMD', 'DANN', 'CDANN', 'MTL', 'SagNet', 'ARM', 'VREx', 'RSC', 'SD', 'MIRO' ] def get_algorithm_class(algorithm_name): """Return the algorithm class with the given name.""" if algorithm_name not in globals(): raise NotImplementedError("Algorithm not found: {}".format(algorithm_name)) return globals()[algorithm_name] class Algorithm(torch.nn.Module): """ A subclass of Algorithm implements a domain generalization algorithm. Subclasses should implement the following: - update() - predict() """ def __init__(self, input_shape, num_classes, num_domains, hparams): super(Algorithm, self).__init__() self.hparams = hparams def update(self, minibatches, unlabeled=None): """ Perform one update step, given a list of (x, y) tuples for all environments. Admits an optional list of unlabeled minibatches from the test domains, when task is domain_adaptation. """ raise NotImplementedError def predict(self, x): raise NotImplementedError class ERM(Algorithm): """ Empirical Risk Minimization (ERM) """ def __init__(self, input_shape, num_classes, num_domains, hparams): super(ERM, self).__init__(input_shape, num_classes, num_domains, hparams)
self.featurizer = networks.Featurizer(input_shape, self.hparams)
0
2023-10-15 14:26:12+00:00
4k
AI-Application-and-Integration-Lab/DGUA_FAS
util/evaluate.py
[ { "identifier": "AverageMeter", "path": "util/utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "accuracy", "path": "util/utils.py", "snippet": "def accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_EER_states", "path": "util/statistic.py", "snippet": "def get_EER_states(probs, labels, grid_density = 10000):\n thresholds = get_threshold(probs, grid_density)\n min_dist = 1.0\n min_dist_states = []\n FRR_list = []\n FAR_list = []\n for thr in thresholds:\n TN, FN, FP, TP = eval_state(probs, labels, thr)\n if(FN + TP == 0):\n FRR = TPR = 1.0\n FAR = FP / float(FP + TN)\n TNR = TN / float(TN + FP)\n elif(FP + TN == 0):\n TNR = FAR = 1.0\n FRR = FN / float(FN + TP)\n TPR = TP / float(TP + FN)\n else:\n FAR = FP / float(FP + TN)\n FRR = FN / float(FN + TP)\n TNR = TN / float(TN + FP)\n TPR = TP / float(TP + FN)\n dist = math.fabs(FRR - FAR)\n FAR_list.append(FAR)\n FRR_list.append(FRR)\n if dist <= min_dist:\n min_dist = dist\n min_dist_states = [FAR, FRR, thr]\n EER = (min_dist_states[0] + min_dist_states[1]) / 2.0\n thr = min_dist_states[2]\n return EER, thr, FRR_list, FAR_list" }, { "identifier": "get_HTER_at_thr", "path": "util/statistic.py", "snippet": "def get_HTER_at_thr(probs, labels, thr):\n TN, FN, FP, TP = eval_state(probs, labels, thr)\n if (FN + TP == 0):\n FRR = 1.0\n FAR = FP / float(FP + TN)\n elif(FP + TN == 0):\n FAR = 1.0\n FRR = FN / float(FN + TP)\n else:\n FAR = FP / float(FP + TN)\n FRR = FN / float(FN + TP)\n HTER = (FAR + FRR) / 2.0\n return HTER" }, { "identifier": "calculate", "path": "util/statistic.py", "snippet": "def calculate(probs, labels):\n TN, FN, FP, TP = eval_state(probs, labels, 0.5)\n APCER = 1.0 if (FP + TN == 0) else FP / float(FP + TN)\n NPCER = 1.0 if (FN + TP == 0) else FN / float(FN + TP)\n ACER = (APCER + NPCER) / 2.0\n ACC = (TP + TN) / labels.shape[0]\n return APCER, NPCER, ACER, ACC" }, { "identifier": "calculate_threshold", "path": "util/statistic.py", "snippet": "def calculate_threshold(probs, labels, threshold):\n TN, FN, FP, TP = eval_state(probs, labels, threshold)\n ACC = (TP + TN) / labels.shape[0]\n return ACC" } ]
from util.utils import AverageMeter, accuracy from util.statistic import get_EER_states, get_HTER_at_thr, calculate, calculate_threshold from sklearn.metrics import roc_auc_score from torch.autograd import Variable from torch.nn import functional as F import torch import torch.nn as nn import numpy as np
1,771
def eval(valid_dataloader, model): criterion = nn.CrossEntropyLoss() valid_losses = AverageMeter() valid_top1 = AverageMeter() prob_dict = {} label_dict = {} model.eval() output_dict_tmp = {} target_dict_tmp = {} with torch.no_grad(): for iter, (input, target, videoID) in enumerate(valid_dataloader): input = Variable(input).cuda() target = Variable(torch.from_numpy(np.array(target)).long()).cuda() cls_out = model(input) prob = F.softmax(cls_out, dim=1).cpu().data.numpy()[:, 1] label = target.cpu().data.numpy() videoID = videoID.cpu().data.numpy() for i in range(len(prob)): if(videoID[i] in prob_dict.keys()): prob_dict[videoID[i]].append(prob[i]) label_dict[videoID[i]].append(label[i]) output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3)) target_dict_tmp[videoID[i]].append(target[i].view(1)) else: prob_dict[videoID[i]] = [] label_dict[videoID[i]] = [] prob_dict[videoID[i]].append(prob[i]) label_dict[videoID[i]].append(label[i]) output_dict_tmp[videoID[i]] = [] target_dict_tmp[videoID[i]] = [] output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3)) target_dict_tmp[videoID[i]].append(target[i].view(1)) prob_list = [] label_list = [] na = [] for key in prob_dict.keys(): avg_single_video_prob = sum(prob_dict[key]) / len(prob_dict[key]) avg_single_video_label = sum(label_dict[key]) / len(label_dict[key]) prob_list = np.append(prob_list, avg_single_video_prob) label_list = np.append(label_list, avg_single_video_label) # compute loss and acc for every video avg_single_video_output = sum(output_dict_tmp[key]) / len(output_dict_tmp[key]) avg_single_video_target = (sum(target_dict_tmp[key]) / len(target_dict_tmp[key])).long() loss = criterion(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0)) acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,)) # loss = criterion(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0)) # acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,)) valid_losses.update(loss.item()) valid_top1.update(acc_valid[0]) if avg_single_video_label == 2: na += [avg_single_video_prob] label_list = np.where(np.array(label_list) == 1, np.ones_like(label_list), np.zeros_like(label_list)) auc_score = roc_auc_score(label_list, prob_list) cur_EER_valid, threshold, _, _ = get_EER_states(prob_list, label_list)
def eval(valid_dataloader, model): criterion = nn.CrossEntropyLoss() valid_losses = AverageMeter() valid_top1 = AverageMeter() prob_dict = {} label_dict = {} model.eval() output_dict_tmp = {} target_dict_tmp = {} with torch.no_grad(): for iter, (input, target, videoID) in enumerate(valid_dataloader): input = Variable(input).cuda() target = Variable(torch.from_numpy(np.array(target)).long()).cuda() cls_out = model(input) prob = F.softmax(cls_out, dim=1).cpu().data.numpy()[:, 1] label = target.cpu().data.numpy() videoID = videoID.cpu().data.numpy() for i in range(len(prob)): if(videoID[i] in prob_dict.keys()): prob_dict[videoID[i]].append(prob[i]) label_dict[videoID[i]].append(label[i]) output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3)) target_dict_tmp[videoID[i]].append(target[i].view(1)) else: prob_dict[videoID[i]] = [] label_dict[videoID[i]] = [] prob_dict[videoID[i]].append(prob[i]) label_dict[videoID[i]].append(label[i]) output_dict_tmp[videoID[i]] = [] target_dict_tmp[videoID[i]] = [] output_dict_tmp[videoID[i]].append(cls_out[i].view(1, 3)) target_dict_tmp[videoID[i]].append(target[i].view(1)) prob_list = [] label_list = [] na = [] for key in prob_dict.keys(): avg_single_video_prob = sum(prob_dict[key]) / len(prob_dict[key]) avg_single_video_label = sum(label_dict[key]) / len(label_dict[key]) prob_list = np.append(prob_list, avg_single_video_prob) label_list = np.append(label_list, avg_single_video_label) # compute loss and acc for every video avg_single_video_output = sum(output_dict_tmp[key]) / len(output_dict_tmp[key]) avg_single_video_target = (sum(target_dict_tmp[key]) / len(target_dict_tmp[key])).long() loss = criterion(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0)) acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,)) # loss = criterion(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0)) # acc_valid = accuracy(avg_single_video_output, torch.where(avg_single_video_target == 1, 1, 0), topk=(1,)) valid_losses.update(loss.item()) valid_top1.update(acc_valid[0]) if avg_single_video_label == 2: na += [avg_single_video_prob] label_list = np.where(np.array(label_list) == 1, np.ones_like(label_list), np.zeros_like(label_list)) auc_score = roc_auc_score(label_list, prob_list) cur_EER_valid, threshold, _, _ = get_EER_states(prob_list, label_list)
ACC_threshold = calculate_threshold(prob_list, label_list, threshold)
5
2023-10-17 15:35:33+00:00
4k
jianlanluo/SAQ
vqn/vqiql.py
[ { "identifier": "FullyConnectedNetwork", "path": "vqn/model.py", "snippet": "class FullyConnectedNetwork(nn.Module):\n output_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n def __call__(self, input_tensor):\n x = input_tensor\n hidden_sizes = [int(h) for h in self.arch.split('-')]\n for h in hidden_sizes:\n if self.orthogonal_init:\n x = nn.Dense(\n h,\n kernel_init=jax.nn.initializers.orthogonal(jnp.sqrt(2.0)),\n bias_init=jax.nn.initializers.zeros\n )(x)\n else:\n x = nn.Dense(h)(x)\n x = nn.relu(x)\n\n if self.orthogonal_init:\n output = nn.Dense(\n self.output_dim,\n kernel_init=jax.nn.initializers.orthogonal(1e-2),\n bias_init=jax.nn.initializers.zeros\n )(x)\n else:\n output = nn.Dense(\n self.output_dim,\n kernel_init=jax.nn.initializers.variance_scaling(\n 1e-2, 'fan_in', 'uniform'\n ),\n bias_init=jax.nn.initializers.zeros\n )(x)\n return output\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')" }, { "identifier": "StateActionEnsemble", "path": "vqn/model.py", "snippet": "class StateActionEnsemble(nn.Module):\n hidden_dims: Sequence[int]\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n num_qs: int = 2\n output_dims: int = 1\n\n @nn.compact\n def __call__(self, states, actions=None, training: bool = False):\n VmapCritic = nn.vmap(StateActionValue,\n variable_axes={'params': 0},\n split_rngs={'params': True},\n in_axes=None,\n out_axes=0,\n axis_size=self.num_qs)\n qs = VmapCritic(self.hidden_dims,\n activations=self.activations,\n output_dims=self.output_dims)(states,\n training)\n return qs" }, { "identifier": "StateValue", "path": "vqn/model.py", "snippet": "class StateValue(nn.Module):\n hidden_dims: Sequence[int]\n activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu\n\n @nn.compact\n def __call__(self,\n observations: jnp.ndarray,\n training: bool = False) -> jnp.ndarray:\n critic = MLP((*self.hidden_dims, 1),\n activations=self.activations)(observations,\n training=training)\n return jnp.squeeze(critic, -1)" }, { "identifier": "next_rng", "path": "vqn/jax_utils.py", "snippet": "def next_rng(*args, **kwargs):\n global jax_utils_rng\n return jax_utils_rng(*args, **kwargs)" }, { "identifier": "JaxRNG", "path": "vqn/jax_utils.py", "snippet": "class JaxRNG(object):\n \"\"\" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside\n pure function.\n \"\"\"\n\n @classmethod\n def from_seed(cls, seed):\n return cls(jax.random.PRNGKey(seed))\n\n def __init__(self, rng):\n self.rng = rng\n\n def __call__(self, keys=None):\n if keys is None:\n self.rng, split_rng = jax.random.split(self.rng)\n return split_rng\n elif isinstance(keys, int):\n split_rngs = jax.random.split(self.rng, num=keys + 1)\n self.rng = split_rngs[0]\n return tuple(split_rngs[1:])\n else:\n split_rngs = jax.random.split(self.rng, num=len(keys) + 1)\n self.rng = split_rngs[0]\n return {key: val for key, val in zip(keys, split_rngs[1:])}" }, { "identifier": "collect_jax_metrics", "path": "vqn/jax_utils.py", "snippet": "def collect_jax_metrics(metrics, names, prefix=None):\n collected = {}\n for name in names:\n if name in metrics:\n collected[name] = jnp.mean(metrics[name])\n if prefix is not None:\n collected = {\n '{}/{}'.format(prefix, key): value for key, value in collected.items()\n }\n return collected" }, { "identifier": "wrap_function_with_rng", "path": "vqn/jax_utils.py", "snippet": "def wrap_function_with_rng(rng):\n \"\"\" To be used as decorator, automatically bookkeep a RNG for the wrapped function. \"\"\"\n def wrap_function(function):\n def wrapped(*args, **kwargs):\n nonlocal rng\n rng, split_rng = jax.random.split(rng)\n return function(split_rng, *args, **kwargs)\n return wrapped\n return wrap_function" } ]
import copy import collections import distrax import jax import jax.numpy as jnp import numpy as np import optax import flax from typing import Any, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union from functools import partial from gym.utils import seeding from jax import random from flax import linen as nn from flax.core import frozen_dict from flax.core.frozen_dict import FrozenDict from flax.training.train_state import TrainState from tqdm import tqdm from .model import FullyConnectedNetwork, StateActionEnsemble, StateValue from .jax_utils import next_rng, JaxRNG, collect_jax_metrics, wrap_function_with_rng
2,839
def squared_euclidean_distance(a, b, b2=None, precision=None): if b2 is None: b2 = jnp.sum(b.T**2, axis=0, keepdims=True) a2 = jnp.sum(a**2, axis=1, keepdims=True) ab = jnp.matmul(a, b.T, precision=precision) d = a2 - 2 * ab + b2 return d def entropy_loss_fn(affinity, loss_type="softmax", temperature=1.0): """Calculates the entropy loss.""" flat_affinity = affinity.reshape(-1, affinity.shape[-1]) flat_affinity /= temperature probs = jax.nn.softmax(flat_affinity, axis=-1) log_probs = jax.nn.log_softmax(flat_affinity + 1e-5, axis=-1) if loss_type == "softmax": target_probs = probs elif loss_type == "argmax": codes = jnp.argmax(flat_affinity, axis=-1) onehots = jax.nn.one_hot( codes, flat_affinity.shape[-1], dtype=flat_affinity.dtype ) onehots = probs - jax.lax.stop_gradient(probs - onehots) target_probs = onehots else: raise ValueError("Entropy loss {} not supported".format(loss_type)) avg_probs = jnp.mean(target_probs, axis=0) avg_entropy = -jnp.sum(avg_probs * jnp.log(avg_probs + 1e-5)) sample_entropy = -jnp.mean(jnp.sum(target_probs * log_probs, axis=-1)) loss = sample_entropy - avg_entropy return loss class VectorQuantizer(nn.Module): """Basic vector quantizer.""" codebook_size: int commitment_cost: float quantization_cost: float entropy_loss_ratio: float = 0.0 entropy_loss_type: str = "softmax" entropy_temperature: float = 1.0 @nn.compact def __call__(self, x, train=False): codebook = self.param( "codebook", jax.nn.initializers.variance_scaling( scale=1.0, mode="fan_in", distribution="uniform" ), (self.codebook_size, x.shape[-1]), ) codebook = jnp.asarray(codebook, dtype=jnp.float32) distances = jnp.reshape( squared_euclidean_distance(x, codebook), x.shape[:-1] + (self.codebook_size,), ) encoding_indices = jnp.argmin(distances, axis=-1) encodings = jax.nn.one_hot(encoding_indices, self.codebook_size, dtype=jnp.float32) quantized = self.quantize(encodings) result_dict = dict( quantized=quantized, encoding_indices=encoding_indices, ) if train: e_latent_loss = jnp.mean( (jax.lax.stop_gradient(quantized) - x) ** 2 ) * self.commitment_cost q_latent_loss = jnp.mean( (quantized - jax.lax.stop_gradient(x)) ** 2 ) * self.quantization_cost entropy_loss = 0.0 if self.entropy_loss_ratio != 0: entropy_loss = ( entropy_loss_fn( -distances, loss_type=self.entropy_loss_type, temperature=self.entropy_temperature, ) * self.entropy_loss_ratio ) e_latent_loss = jnp.asarray(e_latent_loss, jnp.float32) q_latent_loss = jnp.asarray(q_latent_loss, jnp.float32) entropy_loss = jnp.asarray(entropy_loss, jnp.float32) loss = e_latent_loss + q_latent_loss + entropy_loss result_dict.update(dict( loss=loss, e_latent_loss=e_latent_loss, q_latent_loss=q_latent_loss, entropy_loss=entropy_loss, )) quantized = x + jax.lax.stop_gradient(quantized - x) return quantized, result_dict def quantize(self, z): codebook = jnp.asarray(self.variables["params"]["codebook"], dtype=jnp.float32) return jnp.dot(z, codebook) def get_codebook(self): return jnp.asarray(self.variables["params"]["codebook"], dtype=jnp.float32) def decode_ids(self, ids): codebook = self.variables["params"]["codebook"] return jnp.take(codebook, ids, axis=0) class ActionVQVAE(nn.Module): observation_dim: int action_dim: int embedding_dim: int codebook_size: int commitment_cost: float = 1.0 quantization_cost: float = 1.0 entropy_loss_ratio: float = 0.0 entropy_loss_type: str = "softmax" entropy_temperature: float = 1.0 arch: str = '256-256' action_only_quantization: bool = False reconstruction_loss_type: str = 'l2' def setup(self):
"""Implementations of algorithms for continuous control.""" Batch = collections.namedtuple( 'Batch', ['observations', 'actions', 'rewards', 'masks', 'next_observations']) def default_init(scale: Optional[float] = jnp.sqrt(2)): return nn.initializers.orthogonal(scale) Shape = Sequence[int] Dtype = Any # this could be a real type? InfoDict = Dict[str, float] DataType = Union[np.ndarray, Dict[str, 'DataType']] PRNGKey = Any Params = flax.core.FrozenDict[str, Any] DatasetDict = Dict[str, DataType] def get_iql_policy_from_model(env, checkpoint_data): sampler_policy = IQLSamplerPolicy(checkpoint_data['iql'].actor) return sampler_policy class IQLSamplerPolicy(object): def __init__(self, actor): self.actor=actor rng = jax.random.PRNGKey(24) rng, actor_key, critic_key, value_key = jax.random.split(rng, 4) self.rng = rng def __call__(self, observations, deterministic=False): actions = self.sample_actions(observations) assert jnp.all(jnp.isfinite(actions)) return jax.device_get(actions) def sample_actions(self, observations: np.ndarray, temperature: float = 1.0) -> jnp.ndarray: rng, actions = sample_actions_jit(self.rng, self.actor.apply_fn, self.actor.params, observations) self.rng = rng actions = np.asarray(actions) return np.clip(actions, -1, 1) def split_into_trajectories(observations, actions, rewards, masks, dones_float, next_observations): trajs = [[]] for i in tqdm(range(len(observations))): trajs[-1].append((observations[i], actions[i], rewards[i], masks[i], dones_float[i], next_observations[i])) if dones_float[i] == 1.0 and i + 1 < len(observations): trajs.append([]) return trajs def squared_euclidean_distance(a, b, b2=None, precision=None): if b2 is None: b2 = jnp.sum(b.T**2, axis=0, keepdims=True) a2 = jnp.sum(a**2, axis=1, keepdims=True) ab = jnp.matmul(a, b.T, precision=precision) d = a2 - 2 * ab + b2 return d def entropy_loss_fn(affinity, loss_type="softmax", temperature=1.0): """Calculates the entropy loss.""" flat_affinity = affinity.reshape(-1, affinity.shape[-1]) flat_affinity /= temperature probs = jax.nn.softmax(flat_affinity, axis=-1) log_probs = jax.nn.log_softmax(flat_affinity + 1e-5, axis=-1) if loss_type == "softmax": target_probs = probs elif loss_type == "argmax": codes = jnp.argmax(flat_affinity, axis=-1) onehots = jax.nn.one_hot( codes, flat_affinity.shape[-1], dtype=flat_affinity.dtype ) onehots = probs - jax.lax.stop_gradient(probs - onehots) target_probs = onehots else: raise ValueError("Entropy loss {} not supported".format(loss_type)) avg_probs = jnp.mean(target_probs, axis=0) avg_entropy = -jnp.sum(avg_probs * jnp.log(avg_probs + 1e-5)) sample_entropy = -jnp.mean(jnp.sum(target_probs * log_probs, axis=-1)) loss = sample_entropy - avg_entropy return loss class VectorQuantizer(nn.Module): """Basic vector quantizer.""" codebook_size: int commitment_cost: float quantization_cost: float entropy_loss_ratio: float = 0.0 entropy_loss_type: str = "softmax" entropy_temperature: float = 1.0 @nn.compact def __call__(self, x, train=False): codebook = self.param( "codebook", jax.nn.initializers.variance_scaling( scale=1.0, mode="fan_in", distribution="uniform" ), (self.codebook_size, x.shape[-1]), ) codebook = jnp.asarray(codebook, dtype=jnp.float32) distances = jnp.reshape( squared_euclidean_distance(x, codebook), x.shape[:-1] + (self.codebook_size,), ) encoding_indices = jnp.argmin(distances, axis=-1) encodings = jax.nn.one_hot(encoding_indices, self.codebook_size, dtype=jnp.float32) quantized = self.quantize(encodings) result_dict = dict( quantized=quantized, encoding_indices=encoding_indices, ) if train: e_latent_loss = jnp.mean( (jax.lax.stop_gradient(quantized) - x) ** 2 ) * self.commitment_cost q_latent_loss = jnp.mean( (quantized - jax.lax.stop_gradient(x)) ** 2 ) * self.quantization_cost entropy_loss = 0.0 if self.entropy_loss_ratio != 0: entropy_loss = ( entropy_loss_fn( -distances, loss_type=self.entropy_loss_type, temperature=self.entropy_temperature, ) * self.entropy_loss_ratio ) e_latent_loss = jnp.asarray(e_latent_loss, jnp.float32) q_latent_loss = jnp.asarray(q_latent_loss, jnp.float32) entropy_loss = jnp.asarray(entropy_loss, jnp.float32) loss = e_latent_loss + q_latent_loss + entropy_loss result_dict.update(dict( loss=loss, e_latent_loss=e_latent_loss, q_latent_loss=q_latent_loss, entropy_loss=entropy_loss, )) quantized = x + jax.lax.stop_gradient(quantized - x) return quantized, result_dict def quantize(self, z): codebook = jnp.asarray(self.variables["params"]["codebook"], dtype=jnp.float32) return jnp.dot(z, codebook) def get_codebook(self): return jnp.asarray(self.variables["params"]["codebook"], dtype=jnp.float32) def decode_ids(self, ids): codebook = self.variables["params"]["codebook"] return jnp.take(codebook, ids, axis=0) class ActionVQVAE(nn.Module): observation_dim: int action_dim: int embedding_dim: int codebook_size: int commitment_cost: float = 1.0 quantization_cost: float = 1.0 entropy_loss_ratio: float = 0.0 entropy_loss_type: str = "softmax" entropy_temperature: float = 1.0 arch: str = '256-256' action_only_quantization: bool = False reconstruction_loss_type: str = 'l2' def setup(self):
self.encoder = FullyConnectedNetwork(
0
2023-10-18 06:31:20+00:00
4k
naver-ai/dual-teacher
tools/test.py
[ { "identifier": "multi_gpu_test", "path": "mmseg/apis/test.py", "snippet": "def multi_gpu_test(model,\n data_loader,\n tmpdir=None,\n gpu_collect=False,\n efficient_test=False):\n \"\"\"Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n it encodes results to gpu tensors and use gpu communication for results\n collection. On cpu mode it saves the results on different gpus to 'tmpdir'\n and collects them by the rank 0 worker.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (utils.data.Dataloader): Pytorch data loader.\n tmpdir (str): Path of directory to save the temporary results from\n different gpus under cpu mode.\n gpu_collect (bool): Option to use either gpu or cpu to collect results.\n efficient_test (bool): Whether save the results as local numpy files to\n save CPU memory during evaluation. Default: False.\n\n Returns:\n list: The prediction results.\n \"\"\"\n\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n\n if isinstance(result, list):\n if efficient_test:\n result = [np2tmp(_) for _ in result]\n results.extend(result)\n else:\n if efficient_test:\n result = np2tmp(result)\n results.append(result)\n\n if rank == 0:\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n if gpu_collect:\n results = collect_results_gpu(results, len(dataset))\n else:\n results = collect_results_cpu(results, len(dataset), tmpdir)\n return results" }, { "identifier": "single_gpu_test", "path": "mmseg/apis/test.py", "snippet": "def single_gpu_test(model,\n data_loader,\n show=False,\n out_dir=None,\n efficient_test=False):\n \"\"\"Test with single GPU.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (utils.data.Dataloader): Pytorch data loader.\n show (bool): Whether show results during infernece. Default: False.\n out_dir (str, optional): If specified, the results will be dumped into\n the directory to save output results.\n efficient_test (bool): Whether save the results as local numpy files to\n save CPU memory during evaluation. Default: False.\n\n Returns:\n list: The prediction results.\n \"\"\"\n\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, **data)\n\n if show or out_dir:\n img_tensor = data['img'][0]\n img_metas = data['img_metas'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n\n for img, img_meta in zip(imgs, img_metas):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n ori_h, ori_w = img_meta['ori_shape'][:-1]\n img_show = mmcv.imresize(img_show, (ori_w, ori_h))\n\n if out_dir:\n out_file = osp.join(out_dir, img_meta['ori_filename'])\n else:\n out_file = None\n\n model.module.show_result(\n img_show,\n result,\n palette=dataset.PALETTE,\n show=show,\n out_file=out_file)\n\n if isinstance(result, list):\n if efficient_test:\n result = [np2tmp(_) for _ in result]\n results.extend(result)\n else:\n if efficient_test:\n result = np2tmp(result)\n results.append(result)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results" }, { "identifier": "build_dataloader", "path": "mmseg/datasets/builder.py", "snippet": "def build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=False,\n pin_memory=True,\n dataloader_type='PoolDataLoader',\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n seed (int | None): Seed to be used. Default: None.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: False\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n assert dataloader_type in (\n 'DataLoader',\n 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'\n\n if dataloader_type == 'PoolDataLoader':\n dataloader = PoolDataLoader\n elif dataloader_type == 'DataLoader':\n dataloader = DataLoader\n\n data_loader = dataloader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader" }, { "identifier": "build_dataset", "path": "mmseg/datasets/builder.py", "snippet": "def build_dataset(cfg, default_args=None):\n \"\"\"Build datasets.\"\"\"\n from .dataset_wrappers import ConcatDataset, RepeatDataset\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(\n cfg.get('split', None), (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n\n return dataset" }, { "identifier": "build_segmentor", "path": "mmseg/models/builder.py", "snippet": "def build_segmentor(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build segmentor.\"\"\"\n if train_cfg is not None or test_cfg is not None:\n warnings.warn(\n 'train_cfg and test_cfg is deprecated, '\n 'please specify them in model', UserWarning)\n assert cfg.get('train_cfg') is None or train_cfg is None, \\\n 'train_cfg specified in both outer field and model field '\n assert cfg.get('test_cfg') is None or test_cfg is None, \\\n 'test_cfg specified in both outer field and model field '\n return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))" } ]
import argparse import os import mmcv import torch from mmcv.parallel import MMDataParallel, MMDistributedDataParallel from mmcv.runner import get_dist_info, init_dist, load_checkpoint from mmcv.utils import DictAction from mmseg.apis import multi_gpu_test, single_gpu_test from mmseg.datasets import build_dataloader, build_dataset from mmseg.models import build_segmentor from IPython import embed
3,326
parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format') parser.add_argument( '--format-only', action='store_true', help='Format the output results without perform evaluation. It is' 'useful when you want to format the result to a specific format and ' 'submit it to the test server') parser.add_argument( '--eval', type=str, nargs='+', default='mIoU', help='evaluation metrics, which depends on the dataset, e.g., "mIoU"' ' for generic datasets, and "cityscapes" for Cityscapes') parser.add_argument('--show', action='store_true', help='show results') parser.add_argument( '--show-dir', help='directory where painted images will be saved') parser.add_argument( '--gpu-collect', action='store_true', help='whether to use gpu to collect results.') parser.add_argument( '--tmpdir', help='tmp directory used for collecting results from multiple ' 'workers, available when gpu_collect is not specified') parser.add_argument( '--options', nargs='+', action=DictAction, help='custom options') parser.add_argument( '--eval-options', nargs='+', action=DictAction, help='custom options for evaluation') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show \ or args.show_dir, \ ('Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if 'None' in args.eval: args.eval = None if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = mmcv.Config.fromfile(args.config) if args.options is not None: cfg.merge_from_dict(args.options) # set cudnn_benchmark # if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = False if args.aug_test: if cfg.data.test.type == 'CityscapesDataset': # hard code index cfg.data.test.pipeline[1].img_ratios = [ 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0 ] cfg.data.test.pipeline[1].flip = True elif cfg.data.test.type == 'ADE20KDataset': # hard code index cfg.data.test.pipeline[1].img_ratios = [ 0.75, 0.875, 1.0, 1.125, 1.25 ] cfg.data.test.pipeline[1].flip = True else: # hard code index cfg.data.test.pipeline[1].img_ratios = [ 0.5, 0.75, 1.0, 1.25, 1.5, 1.75 ] cfg.data.test.pipeline[1].flip = True cfg.model.pretrained = None cfg.data.test.test_mode = True # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # build the dataloader # TODO: support multiple images per gpu (only minor changes are needed) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader( dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint cfg.model.train_cfg = None model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg')) checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') model.CLASSES = checkpoint['meta']['CLASSES'] model.PALETTE = checkpoint['meta']['PALETTE'] efficient_test = True # False if args.eval_options is not None: efficient_test = args.eval_options.get('efficient_test', False) if not distributed: model = MMDataParallel(model, device_ids=[0])
def parse_args(): parser = argparse.ArgumentParser( description='mmseg test (and eval) a model') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument( '--aug-test', action='store_true', help='Use Flip and Multi scale aug') parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format') parser.add_argument( '--format-only', action='store_true', help='Format the output results without perform evaluation. It is' 'useful when you want to format the result to a specific format and ' 'submit it to the test server') parser.add_argument( '--eval', type=str, nargs='+', default='mIoU', help='evaluation metrics, which depends on the dataset, e.g., "mIoU"' ' for generic datasets, and "cityscapes" for Cityscapes') parser.add_argument('--show', action='store_true', help='show results') parser.add_argument( '--show-dir', help='directory where painted images will be saved') parser.add_argument( '--gpu-collect', action='store_true', help='whether to use gpu to collect results.') parser.add_argument( '--tmpdir', help='tmp directory used for collecting results from multiple ' 'workers, available when gpu_collect is not specified') parser.add_argument( '--options', nargs='+', action=DictAction, help='custom options') parser.add_argument( '--eval-options', nargs='+', action=DictAction, help='custom options for evaluation') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args def main(): args = parse_args() assert args.out or args.eval or args.format_only or args.show \ or args.show_dir, \ ('Please specify at least one operation (save/eval/format/show the ' 'results / save the results) with the argument "--out", "--eval"' ', "--format-only", "--show" or "--show-dir"') if 'None' in args.eval: args.eval = None if args.eval and args.format_only: raise ValueError('--eval and --format_only cannot be both specified') if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): raise ValueError('The output file must be a pkl file.') cfg = mmcv.Config.fromfile(args.config) if args.options is not None: cfg.merge_from_dict(args.options) # set cudnn_benchmark # if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = False if args.aug_test: if cfg.data.test.type == 'CityscapesDataset': # hard code index cfg.data.test.pipeline[1].img_ratios = [ 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0 ] cfg.data.test.pipeline[1].flip = True elif cfg.data.test.type == 'ADE20KDataset': # hard code index cfg.data.test.pipeline[1].img_ratios = [ 0.75, 0.875, 1.0, 1.125, 1.25 ] cfg.data.test.pipeline[1].flip = True else: # hard code index cfg.data.test.pipeline[1].img_ratios = [ 0.5, 0.75, 1.0, 1.25, 1.5, 1.75 ] cfg.data.test.pipeline[1].flip = True cfg.model.pretrained = None cfg.data.test.test_mode = True # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # build the dataloader # TODO: support multiple images per gpu (only minor changes are needed) dataset = build_dataset(cfg.data.test) data_loader = build_dataloader( dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False) # build the model and load checkpoint cfg.model.train_cfg = None model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg')) checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') model.CLASSES = checkpoint['meta']['CLASSES'] model.PALETTE = checkpoint['meta']['PALETTE'] efficient_test = True # False if args.eval_options is not None: efficient_test = args.eval_options.get('efficient_test', False) if not distributed: model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, efficient_test)
1
2023-10-19 04:04:31+00:00
4k
Azure/azure-openai-benchmark
tests/oairequester.py
[ { "identifier": "OAIRequester", "path": "benchmark/oairequester.py", "snippet": "class OAIRequester:\n \"\"\"\n A simple AOAI requester that makes a streaming call and collect corresponding\n statistics.\n :param api_key: Azure OpenAI resource endpoint key.\n :param url: Full deployment URL in the form of https://<resource>.openai.azure.com/openai/deployments/<deployment>/chat/completins?api-version=<api_version>\n :param timeout: Timeout for each request.\n \"\"\"\n def __init__(self, api_key: str, url: str, timeout=None, backoff=False):\n self.api_key = api_key\n self.url = url\n self.timeout = timeout\n self.backoff = backoff\n\n async def call(self, session:aiohttp.ClientSession, body: dict) -> RequestStats:\n \"\"\"\n Makes a single call with body and returns statistics. The function\n forces the request in streaming mode to be able to collect token\n generation latency.\n In case of failure, if the status code is 429 due to throttling, value\n of header retry-after-ms will be honored. Otherwise, request\n will be retried with an exponential backoff.\n Any other non-200 status code will fail immediately.\n\n :param body: json request body.\n :return RequestStats.\n \"\"\"\n stats = RequestStats()\n # operate only in streaming mode so we can collect token stats.\n body[\"stream\"] = True\n try:\n await self._call(session, body, stats)\n except Exception as e:\n stats.last_exception = e\n\n return stats\n\n @backoff.on_exception(backoff.expo,\n aiohttp.ClientError,\n jitter=backoff.full_jitter,\n max_time=MAX_RETRY_SECONDS,\n giveup=_terminal_http_code)\n async def _call(self, session:aiohttp.ClientSession, body: dict, stats: RequestStats):\n headers = {\n \"api-key\": self.api_key,\n \"Content-Type\": \"application/json\",\n TELEMETRY_USER_AGENT_HEADER: USER_AGENT,\n }\n stats.request_start_time = time.time()\n while time.time() - stats.request_start_time < MAX_RETRY_SECONDS:\n stats.calls += 1\n response = await session.post(self.url, headers=headers, json=body)\n stats.response_status_code = response.status\n # capture utilization in all cases, if found\n self._read_utilization(response, stats)\n if response.status != 429:\n break\n if RETRY_AFTER_MS_HEADER in response.headers:\n try:\n retry_after_str = response.headers[RETRY_AFTER_MS_HEADER]\n retry_after_ms = float(retry_after_str)\n logging.debug(f\"retry-after sleeping for {retry_after_ms}ms\")\n await asyncio.sleep(retry_after_ms/1000.0)\n except ValueError as e:\n logging.warning(f\"unable to parse retry-after header value: {UTILIZATION_HEADER}={retry_after_str}: {e}\") \n # fallback to backoff\n break\n else:\n # fallback to backoff\n break\n\n if response.status != 200 and response.status != 429:\n logging.warning(f\"call failed: {REQUEST_ID_HEADER}={response.headers[REQUEST_ID_HEADER]} {response.status}: {response.reason}\")\n if self.backoff:\n response.raise_for_status()\n if response.status == 200:\n await self._handle_response(response, stats)\n \n async def _handle_response(self, response: aiohttp.ClientResponse, stats: RequestStats):\n async with response:\n stats.response_time = time.time()\n async for line in response.content:\n if not line.startswith(b'data:'):\n continue\n if stats.first_token_time is None:\n stats.first_token_time = time.time()\n if stats.generated_tokens is None:\n stats.generated_tokens = 0\n stats.generated_tokens += 1\n stats.response_end_time = time.time()\n\n def _read_utilization(self, response: aiohttp.ClientResponse, stats: RequestStats):\n if UTILIZATION_HEADER in response.headers:\n util_str = response.headers[UTILIZATION_HEADER]\n if len(util_str) == 0:\n logging.warning(f\"got empty utilization header {UTILIZATION_HEADER}\")\n elif util_str[-1] != '%':\n logging.warning(f\"invalid utilization header value: {UTILIZATION_HEADER}={util_str}\")\n else:\n try:\n stats.deployment_utilization = float(util_str[:-1])\n except ValueError as e:\n logging.warning(f\"unable to parse utilization header value: {UTILIZATION_HEADER}={util_str}: {e}\") " }, { "identifier": "UTILIZATION_HEADER", "path": "benchmark/oairequester.py", "snippet": "UTILIZATION_HEADER = \"azure-openai-deployment-utilization\"" }, { "identifier": "RETRY_AFTER_MS_HEADER", "path": "benchmark/oairequester.py", "snippet": "RETRY_AFTER_MS_HEADER = \"retry-after-ms\"" } ]
import unittest import time import httpretty from benchmark.oairequester import OAIRequester, UTILIZATION_HEADER, RETRY_AFTER_MS_HEADER
1,716
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. TEST_URL = "https://testresource.openai.azure.com/openai/deployments/depl/chat/completion?api-version=2023-05-15" class TokenIterator: def __init__(self, delay: float): self.done = False self.delay = delay self.token_lines = b'data: {}\r\nend: {}\r\n' def __iter__(self): return self def __next__(self): if self.done: raise StopIteration time.sleep(self.delay) self.done = True return self.token_lines class TestRequester(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL, body=(l for l in TokenIterator(0.1)), streaming=True, adding_headers={UTILIZATION_HEADER: "11.2%"}) requester = OAIRequester("", TEST_URL) stats = requester.call({}) self.assertEqual(stats.calls, 1) self.assertIsNone(stats.last_exception) self.assertEqual(stats.generated_tokens, 1) self.assertEqual(stats.response_status_code, 200) self.assertAlmostEqual(stats.response_end_time-stats.request_start_time, 0.1, delta=0.02) self.assertAlmostEqual(stats.first_token_time-stats.request_start_time, 0.1, delta=0.02) self.assertEqual(stats.deployment_utilization, 11.2) class TestRequesterTerminal(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL, status=500) requester = OAIRequester("", TEST_URL) stats = requester.call({}) self.assertEqual(stats.calls, 1) self.assertEqual(stats.response_status_code, 500) self.assertIsNotNone(stats.last_exception) class TestRequesterRetryExponential(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL, status=429) requester = OAIRequester("", TEST_URL) stats = requester.call({}) self.assertGreaterEqual(stats.calls, 4) self.assertEqual(stats.response_status_code, 429) self.assertIsNotNone(stats.last_exception) class TestRequesterRetryAfter(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL,
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. TEST_URL = "https://testresource.openai.azure.com/openai/deployments/depl/chat/completion?api-version=2023-05-15" class TokenIterator: def __init__(self, delay: float): self.done = False self.delay = delay self.token_lines = b'data: {}\r\nend: {}\r\n' def __iter__(self): return self def __next__(self): if self.done: raise StopIteration time.sleep(self.delay) self.done = True return self.token_lines class TestRequester(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL, body=(l for l in TokenIterator(0.1)), streaming=True, adding_headers={UTILIZATION_HEADER: "11.2%"}) requester = OAIRequester("", TEST_URL) stats = requester.call({}) self.assertEqual(stats.calls, 1) self.assertIsNone(stats.last_exception) self.assertEqual(stats.generated_tokens, 1) self.assertEqual(stats.response_status_code, 200) self.assertAlmostEqual(stats.response_end_time-stats.request_start_time, 0.1, delta=0.02) self.assertAlmostEqual(stats.first_token_time-stats.request_start_time, 0.1, delta=0.02) self.assertEqual(stats.deployment_utilization, 11.2) class TestRequesterTerminal(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL, status=500) requester = OAIRequester("", TEST_URL) stats = requester.call({}) self.assertEqual(stats.calls, 1) self.assertEqual(stats.response_status_code, 500) self.assertIsNotNone(stats.last_exception) class TestRequesterRetryExponential(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL, status=429) requester = OAIRequester("", TEST_URL) stats = requester.call({}) self.assertGreaterEqual(stats.calls, 4) self.assertEqual(stats.response_status_code, 429) self.assertIsNotNone(stats.last_exception) class TestRequesterRetryAfter(unittest.TestCase): @httpretty.activate(allow_net_connect=False) def test_norate(self): httpretty.register_uri(httpretty.POST, TEST_URL,
adding_headers={RETRY_AFTER_MS_HEADER: 100},
2
2023-10-19 00:52:26+00:00
4k
pytest-visual/pytest-visual
examples/end_to_end/test_main.py
[ { "identifier": "ClockCoordinateDataset", "path": "examples/end_to_end/main.py", "snippet": "def main() -> None:\n def __init__(self, data_dir: Path, normalize: bool = True):\n def __getitem__(self, index: int) -> Tuple[Tensor, \"Time\"]:\n def __len__(self) -> int:\n def __init__(self, data_dir: Path, augment: bool = False):\n def __getitem__(self, index: int) -> Tuple[Tensor, Dict[str, Tensor]]:\n def __len__(self) -> int:\n def __init__(self, hour: int, minute: int):\n def from_coords(coords: Dict[str, Tensor]) -> \"Time\":\n def percent_from_xy(xy: Tensor) -> float:\n def get_coords(self) -> Dict[str, Tensor]:\n def approx_eq(self, other: \"Time\", max_diff_minutes: int = 5) -> bool:\n def __repr__(self) -> str:\n def __eq__(self, other: object) -> bool:\ndef normalize_image(image: Tensor) -> Tensor:\ndef get_label(path: Path) -> Time:\ndef augment(image: Tensor, label: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, Tensor]]:\ndef get_model(pretrained: bool) -> nn.Module:\ndef get_model_head(in_features: int) -> nn.Module:\ndef train(\n model: nn.Module,\n train_loader: DataLoader[Tuple[Tensor, Tensor]],\n val_loader: DataLoader[Tuple[Tensor, Tensor]],\n device: torch.device,\n) -> Tuple[List[float], List[float]]:\ndef train_loop(\n model: nn.Module,\n loader: DataLoader[Tuple[Tensor, Tensor]],\n criterion: nn.MSELoss,\n optimizer: optim.Optimizer,\n device: torch.device,\n epoch: int,\n train: bool = True,\n) -> float:\ndef test(model: nn.Module, loader: DataLoader[Tuple[Tensor, Time]], device: torch.device) -> float:\nclass ClockDataset(Dataset[Tuple[Tensor, \"Time\"]]):\nclass ClockCoordinateDataset(Dataset[Tuple[Tensor, Tensor]]):\nclass Time:" }, { "identifier": "VisualFixture", "path": "visual/interface.py", "snippet": "class VisualFixture:\n def __init__(self):\n \"\"\"\n An object to collect visualization statements.\n \"\"\"\n self.statements: List[Statement] = []\n\n # Core interface\n\n def print(self, text: str) -> None:\n \"\"\"\n Show text within a visualization case.\n\n Parameters:\n - text (str): The text to show.\n \"\"\"\n self.statements.append([\"print\", text])\n\n def show_figure(self, figure: Figure) -> None:\n \"\"\"\n Show a plotly figure within a visualization case.\n\n Parameters:\n - fig (Figure): The figure to show.\n \"\"\"\n self.statements.append([\"show\", str(figure.to_json())])\n\n # Convenience interface\n\n def show_images(\n self,\n images: List[np.ndarray],\n labels: Optional[List[str]] = None,\n max_cols: int = 3,\n height_per_row: float = 300,\n ) -> None:\n \"\"\"\n Convenience method to show a grid of images. Only accepts standardized numpy images.\n\n Parameters:\n - images (List[np.ndarray]): A list of images to show.\n - labels (Optional[List[str]]): A list of labels for each image.\n - max_cols (int): Maximum number of columns in the grid.\n - height_per_row (float): The height of each row in the grid.\n \"\"\"\n assert all(isinstance(image, np.ndarray) for image in images), \"Images must be numpy arrays\"\n assert len(images) > 0, \"At least one image must be specified\"\n\n grid_shape = get_grid_shape(len(images), max_cols)\n total_height = None if height_per_row is None else height_per_row * grid_shape[0]\n figure = create_plot_from_images(images, labels, grid_shape, total_height)\n self.show_figure(figure)\n\n def show_image(\n self,\n image: np.ndarray,\n label: Optional[str] = None,\n height: float = 600,\n ) -> None:\n \"\"\"\n Convenience method to show a single image. Only accepts standardized numpy images.\n\n Parameters:\n - image (np.ndarray): The image to show.\n - label (Optional[str]): A label for the image.\n - height (float): The height of the image.\n \"\"\"\n labels = None if label is None else [label]\n self.show_images([image], labels, max_cols=1, height_per_row=height)\n\n def show_model(\n self,\n model,\n input_size,\n depth: int = 100,\n height: float = 1500,\n ) -> None:\n \"\"\"\n Convenience method to show a PyTorch model. Requires the torchview package.\n\n Parameters:\n - model (torch.nn.Module): The model to show.\n - input_size (Tuple[int, ...]): The input size of the model.\n - depth (int): The maximum depth of the model to show.\n - height (float): The height of the image.\n \"\"\"\n import torchview # isort: skip\n\n plot = torchview.draw_graph(model, input_size=input_size, depth=depth)\n\n # Create temporary file path\n tempfile_path = tempfile.mktemp()\n plot.visual_graph.render(tempfile_path, format=\"png\")\n\n # Read image and show\n image = np.array(Image.open(tempfile_path + \".png\"))\n self.show_image(image, height=height)\n\n # Remove temporary file\n os.remove(tempfile_path)\n os.remove(tempfile_path + \".png\")" }, { "identifier": "fix_seeds", "path": "visual/interface.py", "snippet": "@pytest.fixture\ndef fix_seeds() -> None:\n \"\"\"\n A pytest fixture that fixes the random seeds of random, and optionally numpy, torch and tensorflow.\n \"\"\"\n random.seed(0)\n\n try:\n import numpy as np\n\n np.random.seed(0)\n except ImportError:\n pass\n\n try:\n import torch\n\n torch.manual_seed(0)\n torch.backends.cudnn.deterministic = True # type: ignore\n torch.backends.cudnn.benchmark = False # type: ignore\n except (ImportError, AttributeError):\n pass\n\n try:\n import tensorflow as tf\n\n tf.random.set_seed(0)\n except ImportError:\n pass" }, { "identifier": "standardize", "path": "visual/interface.py", "snippet": "def standardize(\n image: np.ndarray,\n layout: Optional[str] = None,\n mean_denorm: Optional[List[float]] = None,\n std_denorm: Optional[List[float]] = None,\n min_value: float = 0,\n max_value: Optional[float] = None,\n) -> np.ndarray:\n \"\"\"\n - layout (Optional[str]): The shape of the images. If not specified, the shape is\n determined automatically. Supported shapes are \"hwc\", \"chw\", \"hw\", \"1chw\", \"1hwc\".\n - mean_comp (Optional[List[float]]): The mean that was used to normalize the images, which\n is used to denormalize the images. If not specified, the images are not denormalized.\n - std_comp (Optional[List[float]]): The standard deviation that was used to normalize the\n images, which is used to denormalize the images. If not specified, the images are not\n denormalized.\n - min_value (float): The assumed minimum value of the images.\n - max_value (Optional[float]): The assumed maximum value of the images. If not specified,\n the maximum value is 1 for float images and 255 for integer images.\n \"\"\"\n\n # Get layout and max value\n layout = get_layout_from_image(layout, image)\n max_value = get_image_max_value_from_type(max_value, image)\n\n # Denormalize, convert to uint8, and correct layout\n image = correct_layout(image, layout)\n if std_denorm is not None:\n image = image * np.array(std_denorm)\n if mean_denorm is not None:\n image = image + mean_denorm\n image = (image - min_value) / (max_value - min_value) * 255\n image = np.clip(image, 0, 255).astype(np.uint8)\n\n return image" }, { "identifier": "visual", "path": "visual/interface.py", "snippet": "@pytest.fixture\ndef visual(request: FixtureRequest, visual_UI: UI) -> Generator[VisualFixture, None, None]:\n \"\"\"\n A pytest fixture that manages the visualization process during test execution.\n\n Parameters:\n - request (FixtureRequest): The current pytest request.\n - visual_UI (UI): An instance of the UI class for user interaction.\n\n Yields:\n - VisualFixture: An object to collect visualization statements.\n \"\"\"\n run_visualization, yes_all, reset_all = get_visualization_flags(request)\n visualizer = VisualFixture()\n storage_path = get_storage_path(request)\n\n if run_visualization:\n failed_tests1 = request.session.testsfailed\n yield visualizer # Run test\n failed_tests2 = request.session.testsfailed\n\n if failed_tests2 > failed_tests1:\n return # Test failed, so no visualization\n\n statements = visualizer.statements\n\n if not yes_all:\n # Read previous statements\n location = Location(request.node.module.__file__, request.node.name) # type: ignore\n prev_statements = load_statements(storage_path)\n\n # Check if statements have changed, and prompt user if so\n if statements != prev_statements:\n if not visual_UI.prompt_user(location, prev_statements, statements):\n pytest.fail(\"Visualizations were not accepted\")\n\n # No declined changes or --visual-yes-all flag set, so accept changes\n store_statements(storage_path, statements)\n elif reset_all:\n # Reset visualization\n clear_statements(storage_path)\n pytest.skip(\"Resetting visualization case as per --visualize-reset-all\")\n else:\n pytest.skip(\"Visualization is not enabled, add --visual option to enable\")" } ]
from pathlib import Path from typing import List from PIL import Image from torch import Tensor from examples.end_to_end.main import ( ClockCoordinateDataset, ClockDataset, Time, get_label, get_model, get_model_head, mean_norm, std_norm, ) from visual.interface import VisualFixture, fix_seeds, standardize, visual import cv2 import numpy as np import pytest import torchview
2,571
test_data_path = Path("examples/end_to_end/test_data") def test_original_labels(visual: VisualFixture, fix_seeds): dataset = ClockDataset(test_data_path / "train") images, labels = [], [] for image, label in dataset: # Convert to numpy, denormalize, and standardize layout to HWC
test_data_path = Path("examples/end_to_end/test_data") def test_original_labels(visual: VisualFixture, fix_seeds): dataset = ClockDataset(test_data_path / "train") images, labels = [], [] for image, label in dataset: # Convert to numpy, denormalize, and standardize layout to HWC
images.append(standardize(image.numpy(), mean_denorm=mean_norm, std_denorm=std_norm))
0
2023-10-18 07:13:37+00:00
4k
SLDGroup/G-CASCADE
trainer.py
[ { "identifier": "Synapse_dataset", "path": "utils/dataset_synapse.py", "snippet": "class Synapse_dataset(Dataset):\n def __init__(self, base_dir, list_dir, split, nclass=9, transform=None):\n self.transform = transform # using transform in torch!\n self.split = split\n self.sample_list = open(os.path.join(list_dir, self.split+'.txt')).readlines()\n self.data_dir = base_dir\n self.nclass = nclass\n\n def __len__(self):\n return len(self.sample_list)\n\n def __getitem__(self, idx):\n if self.split == \"train\":\n slice_name = self.sample_list[idx].strip('\\n')\n data_path = os.path.join(self.data_dir, slice_name+'.npz')\n data = np.load(data_path)\n image, label = data['image'], data['label']\n #print(image.shape)\n #image = np.reshape(image, (512, 512))\n #image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n \n #label = np.reshape(label, (512, 512))\n \n \n else:\n vol_name = self.sample_list[idx].strip('\\n')\n filepath = self.data_dir + \"/{}.npy.h5\".format(vol_name)\n data = h5py.File(filepath)\n image, label = data['image'][:], data['label'][:]\n #image = np.reshape(image, (image.shape[2], 512, 512))\n #label = np.reshape(label, (label.shape[2], 512, 512))\n #label[label==5]= 0\n #label[label==9]= 0\n #label[label==10]= 0\n #label[label==12]= 0\n #label[label==13]= 0\n #label[label==11]= 5\n\n if self.nclass == 9:\n label[label==5]= 0\n label[label==9]= 0\n label[label==10]= 0\n label[label==12]= 0\n label[label==13]= 0\n label[label==11]= 5\n \n sample = {'image': image, 'label': label}\n if self.transform:\n sample = self.transform(sample)\n sample['case_name'] = self.sample_list[idx].strip('\\n')\n return sample" }, { "identifier": "RandomGenerator", "path": "utils/dataset_synapse.py", "snippet": "class RandomGenerator(object):\n def __init__(self, output_size):\n self.output_size = output_size\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n\n if random.random() > 0.5:\n image, label = random_rot_flip(image, label)\n elif random.random() > 0.5:\n image, label = random_rotate(image, label)\n x, y = image.shape\n if x != self.output_size[0] or y != self.output_size[1]:\n image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=3) # why not 3?\n label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0)\n image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)\n label = torch.from_numpy(label.astype(np.float32))\n sample = {'image': image, 'label': label.long()}\n return sample" }, { "identifier": "powerset", "path": "utils/utils.py", "snippet": "def powerset(seq):\n \"\"\"\n Returns all the subsets of this set. This is a generator.\n \"\"\"\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item" }, { "identifier": "one_hot_encoder", "path": "utils/utils.py", "snippet": "def one_hot_encoder(input_tensor,dataset,n_classes = None):\n tensor_list = []\n if dataset == 'MMWHS': \n dict = [0,205,420,500,550,600,820,850]\n for i in dict:\n temp_prob = input_tensor == i \n tensor_list.append(temp_prob.unsqueeze(1))\n output_tensor = torch.cat(tensor_list, dim=1)\n return output_tensor.float()\n else:\n for i in range(n_classes):\n temp_prob = input_tensor == i \n tensor_list.append(temp_prob.unsqueeze(1))\n output_tensor = torch.cat(tensor_list, dim=1)\n return output_tensor.float() " }, { "identifier": "DiceLoss", "path": "utils/utils.py", "snippet": "class DiceLoss(nn.Module):\n def __init__(self, n_classes):\n super(DiceLoss, self).__init__()\n self.n_classes = n_classes\n\n def _one_hot_encoder(self, input_tensor):\n tensor_list = []\n for i in range(self.n_classes):\n temp_prob = input_tensor == i # * torch.ones_like(input_tensor)\n tensor_list.append(temp_prob.unsqueeze(1))\n output_tensor = torch.cat(tensor_list, dim=1)\n return output_tensor.float()\n\n def _dice_loss(self, score, target):\n target = target.float()\n smooth = 1e-5\n intersect = torch.sum(score * target)\n y_sum = torch.sum(target * target)\n z_sum = torch.sum(score * score)\n loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)\n loss = 1 - loss\n return loss\n\n def forward(self, inputs, target, weight=None, softmax=False):\n if softmax:\n inputs = torch.softmax(inputs, dim=1)\n target = self._one_hot_encoder(target)\n if weight is None:\n weight = [1] * self.n_classes\n #print(inputs)\n assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size())\n class_wise_dice = []\n loss = 0.0\n for i in range(0, self.n_classes):\n dice = self._dice_loss(inputs[:, i], target[:, i])\n class_wise_dice.append(1.0 - dice.item())\n loss += dice * weight[i]\n return loss / self.n_classes" }, { "identifier": "val_single_volume", "path": "utils/utils.py", "snippet": "def val_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1):\n image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy()\n\n if len(image.shape) == 3:\n prediction = np.zeros_like(label)\n for ind in range(image.shape[0]):\n slice = image[ind, :, :]\n x, y = slice.shape[0], slice.shape[1]\n if x != patch_size[0] or y != patch_size[1]:\n slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3) # previous using 0\n input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda()\n net.eval()\n with torch.no_grad(): \n P = net(input)\n outputs = 0.0\n for idx in range(len(P)):\n outputs += P[idx]\n out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)\n out = out.cpu().detach().numpy()\n if x != patch_size[0] or y != patch_size[1]:\n pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)\n else:\n pred = out\n prediction[ind] = pred\n else:\n input = torch.from_numpy(image).unsqueeze(\n 0).unsqueeze(0).float().cuda()\n net.eval()\n with torch.no_grad():\n P = net(input)\n outputs = 0.0\n for idx in range(len(P)):\n outputs += P[idx]\n out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)\n prediction = out.cpu().detach().numpy()\n metric_list = []\n for i in range(1, classes):\n metric_list.append(calculate_dice_percase(prediction == i, label == i))\n return metric_list" } ]
import argparse import logging import os import random import sys import time import numpy as np import torch import torch.nn as nn import torch.optim as optim from tqdm import tqdm from tensorboardX import SummaryWriter from torch.nn.modules.loss import CrossEntropyLoss from torch.utils.data import DataLoader from torchvision import transforms from torch.cuda.amp import GradScaler, autocast from utils.dataset_synapse import Synapse_dataset, RandomGenerator from utils.utils import powerset from utils.utils import one_hot_encoder from utils.utils import DiceLoss from utils.utils import val_single_volume
2,578
def inference(args, model, best_performance): db_test = Synapse_dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir, nclass=args.num_classes) testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1) logging.info("{} test iterations per epoch".format(len(testloader))) model.eval() metric_list = 0.0 for i_batch, sampled_batch in tqdm(enumerate(testloader)): h, w = sampled_batch["image"].size()[2:] image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0] metric_i = val_single_volume(image, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size], case=case_name, z_spacing=args.z_spacing) metric_list += np.array(metric_i) metric_list = metric_list / len(db_test) performance = np.mean(metric_list, axis=0) logging.info('Testing performance in val model: mean_dice : %f, best_dice : %f' % (performance, best_performance)) return performance def trainer_synapse(args, model, snapshot_path): logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) logging.info(str(args)) base_lr = args.base_lr num_classes = args.num_classes batch_size = args.batch_size * args.n_gpu db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train", nclass=args.num_classes, transform=transforms.Compose( [RandomGenerator(output_size=[args.img_size, args.img_size])])) print("The length of train set is: {}".format(len(db_train))) def worker_init_fn(worker_id): random.seed(args.seed + worker_id) trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, worker_init_fn=worker_init_fn) if args.n_gpu > 1: model = nn.DataParallel(model) model.train() ce_loss = CrossEntropyLoss()
def inference(args, model, best_performance): db_test = Synapse_dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir, nclass=args.num_classes) testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1) logging.info("{} test iterations per epoch".format(len(testloader))) model.eval() metric_list = 0.0 for i_batch, sampled_batch in tqdm(enumerate(testloader)): h, w = sampled_batch["image"].size()[2:] image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0] metric_i = val_single_volume(image, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size], case=case_name, z_spacing=args.z_spacing) metric_list += np.array(metric_i) metric_list = metric_list / len(db_test) performance = np.mean(metric_list, axis=0) logging.info('Testing performance in val model: mean_dice : %f, best_dice : %f' % (performance, best_performance)) return performance def trainer_synapse(args, model, snapshot_path): logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) logging.info(str(args)) base_lr = args.base_lr num_classes = args.num_classes batch_size = args.batch_size * args.n_gpu db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train", nclass=args.num_classes, transform=transforms.Compose( [RandomGenerator(output_size=[args.img_size, args.img_size])])) print("The length of train set is: {}".format(len(db_train))) def worker_init_fn(worker_id): random.seed(args.seed + worker_id) trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, worker_init_fn=worker_init_fn) if args.n_gpu > 1: model = nn.DataParallel(model) model.train() ce_loss = CrossEntropyLoss()
dice_loss = DiceLoss(num_classes)
4
2023-10-24 17:49:10+00:00
4k
StackTipsLab/bloggy
bloggy/views/edit_profile_view.py
[ { "identifier": "settings", "path": "bloggy/settings.py", "snippet": "BASE_DIR = Path(__file__).resolve().parent.parent\nSECRET_KEY = os.getenv(\"SECRET_KEY\", get_random_secret_key())\nDEBUG = os.getenv(\"DEBUG\", \"False\") == \"True\"\nALLOWED_HOSTS = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1, localhost\").split(\",\")\nINTERNAL_IPS = ['127.0.0.1']\nSITE_URL = os.getenv(\"SITE_URL\")\nINSTALLED_APPS = [\n 'bloggy',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # sitemap\n 'django.contrib.sitemaps',\n\n # 'tinymce',\n 'widget_tweaks',\n 'django_summernote',\n 'whitenoise.runserver_nostatic',\n\n 'rest_framework',\n 'bloggy_api',\n 'mail_templated', # Used for templated email https://github.com/artemrizhov/django-mail-templated\n 'storages',\n 'debug_toolbar', # dev only\n\n 'hitcount',\n 'colorfield'\n]\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'bloggy.middleware.slash_middleware.AppendOrRemoveSlashMiddleware', # Remove slash from url\n\n # Cache\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware',\n # Cache\n\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n # Social login\n # 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'bloggy.middleware.redirect.RedirectMiddleware', # new articles mismatch url redirect\n]\nROOT_URLCONF = 'bloggy.urls'\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': os.path.join(BASE_DIR, '/bloggy/templates'),\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'bloggy.context_processors.seo_attrs',\n 'bloggy.context_processors.app_settings',\n\n # Social login\n # 'social_django.context_processors.backends',\n # 'social_django.context_processors.login_redirect',\n ],\n },\n },\n]\nWSGI_APPLICATION = 'bloggy.wsgi.application'\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.getenv('DB_NAME'),\n 'USER': os.getenv('DB_USER'),\n 'PASSWORD': os.getenv('DB_PASSWORD'),\n 'HOST': os.getenv('DB_HOST'),\n 'PORT': os.getenv('DB_PORT'),\n 'OPTIONS': {'charset': 'utf8mb4', 'use_unicode': True},\n }\n}\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\nSTATIC_URL = '/static/'\nUSE_SPACES = os.getenv('USE_SPACES') == 'True'\nAWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\nAWS_S3_ENDPOINT_URL = f'https://{os.getenv(\"AWS_S3_ENDPOINT_URL\")}'\n AWS_DEFAULT_ACL = 'public-read'\n AWS_QUERYSTRING_AUTH = False\n AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}\n AWS_LOCATION = 'static'\n STATIC_URL = f'{os.getenv(\"ASSETS_DOMAIN\")}/static/'\n STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n MEDIA_URL = '/media/'\n DEFAULT_FILE_STORAGE = 'bloggy.storage_backends.PublicMediaStorage'\n PRIVATE_MEDIA_LOCATION = 'private'\n PRIVATE_FILE_STORAGE = 'bloggy.storage_backends.PrivateMediaStorage'\n AWS_S3_CUSTOM_DOMAIN = 'media.stacktips.com'\n STATIC_URL = '/static/'\n STATIC_ROOT = os.path.join(BASE_DIR, 'bloggy/static')\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nTINYMCE_DEFAULT_CONFIG = {\n 'plugins': 'code',\n 'toolbar': 'code',\n}\nLOGIN_URL = 'login'\nLOGOUT_URL = 'logout'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/'\nAUTH_USER_MODEL = 'bloggy.User'\nAUTH_USER_DEFAULT_GROUP = 'bloggy-members'\nSUMMERNOTE_THEME = 'bs4'\nSUMMERNOTE_CONFIG = {\n 'iframe': True,\n 'summernote': {\n 'width': '1000',\n 'height': '720',\n 'styleTags': [\n 'p',\n {\n 'title': 'Blockquote',\n 'tag': 'blockquote',\n 'className': 'blockquote',\n 'value': 'blockquote'\n },\n {\n 'title': 'Code Block',\n 'tag': 'pre',\n 'className': 'prettyprint lang-java',\n 'value': 'pre'\n },\n 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'\n ],\n\n 'airMode': False,\n 'toolbar': [\n ['style', ['style']],\n ['font', ['bold', 'underline', 'clear']],\n ['color', ['color']],\n ['para', ['ul', 'ol', 'paragraph']],\n ['table', ['table']],\n ['insert', ['link', 'picture', 'code']],\n ['view', ['fullscreen', 'codeview', 'help']],\n ],\n },\n\n 'codemirror': {\n 'mode': 'htmlmixed',\n 'lineNumbers': 'true',\n 'theme': 'monokai',\n },\n\n 'css': (\n '//cdnjs.cloudflare.com/ajax/libs/codemirror/5.29.0/theme/monokai.min.css',\n ),\n 'attachment_require_authentication': True,\n 'attachment_upload_to': 'uploads/summernote',\n 'attachment_model': 'bloggy.Media',\n 'attachment_absolute_uri': False\n\n}\nMESSAGE_STORAGE = \"django.contrib.messages.storage.cookie.CookieStorage\"\nSITE_TITLE = os.getenv(\"SITE_TITLE\", \"Bloggy\")\nSITE_TAGLINE = os.getenv(\"SITE_TAGLINE\", \"A perfectly crafted blog that developers love.\")\nSITE_DESCRIPTION = os.getenv(\"SITE_DESCRIPTION\")\nSITE_LOGO = os.getenv(\"SITE_LOGO\")\nASSETS_DOMAIN = os.getenv(\"ASSETS_DOMAIN\")\nGOOGLE_RECAPTHCA_SECRET_KEY = os.getenv('GOOGLE_RECAPTHCA_SECRET_KEY')\nGOOGLE_RECAPTHCA_TOKEN_VERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify'\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'bloggy_api.pagination.CustomPaginatedResponse',\n 'PAGE_SIZE': 30,\n\n 'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ]\n}\nCACHE_TTL = 60 * 15\nCACHE_MIDDLEWARE_ALIAS = 'default' # which cache alias to use\nCACHE_MIDDLEWARE_SECONDS = CACHE_TTL # number of seconds to cache a page for (TTL)\nCACHE_MIDDLEWARE_KEY_PREFIX = '' # should be used if the cache is shared across multiple sites that use the same\nENABLE_CACHING = os.getenv(\"ENABLE_CACHING\", \"False\") == \"True\"\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 'LOCATION': os.getenv(\"MEMCACHIER_SERVERS\", \"127.0.0.1:11211\"),\n \"OPTIONS\": {\n \"binary\": True,\n # 'username': os.getenv(\"MEMCACHIER_USERNAME\", \"\"),\n # 'password': os.getenv(\"MEMCACHIER_PASSWORD\", \"\"),\n \"behaviors\": {\n \"ketama\": True,\n },\n },\n }\n }\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n }\nHITCOUNT_KEEP_HIT_ACTIVE = {'days': 0}\nHITCOUNT_KEEP_HIT_IN_DATABASE = {'days': 77}\nHITCOUNT_HITS_PER_IP_LIMIT = 0\nSHORTCODES_YOUTUBE_JQUERY = False\nPING_INDEX_NOW_POST_UPDATE = os.getenv(\"PING_INDEX_NOW_POST_UPDATE\", \"True\")\nPING_GOOGLE_POST_UPDATE = os.getenv(\"PING_GOOGLE_POST_UPDATE\", \"True\")\nINDEX_NOW_API_KEY = os.getenv(\"INDEX_NOW_API_KEY\", )\nEMAIL_BACKEND = os.getenv('EMAIL_BACKEND')\nEMAIL_HOST = os.getenv('EMAIL_HOST')\nEMAIL_PORT = os.getenv('EMAIL_PORT')\nEMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')\nEMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', \"True\")\nDEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL')\nEMAIL_FILE_PATH = os.getenv('EMAIL_FILE_PATH', os.path.join(BASE_DIR, 'test-emails'))\nPOST_TYPE_CHOICES = os.getenv('POST_TYPE_CHOICES')\nSHOW_EMTPY_CATEGORIES = os.getenv(\"SHOW_EMTPY_CATEGORIES\", \"False\") == \"True\"\nLOAD_GOOGLE_TAG_MANAGER = os.getenv(\"LOAD_GOOGLE_TAG_MANAGER\", \"False\") == \"True\"\nLOAD_GOOGLE_ADS = os.getenv(\"LOAD_GOOGLE_ADS\", \"False\") == \"True\"\nMY_ADS_TXT_CONTENT = os.getenv('MY_ADS_TXT_CONTENT')\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n \"propagate\": False,\n },\n },\n}\ndef get_post_types():" }, { "identifier": "EditProfileForm", "path": "bloggy/forms/edit_profile_form.py", "snippet": "class EditProfileForm(forms.ModelForm):\n class Meta:\n model = User\n fields = [\n 'profile_photo',\n 'name',\n 'website',\n 'twitter',\n 'linkedin',\n 'youtube',\n 'github',\n 'bio'\n ]\n\n widgets = {\n 'profile_photo': NonClearableFileInput(attrs={\n 'class': 'form-control-file'\n }),\n 'name': forms.TextInput(attrs={\n 'class': 'form-control',\n 'rows': 5,\n 'placeholder': 'Enter first name'\n }),\n 'bio': forms.Textarea(attrs={\n 'class': 'form-control',\n 'rows': 3,\n 'placeholder': \"About you\"\n }),\n 'website': forms.TextInput(attrs={\n 'class': 'form-control',\n 'rows': 5,\n 'placeholder': 'Your website'\n }),\n 'twitter': forms.TextInput(attrs={\n 'class': 'form-control',\n 'rows': 5,\n 'placeholder': 'Your twitter'\n }),\n 'linkedin': forms.TextInput(attrs={\n 'class': 'form-control',\n 'rows': 5,\n 'placeholder': 'Your linkedin'\n }),\n 'youtube': forms.TextInput(attrs={\n 'class': 'form-control',\n 'rows': 5,\n 'placeholder': 'Your youtube channel link'\n }),\n 'github': forms.TextInput(attrs={\n 'class': 'form-control',\n 'rows': 5,\n 'placeholder': 'Your github'\n }),\n }" }, { "identifier": "User", "path": "bloggy/models.py", "snippet": "" }, { "identifier": "sanitize_url", "path": "bloggy/templatetags/custom_widgets.py", "snippet": "@register.simple_tag\ndef sanitize_url(url):\n if url is None:\n return url\n\n if not re.match('(?:http|https)://', url):\n return f'https://{url}'\n return url" } ]
import os from django.shortcuts import get_object_or_404 from django.template.context_processors import static from django.views.generic import FormView from bloggy import settings from bloggy.forms.edit_profile_form import EditProfileForm from bloggy.models import User from bloggy.templatetags.custom_widgets import sanitize_url
3,185
class EditProfileView(FormView): template_name = "profile/edit_profile.html" model = User
class EditProfileView(FormView): template_name = "profile/edit_profile.html" model = User
form_class = EditProfileForm
1
2023-10-17 14:50:39+00:00
4k
openvinotoolkit/openvino.genai
llm_bench/python/utils/conversion_utils/better_transformer_patch.py
[ { "identifier": "_make_causal_mask", "path": "llm_bench/python/utils/conversion_utils/convert_patch.py", "snippet": "def _make_causal_mask(\n input_ids_shape: torch.Size,\n device: torch.device,\n past_key_values_length: int,\n dtype: torch.dtype = torch.bool,\n) -> torch.BoolTensor:\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n batch_size, target_length = input_ids_shape\n mask = torch.zeros((target_length, target_length + past_key_values_length), dtype=dtype, device=device)\n seq_ids = torch.arange(target_length, device=device)\n\n mask[:, past_key_values_length:] = (\n (seq_ids[:, None] < seq_ids[None, :]) * torch.finfo(dtype).min\n if torch.is_floating_point(mask)\n else seq_ids[:, None] < seq_ids[None, :]\n )\n\n return mask[None, None, :, :].expand(batch_size, 1, target_length, target_length + past_key_values_length)" }, { "identifier": "_expand_mask", "path": "llm_bench/python/utils/conversion_utils/convert_patch.py", "snippet": "def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n bsz, src_len = mask.size()\n tgt_len = tgt_len if tgt_len is not None else src_len\n\n expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)\n\n inverted_mask = 1.0 - expanded_mask\n\n return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)" } ]
import math import torch from torch import nn from typing import Optional, Tuple, Union from transformers import PretrainedConfig from optimum.bettertransformer.models.attention import ( codegen_wrapped_scaled_dot_product, ) from .convert_patch import _make_causal_mask, _expand_mask from optimum.bettertransformer.models import BetterTransformerManager from optimum.bettertransformer.models.base import BetterTransformerBaseLayer
3,332
self.rotary_emb = RotaryEmbedding( self.rotary_ndims, max_position_embeddings=self.config.max_position_embeddings, base=self.config.rope_theta, ) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view( bsz, q_len, self.num_heads, self.head_dim ).transpose(1, 2) key_states = key_states.view( bsz, q_len, self.num_key_value_heads, self.head_dim ).transpose(1, 2) value_states = value_states.view( bsz, q_len, self.num_key_value_heads, self.head_dim ).transpose(1, 2) query_rot = query_states[..., : self.rotary_ndims] query_pass = query_states[..., self.rotary_ndims :] key_rot = key_states[..., : self.rotary_ndims] key_pass = key_states[..., self.rotary_ndims :] kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb( query_rot, key_rot, cos, sin, position_ids ) # [batch_size, num_heads, seq_len, head_dim] query_states = torch.cat((query_states, query_pass), dim=-1) key_states = torch.cat((key_states, key_pass), dim=-1) if past_key_value is not None: # Reuse k, v, self_attention key_states = torch.cat((past_key_value[0], key_states), dim=2) value_states = torch.cat((past_key_value[1], value_states), dim=2) past_key_value = (key_states, value_states) if use_cache else None # Repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul( query_states, key_states.transpose(2, 3) ) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # Upcast attention to fp32 attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) # Merge heads attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) # Final linear projection attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def _bt_prepare_decoder_attention_mask( self, attention_mask, input_shape, inputs_embeds, past_key_values_length ): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None # We do not care about the attention mask in the batch size = 1 case if attention_mask.size(0) > 1: if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
# -*- coding: utf-8 -*- # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand( batch, num_key_value_heads, n_rep, slen, head_dim ) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def fixed_pos_embedding(x, seq_dim=1, seq_len=None): dim = x.shape[-1] if seq_len is None: seq_len = x.shape[seq_dim] inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim)) sinusoid_inp = ( torch.einsum("i , j -> i j", torch.arange(seq_len, dtype=torch.float), inv_freq) .to(x.device) .float() ) return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp) # Copied from transformers.models.gptj.modeling_gptj.rotate_every_two def rotate_every_two(x): x1 = x[:, :, :, ::2] x2 = x[:, :, :, 1::2] x = torch.stack((-x2, x1), dim=-1) return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)') # Copied from transformers.models.gptj.modeling_gptj.duplicate_interleave def duplicate_interleave(m): """ A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy. """ dim0 = m.shape[0] m = m.view(-1, 1) # flatten the matrix m = m.repeat(1, 2) # repeat all elements into the 2nd dimension m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy return m # Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb def codegen_apply_rotary_pos_emb(x, sincos, offset=0): sin, cos = map( lambda t: duplicate_interleave(t)[None, offset : x.shape[1] + offset, None, :], sincos, ) # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2) return (x * cos) + (rotate_every_two(x) * sin) class RotaryEmbedding(nn.Module): def __init__( self, dim: int, max_position_embeddings: int, base: int = 10_000, device: Optional[torch.device] = None, ): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / ( self.base ** ( torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim ) ) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype(), ) def _set_cos_sin_cache( self, seq_len: int, device: torch.device, dtype: torch.dtype ): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32) # Don't do einsum, it converts fp32 to fp16 under AMP # freqs = torch.einsum("i,j->ij", t, self.inv_freq) freqs = torch.outer(t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer( "cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False ) self.register_buffer( "sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False ) def forward(self, x: torch.Tensor, seq_len: Optional[int] = None): # x: [batch_size, num_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache( seq_len=seq_len, device=x.device, dtype=torch.get_default_dtype() ) return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), ) def rotate_half(x: torch.Tensor): """Rotates half the hidden dims of the input.""" x1, x2 = torch.chunk(x, 2, dim=-1) return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [batch_size, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [batch_size, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class StableLMAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.q_proj = nn.Linear( self.hidden_size, self.num_heads * self.head_dim, bias=False ) self.k_proj = nn.Linear( self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False ) self.v_proj = nn.Linear( self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False ) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self._init_rope() def _init_rope(self): self.rotary_ndims = int(self.head_dim * self.config.rope_pct) self.rotary_emb = RotaryEmbedding( self.rotary_ndims, max_position_embeddings=self.config.max_position_embeddings, base=self.config.rope_theta, ) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view( bsz, q_len, self.num_heads, self.head_dim ).transpose(1, 2) key_states = key_states.view( bsz, q_len, self.num_key_value_heads, self.head_dim ).transpose(1, 2) value_states = value_states.view( bsz, q_len, self.num_key_value_heads, self.head_dim ).transpose(1, 2) query_rot = query_states[..., : self.rotary_ndims] query_pass = query_states[..., self.rotary_ndims :] key_rot = key_states[..., : self.rotary_ndims] key_pass = key_states[..., self.rotary_ndims :] kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb( query_rot, key_rot, cos, sin, position_ids ) # [batch_size, num_heads, seq_len, head_dim] query_states = torch.cat((query_states, query_pass), dim=-1) key_states = torch.cat((key_states, key_pass), dim=-1) if past_key_value is not None: # Reuse k, v, self_attention key_states = torch.cat((past_key_value[0], key_states), dim=2) value_states = torch.cat((past_key_value[1], value_states), dim=2) past_key_value = (key_states, value_states) if use_cache else None # Repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul( query_states, key_states.transpose(2, 3) ) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # Upcast attention to fp32 attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) # Merge heads attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) # Final linear projection attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def _bt_prepare_decoder_attention_mask( self, attention_mask, input_shape, inputs_embeds, past_key_values_length ): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None # We do not care about the attention mask in the batch size = 1 case if attention_mask.size(0) > 1: if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(
1
2023-10-16 13:38:16+00:00
4k
Iniquitatis/sd-webui-temporal
scripts/main.py
[ { "identifier": "get_first_element", "path": "temporal/collection_utils.py", "snippet": "def get_first_element(coll, fallback = None):\n return next(iter(coll)) if coll else fallback" }, { "identifier": "load_text", "path": "temporal/fs.py", "snippet": "def load_text(path, fallback = None):\n if not path.is_file():\n return fallback\n\n with open_utf8(path, \"r\") as file:\n return file.read()" }, { "identifier": "BLEND_MODES", "path": "temporal/image_blending.py", "snippet": "def blend_images(npim, modulator, mode):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\n def D(b):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):\ndef _(b, s):" }, { "identifier": "GENERATION_MODES", "path": "temporal/image_generation.py", "snippet": "def _(p, ext_params):\ndef _(p, ext_params):\ndef _process_image(job_title, p, use_sd = True):\ndef _apply_prompt_styles(p):\ndef _setup_processing(p, ext_params):\ndef _make_image_buffer(p, ext_params):\ndef _apply_relative_params(ext_params, denoising_strength):\ndef _process_iteration(p, ext_params, image_buffer, image, i, frame_index):\ndef _save_processed_image(p, processed, output_dir, file_name = None, archive_mode = False):" }, { "identifier": "PREPROCESSORS", "path": "temporal/image_preprocessing.py", "snippet": "def preprocess_image(im, ext_params, seed):\n def __init__(self, type, key, name, **kwargs):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\n def stretch_array(arr, new_length):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _(npim, seed, params):\ndef _apply_mask(npim, processed, amount, blend_mode, mask, normalized, inverted, blurring, reference):\nclass UIParam:" }, { "identifier": "EXTENSION_DIR", "path": "temporal/interop.py", "snippet": "EXTENSION_DIR = Path(scripts.basedir())" }, { "identifier": "Metrics", "path": "temporal/metrics.py", "snippet": "class Metrics:\n def __init__(self):\n self.luminance_mean = []\n self.luminance_std = []\n self.color_level_mean = []\n self.color_level_std = []\n self.noise_sigma = []\n\n def measure(self, im):\n npim = skimage.img_as_float(im)\n grayscale = skimage.color.rgb2gray(npim[..., :3], channel_axis = 2)\n red, green, blue = npim[..., 0], npim[..., 1], npim[..., 2]\n\n self.luminance_mean.append(np.mean(grayscale))\n self.luminance_std.append(np.std(grayscale))\n self.color_level_mean.append([np.mean(red), np.mean(green), np.mean(blue)])\n self.color_level_std.append([np.std(red), np.std(green), np.std(blue)])\n self.noise_sigma.append(skimage.restoration.estimate_sigma(npim, average_sigmas = True, channel_axis = 2))\n\n def load(self, project_dir):\n metrics_dir = project_dir / \"metrics\"\n\n if data := load_json(metrics_dir / \"data.json\"):\n load_object(self, data, metrics_dir)\n\n def save(self, project_dir):\n metrics_dir = ensure_directory_exists(project_dir / \"metrics\")\n\n save_json(metrics_dir / \"data.json\", save_object(self, metrics_dir))\n\n def plot(self, project_dir, save_images = False):\n metrics_dir = ensure_directory_exists(project_dir / \"metrics\")\n\n result = []\n\n @contextmanager\n def figure(title, path):\n plt.title(title)\n plt.xlabel(\"Frame\")\n plt.ylabel(\"Level\")\n plt.grid()\n\n try:\n yield\n finally:\n plt.legend()\n\n buffer = BytesIO()\n plt.savefig(buffer, format = \"png\")\n buffer.seek(0)\n\n im = Image.open(buffer)\n im.load()\n\n if save_images:\n save_image(im, path)\n\n result.append(im)\n\n plt.close()\n\n def plot_noise_graph(data, label, color):\n plt.axhline(data[0], color = color, linestyle = \":\", linewidth = 0.5)\n plt.axhline(np.mean(data), color = color, linestyle = \"--\", linewidth = 1.0)\n plt.plot(data, color = color, label = label, linestyle = \"--\", linewidth = 0.5, marker = \"+\", markersize = 3)\n\n if data.size > 3:\n plt.plot(scipy.signal.savgol_filter(data, min(data.size, 51), 3), color = color, label = f\"{label} (smoothed)\", linestyle = \"-\")\n\n with figure(\"Luminance mean\", metrics_dir / \"luminance_mean.png\"):\n plot_noise_graph(np.array(self.luminance_mean), \"Luminance\", \"gray\")\n\n with figure(\"Luminance standard deviation\", metrics_dir / \"luminance_std.png\"):\n plot_noise_graph(np.array(self.luminance_std), \"Luminance\", \"gray\")\n\n with figure(\"Color level mean\", metrics_dir / \"color_level_mean.png\"):\n plot_noise_graph(np.array(self.color_level_mean)[..., 0], \"Red\", \"darkred\")\n plot_noise_graph(np.array(self.color_level_mean)[..., 1], \"Green\", \"darkgreen\")\n plot_noise_graph(np.array(self.color_level_mean)[..., 2], \"Blue\", \"darkblue\")\n\n with figure(\"Color level standard deviation\", metrics_dir / \"color_level_std.png\"):\n plot_noise_graph(np.array(self.color_level_std)[..., 0], \"Red\", \"darkred\")\n plot_noise_graph(np.array(self.color_level_std)[..., 1], \"Green\", \"darkgreen\")\n plot_noise_graph(np.array(self.color_level_std)[..., 2], \"Blue\", \"darkblue\")\n\n with figure(\"Noise sigma\", metrics_dir / \"noise_sigma.png\"):\n plot_noise_graph(np.array(self.noise_sigma), \"Noise sigma\", \"royalblue\")\n\n return result" }, { "identifier": "delete_preset", "path": "temporal/presets.py", "snippet": "PRESETS_DIR = EXTENSION_DIR / \"presets\"\ndef refresh_presets():\ndef load_preset(name, ext_params):\ndef save_preset(name, ext_params):\ndef delete_preset(name):" }, { "identifier": "saved_ext_param_ids", "path": "temporal/session.py", "snippet": "def get_last_frame_index(frame_dir):\n def get_index(path):\ndef load_last_frame(frame_dir):\ndef load_session(p, ext_params, project_dir):\ndef save_session(p, ext_params, project_dir):" }, { "identifier": "match_mask", "path": "temporal/string_utils.py", "snippet": "def match_mask(string, mask):\n return bool(re.match(r\"\\b\" + mask.replace(\"*\", r\".+?\") + r\"\\b\", string))" }, { "identifier": "wait_until", "path": "temporal/time_utils.py", "snippet": "def wait_until(func, interval = 1, time_limit = None):\n total_time = 0\n\n while (not func()) and (time_limit is None or total_time < time_limit):\n sleep(interval)\n\n total_time += interval" }, { "identifier": "FILTERS", "path": "temporal/video_filtering.py", "snippet": "def build_filter(ext_params):\n def __init__(self, type, key, name, **kwargs):\ndef _(fps, params):\ndef _(fps, params):\ndef _(fps, params):\ndef _(fps, params):\ndef _(fps, params):\ndef _(fps, params):\ndef _(fps, params):\ndef _(fps, params):\nclass UIParam:" }, { "identifier": "start_video_render", "path": "temporal/video_rendering.py", "snippet": "def start_video_render(ext_params, is_final):\ndef render_video(ext_params, is_final):" } ]
from pathlib import Path from types import SimpleNamespace from modules import scripts from modules.sd_samplers import visible_sampler_names from modules.ui_components import InputAccordion, ToolButton from temporal.collection_utils import get_first_element from temporal.fs import load_text from temporal.image_blending import BLEND_MODES from temporal.image_generation import GENERATION_MODES from temporal.image_preprocessing import PREPROCESSORS from temporal.interop import EXTENSION_DIR from temporal.metrics import Metrics from temporal.presets import delete_preset, load_preset, preset_names, refresh_presets, save_preset from temporal.session import saved_ext_param_ids from temporal.string_utils import match_mask from temporal.time_utils import wait_until from temporal.video_filtering import FILTERS from temporal.video_rendering import start_video_render, video_render_queue import gradio as gr
2,778
class UI: def __init__(self, id_formatter): self._id_formatter = id_formatter self._elems = {} self._ids = [] self._groups = {} self._callbacks = {} self._existing_labels = set() def parse_ids(self, ids): result = [] for id in ids: if id.startswith("group:"): _, group = id.split(":") result.extend(x for x in self._ids if self.is_in_group(x, group)) else: result.extend(x for x in self._ids if match_mask(x, id)) return result def is_in_group(self, id, group): return any(match_mask(x, group) for x in self._groups[id]) def elem(self, id, constructor, *args, groups = [], **kwargs): def unique_label(string): if string in self._existing_labels: string = unique_label(string + " ") self._existing_labels.add(string) return string if "label" in kwargs: kwargs["label"] = unique_label(kwargs["label"]) elem = constructor(*args, elem_id = self._id_formatter(id), **kwargs) if id: self._ids.append(id) self._elems[id] = elem self._groups[id] = ["all"] + groups self._callbacks[id] = [] return elem def callback(self, id, event, func, inputs, outputs): self._callbacks[id].append((event, func, inputs, outputs)) def finalize(self, ids): for id, callbacks in self._callbacks.items(): for event, func, inputs, outputs in callbacks: event_func = getattr(self._elems[id], event) event_func( func, inputs = [self._elems[x] for x in self.parse_ids(inputs)], outputs = [self._elems[x] for x in self.parse_ids(outputs)], ) result = [self._elems[x] for x in self.parse_ids(ids)] self._id_formatter = None self._elems.clear() self._existing_labels.clear() return result def unpack_values(self, ids, *args): return SimpleNamespace(**{name: arg for name, arg in zip(self.parse_ids(ids), args)}) class TemporalScript(scripts.Script): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) refresh_presets() def title(self): return "Temporal" def show(self, is_img2img): return is_img2img def ui(self, is_img2img): self._ui = ui = UI(self.elem_id) with ui.elem("", gr.Row): def refresh_presets_callback(): refresh_presets() return gr.update(choices = preset_names) def load_preset_callback(preset, *args): ext_params = ui.unpack_values(["group:params"], *args) load_preset(preset, ext_params) return [gr.update(value = v) for v in vars(ext_params).values()] def save_preset_callback(preset, *args): ext_params = ui.unpack_values(["group:params"], *args)
class UI: def __init__(self, id_formatter): self._id_formatter = id_formatter self._elems = {} self._ids = [] self._groups = {} self._callbacks = {} self._existing_labels = set() def parse_ids(self, ids): result = [] for id in ids: if id.startswith("group:"): _, group = id.split(":") result.extend(x for x in self._ids if self.is_in_group(x, group)) else: result.extend(x for x in self._ids if match_mask(x, id)) return result def is_in_group(self, id, group): return any(match_mask(x, group) for x in self._groups[id]) def elem(self, id, constructor, *args, groups = [], **kwargs): def unique_label(string): if string in self._existing_labels: string = unique_label(string + " ") self._existing_labels.add(string) return string if "label" in kwargs: kwargs["label"] = unique_label(kwargs["label"]) elem = constructor(*args, elem_id = self._id_formatter(id), **kwargs) if id: self._ids.append(id) self._elems[id] = elem self._groups[id] = ["all"] + groups self._callbacks[id] = [] return elem def callback(self, id, event, func, inputs, outputs): self._callbacks[id].append((event, func, inputs, outputs)) def finalize(self, ids): for id, callbacks in self._callbacks.items(): for event, func, inputs, outputs in callbacks: event_func = getattr(self._elems[id], event) event_func( func, inputs = [self._elems[x] for x in self.parse_ids(inputs)], outputs = [self._elems[x] for x in self.parse_ids(outputs)], ) result = [self._elems[x] for x in self.parse_ids(ids)] self._id_formatter = None self._elems.clear() self._existing_labels.clear() return result def unpack_values(self, ids, *args): return SimpleNamespace(**{name: arg for name, arg in zip(self.parse_ids(ids), args)}) class TemporalScript(scripts.Script): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) refresh_presets() def title(self): return "Temporal" def show(self, is_img2img): return is_img2img def ui(self, is_img2img): self._ui = ui = UI(self.elem_id) with ui.elem("", gr.Row): def refresh_presets_callback(): refresh_presets() return gr.update(choices = preset_names) def load_preset_callback(preset, *args): ext_params = ui.unpack_values(["group:params"], *args) load_preset(preset, ext_params) return [gr.update(value = v) for v in vars(ext_params).values()] def save_preset_callback(preset, *args): ext_params = ui.unpack_values(["group:params"], *args)
save_preset(preset, ext_params)
7
2023-10-15 18:49:12+00:00
4k
zabbix/python-zabbix-utils
zabbix_utils/api.py
[ { "identifier": "ModuleUtils", "path": "zabbix_utils/common.py", "snippet": "class ModuleUtils():\n\n # Hidding mask for sensitive data\n HIDING_MASK = \"*\" * 8\n\n # The main php-file of Zabbix API\n JSONRPC_FILE = 'api_jsonrpc.php'\n\n # Methods working without auth token\n UNAUTH_METHODS = ('apiinfo.version', 'user.login', 'user.checkAuthentication')\n\n # Methods returning files contents\n FILES_METHODS = ('configuration.export',)\n\n # List of private fields and regular expressions to hide them\n PRIVATE_FIELDS = {\n \"token\": r\"^.+$\",\n \"auth\": r\"^.+$\",\n \"sessionid\": r\"^.+$\",\n \"password\": r\"^.+$\",\n \"result\": r\"^[A-Za-z0-9]{32}$\",\n }\n\n @classmethod\n def check_url(cls, url: str) -> str:\n \"\"\"Check url completeness\n\n Args:\n url (str): Unchecked URL of Zabbix API\n\n Returns:\n str: Checked URL of Zabbix API\n \"\"\"\n\n if not url.endswith(cls.JSONRPC_FILE):\n url += cls.JSONRPC_FILE if url[-1] == '/' else '/' + cls.JSONRPC_FILE\n if not url.startswith('http'):\n url = 'http://' + url\n\n return url\n\n @classmethod\n def mask_secret(cls, string: str, show_len: int = 4) -> str:\n \"\"\"Replace the most part of string to hiding mask.\n\n Args:\n string (str): Raw string with without hiding.\n show_len (int, optional): Number of signs shown on each side of the string. \\\nDefaults to 4.\n\n Returns:\n str: String with hiding part.\n \"\"\"\n\n # If show_len is 0 or the length of the string is smaller than the hiding mask length\n # and show_len from both sides of the string, return only hiding mask.\n if show_len == 0 or len(string) <= (len(cls.HIDING_MASK) + show_len*2):\n return cls.HIDING_MASK\n\n # Return the string with the hiding mask, surrounded by the specified number of characters\n # to display on each side of the string.\n return f\"{string[:show_len]}{cls.HIDING_MASK}{string[-show_len:]}\"\n\n @classmethod\n def hide_private(cls, input_data: dict, fields: dict = None) -> dict:\n \"\"\"Hide private data Zabbix info (e.g. token, password)\n\n Args:\n input_data (dict): Input dictionary with private fields.\n fields (dict): Dictionary of private fields and their filtering regexps.\n\n Returns:\n dict: Result dictionary without private data.\n \"\"\"\n\n private_fields = fields if fields else cls.PRIVATE_FIELDS\n\n if not isinstance(input_data, dict):\n raise TypeError(f\"Unsupported data type '{type(input_data).__name__}', \\\nonly 'dict' is expected\")\n\n def gen_repl(match: Match):\n return cls.mask_secret(match.group(0))\n\n def hide_str(k, v):\n return re.sub(private_fields[k], gen_repl, v)\n\n def hide_dict(v):\n return cls.hide_private(v)\n\n def hide_list(v):\n result = []\n for item in v:\n if isinstance(item, dict):\n result.append(hide_dict(item))\n continue\n if isinstance(item, list):\n result.append(hide_list(item))\n continue\n if isinstance(item, str):\n if 'result' in private_fields:\n result.append(hide_str('result', item))\n continue\n result.append(item)\n return result\n\n result_data = input_data.copy()\n\n for key, value in result_data.items():\n if isinstance(value, str):\n if key in private_fields:\n result_data[key] = hide_str(key, value)\n if isinstance(value, dict):\n result_data[key] = hide_dict(value)\n if isinstance(value, list):\n result_data[key] = hide_list(value)\n\n return result_data" }, { "identifier": "EmptyHandler", "path": "zabbix_utils/logger.py", "snippet": "class EmptyHandler(logging.Handler):\n \"\"\"Empty logging handler.\"\"\"\n\n def emit(self, *args, **kwargs):\n pass" }, { "identifier": "SensitiveFilter", "path": "zabbix_utils/logger.py", "snippet": "class SensitiveFilter(logging.Filter):\n \"\"\"Filter to hide sensitive Zabbix info (password, auth) in logs\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __hide_data(self, raw_data):\n return json.dumps(ModuleUtils.hide_private(raw_data), indent=4, separators=(',', ': '))\n\n def filter(self, record):\n if isinstance(record.args, tuple):\n record.args = tuple(self.__hide_data(arg)\n if isinstance(arg, dict) else arg for arg in record.args)\n if isinstance(record.args, dict):\n record.args = self.__hide_data(record.args)\n\n return 1" }, { "identifier": "APIRequestError", "path": "zabbix_utils/exceptions.py", "snippet": "class APIRequestError(ModuleBaseException):\n \"\"\"Exception class when Zabbix API returns error by request.\n\n Args:\n api_error (Union[str, dict]): Raw error message from Zabbix API.\n \"\"\"\n def __init__(self, api_error: Union[str, dict]):\n if isinstance(api_error, dict):\n api_error['body'] = ModuleUtils.hide_private(api_error['body'])\n super().__init__(\"{message} {data}\".format(**api_error))\n for key, value in api_error.items():\n setattr(self, key, value)\n else:\n super().__init__(api_error)" }, { "identifier": "APINotSupported", "path": "zabbix_utils/exceptions.py", "snippet": "class APINotSupported(ModuleBaseException):\n \"\"\"Exception class when object/action is not supported by Zabbix API.\n\n Args:\n message (str): Not supported object/action message.\n\n version (str): Current version of Zabbix API.\n \"\"\"\n\n def __init__(self, message: str, version: str = None):\n if version:\n message = f\"{message} is unsupported for Zabbix {version} version\"\n super().__init__(message)" }, { "identifier": "ProcessingError", "path": "zabbix_utils/exceptions.py", "snippet": "class ProcessingError(ModuleBaseException):\n def __init__(self, *args):\n super().__init__(\" \".join(map(str, args)))\n return" }, { "identifier": "__version__", "path": "zabbix_utils/version.py", "snippet": "" } ]
import re import ssl import json import base64 import logging import urllib.request as ul from textwrap import shorten from uuid import uuid4 from os import environ as env from urllib.error import URLError from typing import Callable, Union, Any, List from typing import Self # type: ignore from typing_extensions import Self from .common import ModuleUtils from .logger import EmptyHandler, SensitiveFilter from .exceptions import APIRequestError, APINotSupported, ProcessingError from .version import __version__, __min_supported__, __max_supported__
1,938
# zabbix_utils # # Copyright (C) 2001-2023 Zabbix SIA # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software # is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # For Python less 3.11 compatibility try: except ImportError: log = logging.getLogger(__name__)
# zabbix_utils # # Copyright (C) 2001-2023 Zabbix SIA # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software # is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # For Python less 3.11 compatibility try: except ImportError: log = logging.getLogger(__name__)
log.addHandler(EmptyHandler())
1
2023-10-16 12:49:35+00:00
4k
miccunifi/TAPE
models/mrsff.py
[ { "identifier": "compute_mask_2D", "path": "utils/utils_models.py", "snippet": "def compute_mask_2D(H: int, W: int, window_size: Tuple[int], shift_size: Tuple[int], device: torch.device) -> torch.Tensor:\n \"\"\"\n Compute 2D mask for window-based multi-head self-attention\n \"\"\"\n img_mask = torch.zeros((1, H, W, 1), device=device) # 1 H W 1\n h_slices = (slice(-window_size[0]),\n slice(-window_size[0], -shift_size[0]),\n slice(-shift_size[0], None))\n w_slices = (slice(-window_size[1]),\n slice(-window_size[1], -shift_size[1]),\n slice(-shift_size[1], None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition_2D(img_mask, window_size) # nW, window_size[0]*window_size[1], 1\n mask_windows = mask_windows.squeeze(-1) # nW, window_size[0]*window_size[1]\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n return attn_mask" }, { "identifier": "window_partition_2D", "path": "utils/utils_models.py", "snippet": "def window_partition_2D(x: torch.Tensor, window_size: Tuple[int]) -> torch.Tensor:\n \"\"\" Partition the input into windows. Attention will be conducted within the windows.\n Args:\n x (torch.Tensor): (B, H, W, C)\n window_size (tuple[int]): window size\n Returns:\n windows (torch.Tensor): (num_windows*B, window_size*window_size, C)\n \"\"\"\n B, H, W, C = x.shape\n x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, reduce(mul, window_size), C)\n return windows" }, { "identifier": "window_reverse_2D", "path": "utils/utils_models.py", "snippet": "def window_reverse_2D(windows: torch.Tensor, window_size: Tuple[int], B: int, H: int, W: int) -> torch.Tensor:\n \"\"\"\n Args:\n windows (torch.Tensor): (num_windows*B, window_size, window_size, C)\n window_size (tuple[int]): Window size\n B (int): Batch size\n H (int): Height of image\n W (int): Width of image\n Returns:\n x (torch.Tensor): (B, H, W, C)\n \"\"\"\n x = windows.view(B, H // window_size[0], W // window_size[0], window_size[0], window_size[1], -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x" }, { "identifier": "get_window_size", "path": "utils/utils_models.py", "snippet": "def get_window_size(x_size: Tuple[int], window_size: Tuple[int], shift_size: Tuple[int] = None)\\\n -> Tuple[int] | Tuple[Tuple[int]]:\n use_window_size = list(window_size)\n if shift_size is not None:\n use_shift_size = list(shift_size)\n for i in range(len(x_size)):\n if x_size[i] <= window_size[i]:\n use_window_size[i] = x_size[i]\n if shift_size is not None:\n use_shift_size[i] = 0\n\n if shift_size is None:\n return tuple(use_window_size)\n else:\n return tuple(use_window_size), tuple(use_shift_size)" }, { "identifier": "DropPath", "path": "utils/utils_models.py", "snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n def __init__(self, drop_prob=None, scale_by_keep=True):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n self.scale_by_keep = scale_by_keep\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)" }, { "identifier": "Mlp", "path": "utils/utils_models.py", "snippet": "class Mlp(nn.Module):\n \"\"\" Multilayer perceptron.\"\"\"\n\n def __init__(self, in_features: int, hidden_features: int = None, out_features: int = None,\n act_layer: nn.Module = nn.GELU, drop: float = 0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x" }, { "identifier": "trunc_normal_", "path": "utils/utils_models.py", "snippet": "def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution.\n From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py\n The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n w = torch.empty(3, 5)\n nn.init.trunc_normal_(w)\n \"\"\"\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)" } ]
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from typing import Tuple from einops import rearrange from utils.utils_models import (compute_mask_2D, window_partition_2D, window_reverse_2D, get_window_size, DropPath, Mlp, trunc_normal_)
2,950
class AttentionPooling1d(nn.Module): """ Inspired by https://amaarora.github.io/posts/2023-03-11_Understanding_CLIP_part_2.html and https://github.com/openai/CLIP/blob/a1d071733d7111c9c014f024669f959182114e33/clip/model.py#L58 Args: dim (int): Input dimension. num_heads (int): Number of attention heads. sequence_length (int): Length of the sequence of transformer tokens. """ def __init__(self, dim: int, num_heads: int, sequence_length: int): super().__init__() self.sequence_length = sequence_length self.pos_embedding = nn.Parameter(torch.randn(sequence_length, dim) / dim ** 0.5) self.q_proj = nn.Linear(dim, dim) self.k_proj = nn.Linear(dim, dim) self.v_proj = nn.Linear(dim, dim) self.out_proj = nn.Linear(dim, dim) self.num_heads = num_heads def forward(self, x: torch.Tensor) -> torch.Tensor: """ Args: x (torch.Tensor): (B*T, M, N, C) Returns: x (torch.Tensor): (B*T, N, C) """ avg = x.mean(dim=1, keepdim=True) # (B*T, 1, N, C) x = torch.cat([avg, x], dim=1) # (B*T, M+1, N, C) x = x + self.pos_embedding[None, None, :, :] # (B*T, M+1, N, C) x = rearrange(x, 'b m n c -> (m n) b c') # ((M+1)*N, B*T, C) x, _ = F.multi_head_attention_forward( query=x[:self.sequence_length], key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0, out_proj_weight=self.out_proj.weight, out_proj_bias=self.out_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False ) x = rearrange(x, 'n b c -> b n c') # (B*T, N, C) return x class MultiReferenceWindowAttention(nn.Module): """ Multi-Reference-(Shifted)Window-Multi-head Cross Attention (MR-(S)W-MCA) module with relative position bias. It supports both shifted and non-shifted window. The query is the restored features, while the key and values are the reference features. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 """ def __init__(self, dim: int, window_size: Tuple[int], num_heads: int, qkv_bias: bool = True, qk_scale: float = None, attn_drop: float = 0., proj_drop: float = 0.): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer("relative_position_index", relative_position_index) self.q = nn.Linear(dim, dim, bias=qkv_bias) self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) self.act = nn.GELU() self.dim_reduction = AttentionPooling1d(dim=dim, num_heads=num_heads, sequence_length=window_size[0] * window_size[1]) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop)
class AttentionPooling1d(nn.Module): """ Inspired by https://amaarora.github.io/posts/2023-03-11_Understanding_CLIP_part_2.html and https://github.com/openai/CLIP/blob/a1d071733d7111c9c014f024669f959182114e33/clip/model.py#L58 Args: dim (int): Input dimension. num_heads (int): Number of attention heads. sequence_length (int): Length of the sequence of transformer tokens. """ def __init__(self, dim: int, num_heads: int, sequence_length: int): super().__init__() self.sequence_length = sequence_length self.pos_embedding = nn.Parameter(torch.randn(sequence_length, dim) / dim ** 0.5) self.q_proj = nn.Linear(dim, dim) self.k_proj = nn.Linear(dim, dim) self.v_proj = nn.Linear(dim, dim) self.out_proj = nn.Linear(dim, dim) self.num_heads = num_heads def forward(self, x: torch.Tensor) -> torch.Tensor: """ Args: x (torch.Tensor): (B*T, M, N, C) Returns: x (torch.Tensor): (B*T, N, C) """ avg = x.mean(dim=1, keepdim=True) # (B*T, 1, N, C) x = torch.cat([avg, x], dim=1) # (B*T, M+1, N, C) x = x + self.pos_embedding[None, None, :, :] # (B*T, M+1, N, C) x = rearrange(x, 'b m n c -> (m n) b c') # ((M+1)*N, B*T, C) x, _ = F.multi_head_attention_forward( query=x[:self.sequence_length], key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0, out_proj_weight=self.out_proj.weight, out_proj_bias=self.out_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False ) x = rearrange(x, 'n b c -> b n c') # (B*T, N, C) return x class MultiReferenceWindowAttention(nn.Module): """ Multi-Reference-(Shifted)Window-Multi-head Cross Attention (MR-(S)W-MCA) module with relative position bias. It supports both shifted and non-shifted window. The query is the restored features, while the key and values are the reference features. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 """ def __init__(self, dim: int, window_size: Tuple[int], num_heads: int, qkv_bias: bool = True, qk_scale: float = None, attn_drop: float = 0., proj_drop: float = 0.): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer("relative_position_index", relative_position_index) self.q = nn.Linear(dim, dim, bias=qkv_bias) self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) self.act = nn.GELU() self.dim_reduction = AttentionPooling1d(dim=dim, num_heads=num_heads, sequence_length=window_size[0] * window_size[1]) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
6
2023-10-19 09:14:40+00:00
4k
boppreh/hello_tls
src/hello_tls/__main__.py
[ { "identifier": "scan_server", "path": "src/hello_tls/scan.py", "snippet": "def scan_server(\n connection_settings: Union[ConnectionSettings, str],\n client_hello: Optional[ClientHello] = None,\n do_enumerate_cipher_suites: bool = True,\n do_enumerate_groups: bool = True,\n fetch_cert_chain: bool = True,\n max_workers: int = DEFAULT_MAX_WORKERS,\n progress: Callable[[int, int], None] = lambda current, total: None,\n ) -> ServerScanResult:\n \"\"\"\n Scans a SSL/TLS server for supported protocols, cipher suites, and certificate chain.\n\n `fetch_cert_chain` can be used to load the certificate chain, at the cost of using pyOpenSSL.\n\n Runs scans in parallel to speed up the process, with up to `max_workers` threads connecting at the same time.\n \"\"\"\n if isinstance(connection_settings, str):\n connection_settings = ConnectionSettings(*parse_target(connection_settings))\n \n logger.info(f\"Scanning {connection_settings.host}:{connection_settings.port}\")\n\n if not client_hello:\n client_hello = ClientHello(server_name=connection_settings.host) \n\n tmp_certificate_chain: List[Certificate] = []\n tmp_protocol_results = {p: ProtocolResult(False, None, None, None, None) for p in Protocol}\n\n with ThreadPool(max_workers) as pool:\n logger.debug(\"Initializing workers\")\n\n tasks: List[Callable[[], None]] = []\n\n def scan_protocol(protocol):\n protocol_result = tmp_protocol_results[protocol]\n suites_to_test = [cs for cs in CipherSuite if protocol in cs.protocols]\n\n if do_enumerate_cipher_suites:\n cipher_suite_hello = dataclasses.replace(client_hello, protocols=[protocol], cipher_suites=suites_to_test)\n # Save the cipher suites to protocol results, and store each Server Hello for post-processing of other options.\n def task():\n cipher_suites = enumerate_server_cipher_suites(connection_settings, cipher_suite_hello, protocol_result._cipher_suite_hellos.append)\n protocol_result.cipher_suites = cipher_suites\n tasks.append(task)\n\n if do_enumerate_groups:\n # Submit reversed list of cipher suites when checking for groups, to detect servers that respect user cipher suite order.\n group_hello = dataclasses.replace(client_hello, protocols=[protocol], cipher_suites=list(reversed(suites_to_test)))\n def task():\n groups = enumerate_server_groups(connection_settings, group_hello, protocol_result._group_hellos.append)\n protocol_result.groups = groups or None\n tasks.append(task)\n\n for protocol in client_hello.protocols:\n # Must be extracted to a function to avoid late binding in task lambdas.\n scan_protocol(protocol)\n\n if fetch_cert_chain:\n tasks.append(lambda: tmp_certificate_chain.extend(get_server_certificate_chain(connection_settings, client_hello)))\n\n if max_workers > len(tasks):\n logger.warning(f'Max workers is {max_workers}, but only {len(tasks)} tasks were ever created')\n\n # Process tasks out of order, wait for all of them to finish, and stop on first exception.\n for i, _ in enumerate(pool.imap_unordered(lambda t: t(), tasks)):\n progress(i+1, len(tasks))\n\n result = ServerScanResult(\n connection=connection_settings,\n protocols={},\n certificate_chain=tmp_certificate_chain,\n )\n\n # Finish processing the Server Hellos to detect compression and cipher suite order.\n for protocol, protocol_result in tmp_protocol_results.items():\n if not protocol_result.cipher_suites and not protocol_result.groups:\n result.protocols[protocol] = None\n continue\n\n result.protocols[protocol] = protocol_result\n\n sample_hello = (protocol_result._cipher_suite_hellos or protocol_result._group_hellos)[0]\n protocol_result.has_compression = sample_hello.compression != CompressionMethod.NULL\n\n if protocol_result.groups is not None:\n protocol_result.has_post_quantum = any(group.is_pq for group in protocol_result.groups)\n\n # The cipher suites in cipher_suite_hellos and group_hellos were sent in reversed order.\n # If the server accepted different cipher suites, then we know it respects the client order.\n if protocol_result.cipher_suites and protocol_result.groups:\n protocol_result.has_cipher_suite_order = protocol_result._cipher_suite_hellos[0].cipher_suite == protocol_result._group_hellos[0].cipher_suite\n\n return result" }, { "identifier": "DEFAULT_TIMEOUT", "path": "src/hello_tls/scan.py", "snippet": "DEFAULT_TIMEOUT: float = 2" }, { "identifier": "DEFAULT_MAX_WORKERS", "path": "src/hello_tls/scan.py", "snippet": "DEFAULT_MAX_WORKERS: int = 6" }, { "identifier": "parse_target", "path": "src/hello_tls/scan.py", "snippet": "def parse_target(target:str, default_port:int = 443) -> tuple[str, int]:\n \"\"\"\n Parses the target string into a host and port, stripping protocol and path.\n \"\"\"\n if not re.match(r'\\w+://', target):\n # Without a scheme, urlparse will treat the target as a path.\n # Prefix // to make it a netloc.\n url = urlparse('//' + target)\n else:\n url = urlparse(target, scheme='https')\n host = url.hostname or 'localhost'\n port = url.port if url.port else default_port\n return host, port" }, { "identifier": "ConnectionSettings", "path": "src/hello_tls/scan.py", "snippet": "class ConnectionSettings:\n \"\"\"\n Settings for a connection to a server, including the host, port, and proxy.\n \"\"\"\n host: str\n port: int = 443\n proxy: Optional[str] = None\n timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT\n date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0))" }, { "identifier": "to_json_obj", "path": "src/hello_tls/scan.py", "snippet": "def to_json_obj(o: Any) -> Any:\n \"\"\"\n Converts an object to a JSON-serializable structure, replacing dataclasses, enums, sets, datetimes, etc.\n \"\"\"\n if isinstance(o, dict):\n return {to_json_obj(key): to_json_obj(value) for key, value in o.items()}\n elif dataclasses.is_dataclass(o):\n return to_json_obj(dataclasses.asdict(o))\n elif isinstance(o, set):\n return sorted(to_json_obj(item) for item in o)\n elif isinstance(o, (tuple, list)):\n return [to_json_obj(item) for item in o]\n elif isinstance(o, Enum):\n return o.name\n elif isinstance(o, datetime):\n return o.isoformat(' ')\n return o" }, { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ClientHello:\n server_name: Optional[str] # No default value because you probably want to set this.\n protocols: Sequence[Protocol] = tuple(Protocol)\n cipher_suites: Sequence[CipherSuite] = tuple(CipherSuite)\n groups: Sequence[Group] = tuple(Group)\n compression_methods: Sequence[CompressionMethod] = tuple(CompressionMethod)" }, { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n\n def __repr__(self):\n return self.name\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplemented\n return self.value < other.value" } ]
from .scan import scan_server, DEFAULT_TIMEOUT, DEFAULT_MAX_WORKERS, parse_target, ConnectionSettings, to_json_obj from .protocol import ClientHello from .names_and_numbers import Protocol import os import sys import json import logging import argparse
2,604
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("target", help="server to scan, in the form of 'example.com', 'example.com:443', or even a full URL") parser.add_argument("--timeout", "-t", dest="timeout", type=float, default=DEFAULT_TIMEOUT, help="socket connection timeout in seconds") parser.add_argument("--max-workers", "-w", type=int, default=DEFAULT_MAX_WORKERS, help="maximum number of threads/concurrent connections to use for scanning") parser.add_argument("--server-name-indication", "-s", default='', help="value to be used in the SNI extension, defaults to the target host") parser.add_argument("--certs", "-c", default=True, action=argparse.BooleanOptionalAction, help="fetch the certificate chain using pyOpenSSL") parser.add_argument("--enumerate-cipher-suites", "-C", dest='enumerate_cipher_suites', default=True, action=argparse.BooleanOptionalAction, help="enumerate supported cipher suites") parser.add_argument("--enumerate-groups", "-G", dest='enumerate_groups', default=True, action=argparse.BooleanOptionalAction, help="enumerate supported groups") parser.add_argument("--protocols", "-p", dest='protocols_str', default=','.join(p.name for p in Protocol), help="comma separated list of TLS/SSL protocols to test") parser.add_argument("--proxy", default=None, help="HTTP proxy to use for the connection, defaults to the env variable 'http_proxy' else no proxy") parser.add_argument("--verbose", "-v", action="count", default=0, help="increase output verbosity") parser.add_argument("--progress", default=False, action=argparse.BooleanOptionalAction, help="write lines with progress percentages to stderr") args = parser.parse_args() logging.basicConfig( datefmt='%Y-%m-%d %H:%M:%S', format='{asctime}.{msecs:0<3.0f} {module} {threadName} {levelname}: {message}', style='{', level=[logging.WARNING, logging.INFO, logging.DEBUG][min(2, args.verbose)] ) if not args.protocols_str: parser.error("no protocols to test") try: protocols = [Protocol[p] for p in args.protocols_str.split(',')] except KeyError as e: parser.error(f'invalid protocol name "{e.args[0]}", must be one of {", ".join(p.name for p in Protocol)}') host, port = parse_target(args.target) if args.certs and protocols == [Protocol.SSLv3]: parser.error("SSLv3 is not supported by pyOpenSSL, so `--protocols SSLv3` must be used with `--no-certs`") proxy = os.environ.get('https_proxy') or os.environ.get('HTTPS_PROXY') if args.proxy is None else args.proxy if args.progress: progress = lambda current, total: print(f'{current/total:.0%}', flush=True, file=sys.stderr) print('0%', flush=True, file=sys.stderr) else: progress = lambda current, total: None results = scan_server( ConnectionSettings( host=host, port=port, proxy=proxy, timeout_in_seconds=args.timeout ), ClientHello( protocols=protocols, server_name=args.server_name_indication or host ), do_enumerate_cipher_suites=args.enumerate_cipher_suites, do_enumerate_groups=args.enumerate_groups, fetch_cert_chain=args.certs, max_workers=args.max_workers, progress=progress, )
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("target", help="server to scan, in the form of 'example.com', 'example.com:443', or even a full URL") parser.add_argument("--timeout", "-t", dest="timeout", type=float, default=DEFAULT_TIMEOUT, help="socket connection timeout in seconds") parser.add_argument("--max-workers", "-w", type=int, default=DEFAULT_MAX_WORKERS, help="maximum number of threads/concurrent connections to use for scanning") parser.add_argument("--server-name-indication", "-s", default='', help="value to be used in the SNI extension, defaults to the target host") parser.add_argument("--certs", "-c", default=True, action=argparse.BooleanOptionalAction, help="fetch the certificate chain using pyOpenSSL") parser.add_argument("--enumerate-cipher-suites", "-C", dest='enumerate_cipher_suites', default=True, action=argparse.BooleanOptionalAction, help="enumerate supported cipher suites") parser.add_argument("--enumerate-groups", "-G", dest='enumerate_groups', default=True, action=argparse.BooleanOptionalAction, help="enumerate supported groups") parser.add_argument("--protocols", "-p", dest='protocols_str', default=','.join(p.name for p in Protocol), help="comma separated list of TLS/SSL protocols to test") parser.add_argument("--proxy", default=None, help="HTTP proxy to use for the connection, defaults to the env variable 'http_proxy' else no proxy") parser.add_argument("--verbose", "-v", action="count", default=0, help="increase output verbosity") parser.add_argument("--progress", default=False, action=argparse.BooleanOptionalAction, help="write lines with progress percentages to stderr") args = parser.parse_args() logging.basicConfig( datefmt='%Y-%m-%d %H:%M:%S', format='{asctime}.{msecs:0<3.0f} {module} {threadName} {levelname}: {message}', style='{', level=[logging.WARNING, logging.INFO, logging.DEBUG][min(2, args.verbose)] ) if not args.protocols_str: parser.error("no protocols to test") try: protocols = [Protocol[p] for p in args.protocols_str.split(',')] except KeyError as e: parser.error(f'invalid protocol name "{e.args[0]}", must be one of {", ".join(p.name for p in Protocol)}') host, port = parse_target(args.target) if args.certs and protocols == [Protocol.SSLv3]: parser.error("SSLv3 is not supported by pyOpenSSL, so `--protocols SSLv3` must be used with `--no-certs`") proxy = os.environ.get('https_proxy') or os.environ.get('HTTPS_PROXY') if args.proxy is None else args.proxy if args.progress: progress = lambda current, total: print(f'{current/total:.0%}', flush=True, file=sys.stderr) print('0%', flush=True, file=sys.stderr) else: progress = lambda current, total: None results = scan_server( ConnectionSettings( host=host, port=port, proxy=proxy, timeout_in_seconds=args.timeout ), ClientHello( protocols=protocols, server_name=args.server_name_indication or host ), do_enumerate_cipher_suites=args.enumerate_cipher_suites, do_enumerate_groups=args.enumerate_groups, fetch_cert_chain=args.certs, max_workers=args.max_workers, progress=progress, )
json.dump(to_json_obj(results), sys.stdout, indent=2)
5
2023-10-21 02:00:13+00:00
4k
OPTML-Group/Diffusion-MU-Attack
src/tasks/classifier_.py
[ { "identifier": "calculate_clip_score", "path": "src/tasks/utils/metrics/clip_score.py", "snippet": "def calculate_clip_score(images, prompts,device):\n clip_score = clip_score_fn(torch.from_numpy(images).to(device), prompts).detach()\n return round(float(clip_score), 4)" }, { "identifier": "if_nude", "path": "src/tasks/utils/metrics/nudity_eval.py", "snippet": "def if_nude(res, threshold=0.75):\n return any([ (key in res.keys()) and (res[key] > threshold) for key in NUDE_KEYS ])" }, { "identifier": "detectNudeClasses", "path": "src/tasks/utils/metrics/nudity_eval.py", "snippet": "def detectNudeClasses(img_paths, threshold=0.45):\n detector = NudeDetector()\n results = []\n for img in tqdm(img_paths, disable=len(img_paths) < 100):\n detected = detector.detect(img)\n res = {}\n for detect in detected:\n if detect['score'] > threshold and detect['class'] in NUDE_KEYS:\n res[detect['class']] = detect['score']\n results.append(res)\n return results" }, { "identifier": "q16_binary_classifier", "path": "src/tasks/utils/metrics/harm_eval.py", "snippet": "def q16_binary_classifier(device):\n prompt_path = 'src/tasks/utils/metrics/q16/prompts.p'\n trained_prompts = load_prompts(prompt_path, device=device)\n clip_model = ClipWrapper(device)\n classifier = SimClassifier(trained_prompts, device)\n classifier.to(device)\n return clip_model, classifier" }, { "identifier": "harm_eval", "path": "src/tasks/utils/metrics/harm_eval.py", "snippet": "def harm_eval(clip_model, classifier, img_path, device):\n x = compute_embeddings(clip_model, img_path, device)\n y = classifier(x)\n label = torch.argmax(y, dim=0) # 0 for benign, 1 for malicious\n return label.item(), torch.softmax(y, dim=-1).squeeze()" }, { "identifier": "style_eval", "path": "src/tasks/utils/metrics/style_eval.py", "snippet": "def style_eval(classifier,img):\n return classifier(img,top_k=129)" }, { "identifier": "init_classifier", "path": "src/tasks/utils/metrics/style_eval.py", "snippet": "def init_classifier(device,path):\n return pipeline('image-classification',model=path,device=device)" }, { "identifier": "imagenet_ResNet50", "path": "src/tasks/utils/metrics/object_eval.py", "snippet": "def imagenet_ResNet50(device):\n processor = AutoImageProcessor.from_pretrained(\"microsoft/resnet-50\", cache_dir=\".cache\")\n model = ResNetForImageClassification.from_pretrained(\"microsoft/resnet-50\", cache_dir=\".cache\")\n model.to(device)\n return processor, model" }, { "identifier": "object_eval", "path": "src/tasks/utils/metrics/object_eval.py", "snippet": "def object_eval(classifier, img, processor, device):\n with torch.no_grad():\n inputs = processor(img, return_tensors=\"pt\")\n inputs.to(device)\n logits = classifier(**inputs).logits\n\n # model predicts one of the 1000 ImageNet classes\n predicted_label = logits.argmax(-1).item()\n # print(predicted_label)\n # print(classifier.config.id2label[predicted_label])\n return predicted_label, torch.softmax(logits, dim=-1).squeeze()" }, { "identifier": "CustomTextEncoder", "path": "src/tasks/utils/text_encoder.py", "snippet": "class CustomTextEncoder(torch.nn.Module):\n def __init__(self, text_encoder):\n super().__init__()\n self.text_encoder = text_encoder\n self.text_encoder.eval()\n self.text_encoder.requires_grad_(False)\n self.embedding = text_encoder.text_model.embeddings\n self.encoder = text_encoder.text_model.encoder\n self.final_layer_norm = text_encoder.text_model.final_layer_norm\n self.config = text_encoder.text_model.config\n self.eos_token_id = self.config.eos_token_id\n def get_all_embedding(self):\n return self.embedding.token_embedding.weight\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n inputs_embeds : Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPooling]:\n output_attentions = False\n output_hidden_states = False\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if inputs_embeds is None:\n raise ValueError(\"You have to specify input_embds\")\n if input_ids is None:\n raise ValueError(\"You have to specify input_ids\")\n\n hidden_states = self.embedding(inputs_embeds=inputs_embeds , position_ids=position_ids)\n input_shape = input_ids.size()\n causal_attention_mask = _make_causal_mask(input_shape, hidden_states.dtype, device=hidden_states.device)\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(attention_mask, hidden_states.dtype)\n\n encoder_outputs = self.encoder(\n inputs_embeds=hidden_states,\n attention_mask=attention_mask,\n causal_attention_mask=causal_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = encoder_outputs[0]\n last_hidden_state = self.final_layer_norm(last_hidden_state)\n\n if self.eos_token_id == 2:\n # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here.\n # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added\n # ------------------------------------------------------------\n # text_embeds.shape = [batch_size, sequence_length, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14\n pooled_output = last_hidden_state[\n torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),\n input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),\n ]\n else:\n # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible)\n pooled_output = last_hidden_state[\n torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),\n # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)\n (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)\n .int()\n .argmax(dim=-1),\n ]\n\n if not return_dict:\n return (last_hidden_state, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=last_hidden_state,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )" }, { "identifier": "get", "path": "src/tasks/utils/datasets.py", "snippet": "def get(root_dir):\n return PNGImageDataset(root_dir=root_dir,transform=get_transform()) " } ]
import os import torch import torch.nn.functional as F from copy import deepcopy from diffusers import AutoencoderKL, UNet2DConditionModel, LMSDiscreteScheduler from transformers import CLIPTextModel, CLIPTokenizer from PIL import Image from uuid import uuid4 from .utils.metrics.clip_score import calculate_clip_score from .utils.metrics.nudity_eval import if_nude, detectNudeClasses from .utils.metrics.harm_eval import q16_binary_classifier, harm_eval from .utils.metrics.style_eval import style_eval,init_classifier from .utils.metrics.object_eval import imagenet_ResNet50, object_eval from .utils.text_encoder import CustomTextEncoder from .utils.datasets import get as get_dataset from tqdm.auto import tqdm
2,359
class ClassifierTask: def __init__( self, concept, sld, sld_concept, negative_prompt, model_name_or_path, target_ckpt, cache_path, dataset_path, criterion, sampling_step_num, n_samples = 50, classifier_dir = None, ): self.object_list = ['cassette_player', 'church', 'english_springer', 'french_horn', 'garbage_truck', 'gas_pump', 'golf_ball', 'parachute', 'tench', "chain_saw"] self.object_labels = [482, 497, 217, 566, 569, 571, 574, 701, 0, 491] self.device = "cuda:0" if torch.cuda.is_available() else "cpu" self.concept = concept self.sld = sld self.sld_concept = sld_concept self.negative_prompt = negative_prompt self.cache_path = cache_path self.sampling_step_num = sampling_step_num self.dataset = get_dataset(dataset_path) self.criterion = torch.nn.L1Loss() if criterion == 'l1' else torch.nn.MSELoss() self.vae = AutoencoderKL.from_pretrained(model_name_or_path, subfolder="vae", cache_dir=cache_path).to(self.device) self.tokenizer = CLIPTokenizer.from_pretrained(model_name_or_path, subfolder="tokenizer", cache_dir=cache_path) self.text_encoder = CLIPTextModel.from_pretrained(model_name_or_path, subfolder="text_encoder", cache_dir=cache_path).to(self.device) self.custom_text_encoder = CustomTextEncoder(self.text_encoder).to(self.device) self.all_embeddings = self.custom_text_encoder.get_all_embedding().unsqueeze(0) self.unet_sd = UNet2DConditionModel.from_pretrained(model_name_or_path, subfolder="unet", cache_dir=cache_path).to(self.device) self.target_unet_sd = deepcopy(self.unet_sd) if self.sld is None: self.target_unet_sd.load_state_dict(torch.load(target_ckpt, map_location=self.device)) if classifier_dir is not None: self.classifier = init_classifier(self.device,classifier_dir) elif self.concept in self.object_list: self.processor, self.classifier = imagenet_ResNet50(self.device) elif self.concept == 'harm':
class ClassifierTask: def __init__( self, concept, sld, sld_concept, negative_prompt, model_name_or_path, target_ckpt, cache_path, dataset_path, criterion, sampling_step_num, n_samples = 50, classifier_dir = None, ): self.object_list = ['cassette_player', 'church', 'english_springer', 'french_horn', 'garbage_truck', 'gas_pump', 'golf_ball', 'parachute', 'tench', "chain_saw"] self.object_labels = [482, 497, 217, 566, 569, 571, 574, 701, 0, 491] self.device = "cuda:0" if torch.cuda.is_available() else "cpu" self.concept = concept self.sld = sld self.sld_concept = sld_concept self.negative_prompt = negative_prompt self.cache_path = cache_path self.sampling_step_num = sampling_step_num self.dataset = get_dataset(dataset_path) self.criterion = torch.nn.L1Loss() if criterion == 'l1' else torch.nn.MSELoss() self.vae = AutoencoderKL.from_pretrained(model_name_or_path, subfolder="vae", cache_dir=cache_path).to(self.device) self.tokenizer = CLIPTokenizer.from_pretrained(model_name_or_path, subfolder="tokenizer", cache_dir=cache_path) self.text_encoder = CLIPTextModel.from_pretrained(model_name_or_path, subfolder="text_encoder", cache_dir=cache_path).to(self.device) self.custom_text_encoder = CustomTextEncoder(self.text_encoder).to(self.device) self.all_embeddings = self.custom_text_encoder.get_all_embedding().unsqueeze(0) self.unet_sd = UNet2DConditionModel.from_pretrained(model_name_or_path, subfolder="unet", cache_dir=cache_path).to(self.device) self.target_unet_sd = deepcopy(self.unet_sd) if self.sld is None: self.target_unet_sd.load_state_dict(torch.load(target_ckpt, map_location=self.device)) if classifier_dir is not None: self.classifier = init_classifier(self.device,classifier_dir) elif self.concept in self.object_list: self.processor, self.classifier = imagenet_ResNet50(self.device) elif self.concept == 'harm':
self.clip_model, self.classifier = q16_binary_classifier(self.device)
3
2023-10-17 13:54:37+00:00
4k
YefanZhou/TempBalance
object_detection/src/YOLOv8/ultralytics/yolo/utils/tal.py
[ { "identifier": "check_version", "path": "object_detection/src/YOLOv8/ultralytics/yolo/utils/checks.py", "snippet": "def check_version(current: str = '0.0.0',\n minimum: str = '0.0.0',\n name: str = 'version ',\n pinned: bool = False,\n hard: bool = False,\n verbose: bool = False) -> bool:\n \"\"\"\n Check current version against the required minimum version.\n\n Args:\n current (str): Current version.\n minimum (str): Required minimum version.\n name (str): Name to be used in warning message.\n pinned (bool): If True, versions must match exactly. If False, minimum version must be satisfied.\n hard (bool): If True, raise an AssertionError if the minimum version is not met.\n verbose (bool): If True, print warning message if minimum version is not met.\n\n Returns:\n (bool): True if minimum version is met, False otherwise.\n \"\"\"\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n warning_message = f'WARNING ⚠️ {name}{minimum} is required by YOLOv8, but {name}{current} is currently installed'\n if hard:\n assert result, emojis(warning_message) # assert min requirements met\n if verbose and not result:\n LOGGER.warning(warning_message)\n return result" }, { "identifier": "bbox_iou", "path": "object_detection/src/YOLOv8/ultralytics/yolo/utils/metrics.py", "snippet": "def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):\n \"\"\"\n Calculate Intersection over Union (IoU) of box1(1, 4) to box2(n, 4).\n\n Args:\n box1 (torch.Tensor): A tensor representing a single bounding box with shape (1, 4).\n box2 (torch.Tensor): A tensor representing n bounding boxes with shape (n, 4).\n xywh (bool, optional): If True, input boxes are in (x, y, w, h) format. If False, input boxes are in\n (x1, y1, x2, y2) format. Defaults to True.\n GIoU (bool, optional): If True, calculate Generalized IoU. Defaults to False.\n DIoU (bool, optional): If True, calculate Distance IoU. Defaults to False.\n CIoU (bool, optional): If True, calculate Complete IoU. Defaults to False.\n eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.\n\n Returns:\n (torch.Tensor): IoU, GIoU, DIoU, or CIoU values depending on the specified flags.\n \"\"\"\n\n # Get the coordinates of bounding boxes\n if xywh: # transform from xywh to xyxy\n (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)\n w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2\n b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_\n b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_\n else: # x1, y1, x2, y2 = box1\n b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)\n b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n # Intersection area\n inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \\\n (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0)\n\n # Union Area\n union = w1 * h1 + w2 * h2 - inter + eps\n\n # IoU\n iou = inter / union\n if CIoU or DIoU or GIoU:\n cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width\n ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height\n if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared\n rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2\n if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)\n with torch.no_grad():\n alpha = v / (v - iou + (1 + eps))\n return iou - (rho2 / c2 + v * alpha) # CIoU\n return iou - rho2 / c2 # DIoU\n c_area = cw * ch + eps # convex area\n return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf\n return iou # IoU" } ]
import torch import torch.nn as nn from .checks import check_version from .metrics import bbox_iou
3,543
target_gt_idx (Tensor): shape(b, h*w) fg_mask (Tensor): shape(b, h*w) mask_pos (Tensor): shape(b, n_max_boxes, h*w) """ # (b, n_max_boxes, h*w) -> (b, h*w) fg_mask = mask_pos.sum(-2) if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1]) # (b, n_max_boxes, h*w) max_overlaps_idx = overlaps.argmax(1) # (b, h*w) is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device) is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1) mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float() # (b, n_max_boxes, h*w) fg_mask = mask_pos.sum(-2) # Find each grid serve which gt(index) target_gt_idx = mask_pos.argmax(-2) # (b, h*w) return target_gt_idx, fg_mask, mask_pos class TaskAlignedAssigner(nn.Module): """ A task-aligned assigner for object detection. This class assigns ground-truth (gt) objects to anchors based on the task-aligned metric, which combines both classification and localization information. Attributes: topk (int): The number of top candidates to consider. num_classes (int): The number of object classes. alpha (float): The alpha parameter for the classification component of the task-aligned metric. beta (float): The beta parameter for the localization component of the task-aligned metric. eps (float): A small value to prevent division by zero. """ def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): """Initialize a TaskAlignedAssigner object with customizable hyperparameters.""" super().__init__() self.topk = topk self.num_classes = num_classes self.bg_idx = num_classes self.alpha = alpha self.beta = beta self.eps = eps @torch.no_grad() def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): """ Compute the task-aligned assignment. Reference https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py Args: pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) anc_points (Tensor): shape(num_total_anchors, 2) gt_labels (Tensor): shape(bs, n_max_boxes, 1) gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) mask_gt (Tensor): shape(bs, n_max_boxes, 1) Returns: target_labels (Tensor): shape(bs, num_total_anchors) target_bboxes (Tensor): shape(bs, num_total_anchors, 4) target_scores (Tensor): shape(bs, num_total_anchors, num_classes) fg_mask (Tensor): shape(bs, num_total_anchors) target_gt_idx (Tensor): shape(bs, num_total_anchors) """ self.bs = pd_scores.size(0) self.n_max_boxes = gt_bboxes.size(1) if self.n_max_boxes == 0: device = gt_bboxes.device return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), torch.zeros_like(pd_bboxes).to(device), torch.zeros_like(pd_scores).to(device), torch.zeros_like(pd_scores[..., 0]).to(device), torch.zeros_like(pd_scores[..., 0]).to(device)) mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt) target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) # Assigned target target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) # Normalize align_metric *= mask_pos pos_align_metrics = align_metric.amax(axis=-1, keepdim=True) # b, max_num_obj pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) target_scores = target_scores * norm_align_metric return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): """Get in_gts mask, (b, max_num_obj, h*w).""" mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) # Get anchor_align metric, (b, max_num_obj, h*w) align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts * mask_gt) # Get topk_metric mask, (b, max_num_obj, h*w) mask_topk = self.select_topk_candidates(align_metric, topk_mask=mask_gt.repeat([1, 1, self.topk]).bool()) # Merge all mask to a final mask, (b, max_num_obj, h*w) mask_pos = mask_topk * mask_in_gts * mask_gt return mask_pos, align_metric, overlaps def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_gt): """Compute alignment metric given predicted and ground truth bounding boxes.""" na = pd_bboxes.shape[-2] mask_gt = mask_gt.bool() # b, max_num_obj, h*w overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device) bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device) ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes) # b, max_num_obj ind[1] = gt_labels.long().squeeze(-1) # b, max_num_obj # Get the scores of each grid for each gt cls bbox_scores[mask_gt] = pd_scores[ind[0], :, ind[1]][mask_gt] # b, max_num_obj, h*w # (b, max_num_obj, 1, 4), (b, 1, h*w, 4) pd_boxes = pd_bboxes.unsqueeze(1).repeat(1, self.n_max_boxes, 1, 1)[mask_gt] gt_boxes = gt_bboxes.unsqueeze(2).repeat(1, 1, na, 1)[mask_gt]
# Ultralytics YOLO 🚀, AGPL-3.0 license TORCH_1_10 = check_version(torch.__version__, '1.10.0') def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): """select the positive anchor center in gt Args: xy_centers (Tensor): shape(h*w, 4) gt_bboxes (Tensor): shape(b, n_boxes, 4) Return: (Tensor): shape(b, n_boxes, h*w) """ n_anchors = xy_centers.shape[0] bs, n_boxes, _ = gt_bboxes.shape lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) return bbox_deltas.amin(3).gt_(eps) def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): """if an anchor box is assigned to multiple gts, the one with the highest iou will be selected. Args: mask_pos (Tensor): shape(b, n_max_boxes, h*w) overlaps (Tensor): shape(b, n_max_boxes, h*w) Return: target_gt_idx (Tensor): shape(b, h*w) fg_mask (Tensor): shape(b, h*w) mask_pos (Tensor): shape(b, n_max_boxes, h*w) """ # (b, n_max_boxes, h*w) -> (b, h*w) fg_mask = mask_pos.sum(-2) if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1]) # (b, n_max_boxes, h*w) max_overlaps_idx = overlaps.argmax(1) # (b, h*w) is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device) is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1) mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float() # (b, n_max_boxes, h*w) fg_mask = mask_pos.sum(-2) # Find each grid serve which gt(index) target_gt_idx = mask_pos.argmax(-2) # (b, h*w) return target_gt_idx, fg_mask, mask_pos class TaskAlignedAssigner(nn.Module): """ A task-aligned assigner for object detection. This class assigns ground-truth (gt) objects to anchors based on the task-aligned metric, which combines both classification and localization information. Attributes: topk (int): The number of top candidates to consider. num_classes (int): The number of object classes. alpha (float): The alpha parameter for the classification component of the task-aligned metric. beta (float): The beta parameter for the localization component of the task-aligned metric. eps (float): A small value to prevent division by zero. """ def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): """Initialize a TaskAlignedAssigner object with customizable hyperparameters.""" super().__init__() self.topk = topk self.num_classes = num_classes self.bg_idx = num_classes self.alpha = alpha self.beta = beta self.eps = eps @torch.no_grad() def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): """ Compute the task-aligned assignment. Reference https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py Args: pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) anc_points (Tensor): shape(num_total_anchors, 2) gt_labels (Tensor): shape(bs, n_max_boxes, 1) gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) mask_gt (Tensor): shape(bs, n_max_boxes, 1) Returns: target_labels (Tensor): shape(bs, num_total_anchors) target_bboxes (Tensor): shape(bs, num_total_anchors, 4) target_scores (Tensor): shape(bs, num_total_anchors, num_classes) fg_mask (Tensor): shape(bs, num_total_anchors) target_gt_idx (Tensor): shape(bs, num_total_anchors) """ self.bs = pd_scores.size(0) self.n_max_boxes = gt_bboxes.size(1) if self.n_max_boxes == 0: device = gt_bboxes.device return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), torch.zeros_like(pd_bboxes).to(device), torch.zeros_like(pd_scores).to(device), torch.zeros_like(pd_scores[..., 0]).to(device), torch.zeros_like(pd_scores[..., 0]).to(device)) mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt) target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) # Assigned target target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) # Normalize align_metric *= mask_pos pos_align_metrics = align_metric.amax(axis=-1, keepdim=True) # b, max_num_obj pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) target_scores = target_scores * norm_align_metric return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): """Get in_gts mask, (b, max_num_obj, h*w).""" mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) # Get anchor_align metric, (b, max_num_obj, h*w) align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts * mask_gt) # Get topk_metric mask, (b, max_num_obj, h*w) mask_topk = self.select_topk_candidates(align_metric, topk_mask=mask_gt.repeat([1, 1, self.topk]).bool()) # Merge all mask to a final mask, (b, max_num_obj, h*w) mask_pos = mask_topk * mask_in_gts * mask_gt return mask_pos, align_metric, overlaps def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_gt): """Compute alignment metric given predicted and ground truth bounding boxes.""" na = pd_bboxes.shape[-2] mask_gt = mask_gt.bool() # b, max_num_obj, h*w overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device) bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device) ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes) # b, max_num_obj ind[1] = gt_labels.long().squeeze(-1) # b, max_num_obj # Get the scores of each grid for each gt cls bbox_scores[mask_gt] = pd_scores[ind[0], :, ind[1]][mask_gt] # b, max_num_obj, h*w # (b, max_num_obj, 1, 4), (b, 1, h*w, 4) pd_boxes = pd_bboxes.unsqueeze(1).repeat(1, self.n_max_boxes, 1, 1)[mask_gt] gt_boxes = gt_bboxes.unsqueeze(2).repeat(1, 1, na, 1)[mask_gt]
overlaps[mask_gt] = bbox_iou(gt_boxes, pd_boxes, xywh=False, CIoU=True).squeeze(-1).clamp(0)
1
2023-10-24 00:45:55+00:00
4k
zhaojw1998/AccoMontage-3
orchestrator/prior_model.py
[ { "identifier": "Query_and_reArrange", "path": "orchestrator/QA_model.py", "snippet": "class Query_and_reArrange(nn.Module):\n \"\"\"Q&A model for multi-track rearrangement\"\"\"\n def __init__(self, name, device, trf_layers=2):\n super(Query_and_reArrange, self).__init__()\n\n self.name = name\n self.device = device\n \n # mixture encoder\n self.mixture_enc = PtvaeEncoder(max_simu_note=32, device=self.device, z_size=256)\n\n # track function encoder\n self.function_enc = FunctionEncoder(256, 256, 16)\n\n # feat_dec + pianotree_dec = symbolic decoder\n self.feat_dec = FeatDecoder(z_dim=256) # for key feature reconstruction\n self.feat_emb_layer = nn.Linear(3, 64)\n self.pianotree_dec = PianoTreeDecoder(z_size=256, feat_emb_dim=64, device=device)\n\n self.Transformer_layers = nn.ModuleDict({})\n self.trf_layers = trf_layers\n for idx in range(self.trf_layers):\n self.Transformer_layers[f'layer_{idx}'] = TransformerEncoderLayer(d_model=256, nhead=8, dim_feedforward=1024, dropout=.1, activation=F.gelu, batch_first=True)\n\n self.prog_embedding = nn.Embedding(num_embeddings=35, embedding_dim=256, padding_idx=34)\n\n self.trf_mu = nn.Linear(256, 256)\n self.trf_var = nn.Linear(256, 256)\n\n def run(self, pno_tree_mix, prog, function, pno_tree=None, feat=None, track_pad_mask=None, tfr1=0, tfr2=0, inference=False, mel_id=None):\n \"\"\"\n Forward path of the model in training (w/o computing loss).\n \"\"\"\n\n batch, track, time = function.shape\n max_simu_note = 16\n \n dist_mix, _, _ = self.mixture_enc(pno_tree_mix) \n if inference:\n z_mix = dist_mix.mean\n else:\n z_mix = dist_mix.rsample()\n if track_pad_mask is None:\n track_pad_mask = torch.zeros(batch, track, dtype=bool).to(z_mix.device)\n\n function = function.reshape(-1, 32)\n z_func, cmt_loss, plty = self.function_enc(function, track_pad_mask)\n function_recon = self.function_enc.decoder(z_func).reshape(batch, track, -1)\n\n z_func = z_func.reshape(batch, track, -1) #(batch, track, 256),\n z = torch.cat([\n z_mix.unsqueeze(1), #(batch, 1, 256)\n z_func + self.prog_embedding(prog)],\n dim=1) #z: (batch, track+1, 256)\n\n if not inference:\n trf_mask = torch.cat([torch.zeros(batch, 1, device=z.device).bool(), track_pad_mask], dim=-1) #(batch, track+1)\n else:\n trf_mask = torch.zeros(batch, track+1, device=z.device).bool()\n\n for idx in range(self.trf_layers):\n z = self.Transformer_layers[f'layer_{idx}'](src=z, src_key_padding_mask=trf_mask)\n\n\n z = z[:, 1:].reshape(-1, 256)\n mu = self.trf_mu(z)\n var = self.trf_var(z).exp_()\n\n dist_trf = Normal(mu, var)\n if inference and (mel_id is None):\n z = dist_trf.mean\n elif inference and (mel_id is not None):\n z1 = dist_trf.mean.reshape(batch, track, 256)\n z2 = dist_trf.rsample().reshape(batch, track, 256)\n z = torch.cat([z1[:, :mel_id], z2[:, mel_id: mel_id+1], z1[:, mel_id+1:]], dim=1).reshape(-1, 256)\n else:\n z = dist_trf.rsample()\n\n if not inference:\n feat = feat.reshape(-1, time, 3)\n #reconstruct key feature for self-supervision during training\n recon_feat = self.feat_dec(z, inference, tfr1, feat) #(batch*track, time, 3)\n #embed the reconstructed feature (without applying argmax)\n feat_emb = self.feat_emb_layer(recon_feat)\n\n #prepare the teacher-forcing data for pianotree decoder\n if inference:\n embedded_pno_tree = None\n pno_tree_lgths = None\n else:\n embedded_pno_tree, pno_tree_lgths = self.pianotree_dec.emb_x(pno_tree.reshape(-1, time, max_simu_note, 6))\n\n #pianotree decoder\n recon_pitch, recon_dur = \\\n self.pianotree_dec(z, inference, embedded_pno_tree, pno_tree_lgths, tfr1, tfr2, feat_emb)\n\n recon_pitch = recon_pitch.reshape(batch, track, time, max_simu_note-1, 130)\n recon_dur = recon_dur.reshape(batch, track, time, max_simu_note-1, 5, 2)\n recon_feat = recon_feat.reshape(batch, track, time, 3)\n\n return recon_pitch, recon_dur, recon_feat, \\\n function_recon, \\\n dist_mix, dist_trf, \\\n cmt_loss, plty\n\n def loss_calc(self, pno_tree, feat, function, \n recon_pitch, recon_dur, recon_feat, function_recon,\n dist_mix, dist_trf, cmt_loss, plty, track_pad_mask,\n beta, weights):\n \"\"\" Compute the loss from ground truth and the output of self.run()\"\"\"\n mask = torch.logical_not(track_pad_mask)\n # pianotree recon loss\n pno_tree_l, pitch_l, dur_l = \\\n self.pianotree_dec.recon_loss(pno_tree[mask], \n recon_pitch[mask], \n recon_dur[mask],\n weights)\n # key feature reconstruction loss\n feat_l, onset_feat_l, int_feat_l, center_feat_l = \\\n self.feat_dec.recon_loss(feat[mask], recon_feat[mask])\n\n func_l = self.function_enc.recon_loss(function_recon[mask], function[mask])\n vqvae_l = func_l + cmt_loss\n\n # kl losses\n kl_mix = kl_with_normal(dist_mix)\n kl_trf = kl_with_normal(dist_trf)\n\n kl_l = beta * (kl_mix + kl_trf)\n\n loss = pno_tree_l + feat_l + kl_l + vqvae_l\n\n return loss, pno_tree_l, pitch_l, dur_l, \\\n kl_l, kl_mix, kl_trf, \\\n feat_l, onset_feat_l, int_feat_l, center_feat_l, \\\n vqvae_l, func_l, cmt_loss, plty\n\n def loss(self, pno_tree_mix, prog, function, pno_tree, feat, track_pad_mask, tfr1, tfr2,\n beta=0.01, weights=(1, 0.5)):\n \"\"\"forward and calculate loss\"\"\"\n output = self.run(pno_tree_mix, prog, function, pno_tree, feat, track_pad_mask, tfr1, tfr2)\n return self.loss_calc(pno_tree, feat, function, *output, track_pad_mask, beta, weights)\n \n def output_process(self, recon_pitch, recon_dur):\n grid_recon = torch.cat([recon_pitch.max(-1)[-1].unsqueeze(-1), recon_dur.max(-1)[-1]], dim=-1)\n _, track, _, max_simu_note, grid_dim = grid_recon.shape\n grid_recon = grid_recon.permute(1, 0, 2, 3, 4)\n grid_recon = grid_recon.reshape(track, -1, max_simu_note, grid_dim)\n pr_recon = np.array([grid2pr(matrix) for matrix in grid_recon.detach().cpu().numpy()])\n return pr_recon\n\n def inference(self, pno_tree_mix, prog, function, mel_id=None):\n self.eval()\n with torch.no_grad():\n recon_pitch, recon_dur, _, _, _, _, _, _ = self.run(pno_tree_mix, prog, function, inference=True, mel_id=mel_id)\n pr_recon = self.output_process(recon_pitch, recon_dur)\n return pr_recon\n \n def infer_with_function_codes(self, z_mix, prog, z_func):\n #z_mix: (batch, 256)\n #prog: (batch, track)\n #z_func: (batch, track, 128)\n\n z = torch.cat([ z_mix.unsqueeze(1), #(batch, 1, 256)\n z_func + self.prog_embedding(prog)],\n dim=1) #z: (batch, track+1, 256)\"\"\"\n \n for idx in range(self.trf_layers):\n z = self.Transformer_layers[f'layer_{idx}'](src=z)\n \n z = z[:, 1:].reshape(-1, 256)\n\n mu = self.trf_mu(z)\n var = self.trf_var(z).exp_()\n dist_trf = Normal(mu, var)\n z = dist_trf.mean\n\n recon_feat = self.feat_dec(z, True, 0, None)\n feat_emb = self.feat_emb_layer(recon_feat)\n\n # prepare the teacher-forcing data for pianotree decoder\n embedded_pno_tree = None\n pno_tree_lgths = None\n \n # pianotree decoder\n recon_pitch, recon_dur = \\\n self.pianotree_dec(z, True, embedded_pno_tree, pno_tree_lgths, 0, 0, feat_emb)\n\n recon_pitch = recon_pitch.reshape(*list(prog.shape), 32, 15, 130)\n recon_dur = recon_dur.reshape(*list(prog.shape), 32, 15, 5, 2)\n return recon_pitch, recon_dur\n\n def forward(self, mode, *input, **kwargs):\n if mode in [\"run\", 0]:\n return self.run(*input, **kwargs)\n elif mode in ['loss', 'train', 1]:\n return self.loss(*input, **kwargs)\n elif mode in ['inference', 'eval', 'val', 2]:\n return self.inference(*input, **kwargs)\n else:\n raise NotImplementedError" }, { "identifier": "TransformerEncoderLayer", "path": "orchestrator/TransformerEncoderLayer.py", "snippet": "class TransformerEncoderLayer(nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, layer_norm_eps=1e-5, norm_first=False, max_len=1024):\n super(TransformerEncoderLayer, self).__init__()\n self.self_attn = MultiheadSelfAttentionwithRelativePositionalEmbedding(d_model, nhead, dropout, max_len)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm_first = norm_first\n self.norm1 = LayerNorm(d_model, eps=layer_norm_eps)\n self.norm2 = LayerNorm(d_model, eps=layer_norm_eps)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = F.gelu\n\n def forward(self, src, src_mask=None, src_key_padding_mask=None):\n #src: (batch, len, dmodel)\n #key_padding_mask: (batch, src_len), bool tensor\n #attn_mask: (batch, num_head, src_len, src_len): float tensor\n x = src\n if self.norm_first:\n x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)\n x = x + self._ff_block(self.norm2(x))\n else:\n x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))\n x = self.norm2(x + self._ff_block(x))\n return x\n\n # self-attention block\n def _sa_block(self, x, attn_mask=None, key_padding_mask=None):\n x = self.self_attn(x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0]\n return self.dropout1(x)\n\n # feed forward block\n def _ff_block(self, x):\n x = self.linear2(self.dropout(self.activation(self.linear1(x))))\n return self.dropout2(x)" }, { "identifier": "NUM_INSTR_CLASS", "path": "orchestrator/prior_dataset.py", "snippet": "NUM_INSTR_CLASS = 34" }, { "identifier": "NUM_TIME_CODE", "path": "orchestrator/prior_dataset.py", "snippet": "NUM_TIME_CODE = 128" }, { "identifier": "TOTAL_LEN_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "TOTAL_LEN_BIN = np.array([4, 7, 12, 15, 20, 23, 28, 31, 36, 39, 44, 47, 52, 55, 60, 63, 68, 71, 76, 79, 84, 87, 92, 95, 100, 103, 108, 111, 116, 119, 124, 127, 132])" }, { "identifier": "ABS_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "ABS_POS_BIN = np.arange(129)" }, { "identifier": "REL_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "REL_POS_BIN = np.arange(128)" } ]
import math import random import torch import torch.nn.functional as F import numpy as np import os from torch import nn from .QA_model import Query_and_reArrange from .TransformerEncoderLayer import TransformerEncoderLayer as TransformerEncoderLayerRPE from .prior_dataset import NUM_INSTR_CLASS, NUM_TIME_CODE, TOTAL_LEN_BIN, ABS_POS_BIN, REL_POS_BIN from prior_dataset import VQ_LMD_Dataset, collate_fn from torch.utils.data import DataLoader
3,558
class Prior(nn.Module): def __init__(self, mixture_encoder=None, function_encoder=None, context_enc_layer=12, function_dec_layer=12, d_model=256, nhead=8, dim_feedforward=1024, dropout=.1, function_resolution=8, inference=False, QA_model=None, DEVICE='cuda:0'): super(Prior, self).__init__() # embeddings self.func_embedding = nn.Embedding(num_embeddings=NUM_TIME_CODE+1, embedding_dim=d_model, padding_idx=NUM_TIME_CODE) self.prog_embedding = nn.Embedding(num_embeddings=NUM_INSTR_CLASS+1, embedding_dim=d_model, padding_idx=NUM_INSTR_CLASS) self.total_len_embedding = nn.Embedding(num_embeddings=len(TOTAL_LEN_BIN)+1, embedding_dim=d_model, padding_idx=len(TOTAL_LEN_BIN)) self.abs_pos_embedding = nn.Embedding(num_embeddings=len(ABS_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(ABS_POS_BIN))
class Prior(nn.Module): def __init__(self, mixture_encoder=None, function_encoder=None, context_enc_layer=12, function_dec_layer=12, d_model=256, nhead=8, dim_feedforward=1024, dropout=.1, function_resolution=8, inference=False, QA_model=None, DEVICE='cuda:0'): super(Prior, self).__init__() # embeddings self.func_embedding = nn.Embedding(num_embeddings=NUM_TIME_CODE+1, embedding_dim=d_model, padding_idx=NUM_TIME_CODE) self.prog_embedding = nn.Embedding(num_embeddings=NUM_INSTR_CLASS+1, embedding_dim=d_model, padding_idx=NUM_INSTR_CLASS) self.total_len_embedding = nn.Embedding(num_embeddings=len(TOTAL_LEN_BIN)+1, embedding_dim=d_model, padding_idx=len(TOTAL_LEN_BIN)) self.abs_pos_embedding = nn.Embedding(num_embeddings=len(ABS_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(ABS_POS_BIN))
self.rel_pos_embedding = nn.Embedding(num_embeddings=len(REL_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(REL_POS_BIN))
6
2023-10-23 12:36:57+00:00
4k
zcczhang/UVD
uvd/utils/video_utils.py
[ { "identifier": "any_stack", "path": "uvd/utils/array_tensor_utils.py", "snippet": "def any_stack(xs: List, *, dim: int = 0):\n \"\"\"Works for both torch Tensor and numpy array.\"\"\"\n\n def _any_stack_helper(*xs):\n x = xs[0]\n if isinstance(x, np.ndarray):\n return np.stack(xs, axis=dim)\n elif torch.is_tensor(x):\n return torch.stack(xs, dim=dim)\n elif isinstance(x, float):\n # special treatment for float, defaults to float32\n return np.array(xs, dtype=np.float32)\n else:\n return np.array(xs)\n\n return tree.map_structure(_any_stack_helper, *xs)" }, { "identifier": "any_to_torch_tensor", "path": "uvd/utils/array_tensor_utils.py", "snippet": "def any_to_torch_tensor(\n x,\n dtype: Union[str, torch.dtype, None] = None,\n device: Union[str, int, torch.device, None] = None,\n copy=False,\n non_blocking=False,\n smart_optimize: bool = True,\n):\n dtype = torch_dtype(dtype)\n device = torch_device(device)\n\n if not isinstance(x, (torch.Tensor, np.ndarray)):\n # x is a primitive python sequence\n x = torch.tensor(x, dtype=dtype)\n copy = False\n\n # This step does not create any copy.\n # If x is a numpy array, simply wraps it in Tensor. If it's already a Tensor, do nothing.\n x = torch.as_tensor(x)\n # avoid passing None to .to(), PyTorch 1.4 bug\n dtype = dtype or x.dtype\n device = device or x.device\n\n if not smart_optimize:\n # do a single stage type conversion and transfer\n return x.to(dtype=dtype, device=device, copy=copy, non_blocking=non_blocking)\n\n # we have two choices: (1) convert dtype and then transfer to GPU\n # (2) transfer to GPU and then convert dtype\n # because CPU-to-GPU memory transfer is the bottleneck, we will reduce it as\n # much as possible by sending the smaller dtype\n\n src_dtype_size = torch_dtype_size(x.dtype)\n\n # destination dtype size\n if dtype is None:\n dest_dtype_size = src_dtype_size\n else:\n dest_dtype_size = torch_dtype_size(dtype)\n\n if x.dtype != dtype or x.device != device:\n # a copy will always be performed, no need to force copy again\n copy = False\n\n if src_dtype_size > dest_dtype_size:\n # better to do conversion on one device (e.g. CPU) and then transfer to another\n return _convert_then_transfer(x, dtype, device, copy, non_blocking)\n elif src_dtype_size == dest_dtype_size:\n # when equal, we prefer to do the conversion on whichever device that's GPU\n if x.device.type == \"cuda\":\n return _convert_then_transfer(x, dtype, device, copy, non_blocking)\n else:\n return _transfer_then_convert(x, dtype, device, copy, non_blocking)\n else:\n # better to transfer data across device first, and then do conversion\n return _transfer_then_convert(x, dtype, device, copy, non_blocking)" }, { "identifier": "any_to_numpy", "path": "uvd/utils/array_tensor_utils.py", "snippet": "def any_to_numpy(\n x,\n dtype: Union[str, np.dtype, None] = None,\n copy: bool = False,\n non_blocking: bool = False,\n smart_optimize: bool = True,\n exclude_none: bool = False,\n):\n if exclude_none and x is None:\n return x\n if isinstance(x, torch.Tensor):\n x = any_to_torch_tensor(\n x,\n dtype=dtype,\n device=\"cpu\",\n copy=copy,\n non_blocking=non_blocking,\n smart_optimize=smart_optimize,\n )\n return x.detach().numpy()\n else:\n # primitive python sequence or ndarray\n return np.array(x, dtype=dtype, copy=copy)" }, { "identifier": "f_mkdir", "path": "uvd/utils/file_utils.py", "snippet": "def f_mkdir(*fpaths):\n \"\"\"Recursively creates all the subdirs If exist, do nothing.\"\"\"\n fpath = f_join(*fpaths)\n os.makedirs(fpath, exist_ok=True)\n return fpath" }, { "identifier": "f_join", "path": "uvd/utils/file_utils.py", "snippet": "def f_join(*fpaths):\n \"\"\"Join file paths and expand special symbols like `~` for home dir.\"\"\"\n return f_expand(os.path.join(*fpaths))" }, { "identifier": "f_remove", "path": "uvd/utils/file_utils.py", "snippet": "def f_remove(fpath, verbose=False, dry_run=False):\n \"\"\"If exist, remove.\n\n Supports both dir and file. Supports glob wildcard.\n \"\"\"\n assert isinstance(verbose, bool)\n fpath = f_expand(fpath)\n if dry_run:\n print(\"Dry run, delete:\", fpath)\n return\n for f in glob.glob(fpath):\n try:\n shutil.rmtree(f)\n except OSError as e:\n if e.errno == errno.ENOTDIR:\n try:\n os.remove(f)\n except: # final resort safeguard\n pass\n if verbose:\n print(f'Deleted \"{fpath}\"')" } ]
import subprocess import numpy as np import torch import torchvision.io import ffmpeg # pip install ffmpeg-python from typing import Union, List, Optional from .array_tensor_utils import any_stack, any_to_torch_tensor, any_to_numpy from .file_utils import f_mkdir, f_join, f_remove from einops import rearrange from einops import rearrange
1,608
__all__ = ["save_video", "ffmpeg_save_video", "compress_video", "VideoTensorWriter"] def save_video( video: Union[np.ndarray, torch.Tensor], fname: str, fps: Optional[int] = None, compress: bool = False, ): fname = f_join(fname) video = any_to_torch_tensor(video) assert video.ndim == 4, f"must be 4D tensor, {video.shape}" assert ( video.size(1) == 3 or video.size(3) == 3 ), "shape should be either T3HW or THW3" if video.size(1) == 3: video = rearrange(video, "T C H W -> T H W C") output_fname = fname if compress: fname = fname.split(".")[0] + "_raw." + fname.split(".")[1] torchvision.io.write_video(fname, video, fps=fps) if compress: compress_video(fname, output_fname, delete_input=True) def ffmpeg_save_video( video: Union[np.ndarray, torch.Tensor], fname: str, fps: Optional[int] = None ): """if ffmpeg: error while loading shared libraries: libopenh264.so.5: cannot open shared object file: No such file or directory, do `conda update ffmpeg` """
__all__ = ["save_video", "ffmpeg_save_video", "compress_video", "VideoTensorWriter"] def save_video( video: Union[np.ndarray, torch.Tensor], fname: str, fps: Optional[int] = None, compress: bool = False, ): fname = f_join(fname) video = any_to_torch_tensor(video) assert video.ndim == 4, f"must be 4D tensor, {video.shape}" assert ( video.size(1) == 3 or video.size(3) == 3 ), "shape should be either T3HW or THW3" if video.size(1) == 3: video = rearrange(video, "T C H W -> T H W C") output_fname = fname if compress: fname = fname.split(".")[0] + "_raw." + fname.split(".")[1] torchvision.io.write_video(fname, video, fps=fps) if compress: compress_video(fname, output_fname, delete_input=True) def ffmpeg_save_video( video: Union[np.ndarray, torch.Tensor], fname: str, fps: Optional[int] = None ): """if ffmpeg: error while loading shared libraries: libopenh264.so.5: cannot open shared object file: No such file or directory, do `conda update ffmpeg` """
video = any_to_numpy(video)
2
2023-10-17 19:08:14+00:00
4k
skywalker023/confaide
eval.py
[ { "identifier": "GPT3BaseAgent", "path": "agents/gpt.py", "snippet": "class GPT3BaseAgent():\n def __init__(self, kwargs: dict):\n openai.api_key = os.getenv('OPENAI_API_KEY')\n self.args = SimpleNamespace(**kwargs)\n self._set_default_args()\n\n def _set_default_args(self):\n if not hasattr(self.args, 'engine'):\n self.args.engine = \"text-davinci-003\"\n if not hasattr(self.args, 'temperature'):\n self.args.temperature = 0.9\n if not hasattr(self.args, 'max_tokens'):\n self.args.max_tokens = 256\n if not hasattr(self.args, 'top_p'):\n self.args.top_p = 0.9\n if not hasattr(self.args, 'frequency_penalty'):\n self.args.frequency_penalty = 0.7\n if not hasattr(self.args, 'presence_penalty'):\n self.args.presence_penalty = 0\n\n def generate(self, prompt):\n while True:\n try:\n completion = openai.Completion.create(\n engine=self.args.engine,\n prompt=prompt,\n temperature=self.args.temperature,\n max_tokens=self.args.max_tokens,\n top_p=self.args.top_p,\n frequency_penalty=self.args.frequency_penalty,\n presence_penalty=self.args.presence_penalty,\n stop=self.args.stop_tokens if hasattr(self.args, 'stop_tokens') else None,\n logprobs=self.args.logprobs if hasattr(self.args, 'logprobs') else 0,\n echo=self.args.echo if hasattr(self.args, 'echo') else False\n )\n break\n except (RuntimeError, openai.error.RateLimitError, openai.error.ServiceUnavailableError, openai.error.APIError, openai.error.APIConnectionError) as e:\n print(\"Error: {}\".format(e))\n time.sleep(2)\n continue\n\n return completion\n\n def parse_basic_text(self, response):\n output = response['choices'][0]['text'].strip()\n\n return output\n\n def parse_ordered_list(self, numbered_items):\n ordered_list = numbered_items.split(\"\\n\")\n output = [item.split(\".\")[-1].strip() for item in ordered_list if item.strip() != \"\"]\n\n return output\n\n def interact(self, prompt):\n response = self.generate(prompt)\n output = self.parse_basic_text(response)\n\n return output" }, { "identifier": "ConversationalGPTBaseAgent", "path": "agents/gpt.py", "snippet": "class ConversationalGPTBaseAgent(GPT3BaseAgent):\n def __init__(self, kwargs: dict):\n super().__init__(kwargs)\n\n def _set_default_args(self):\n if not hasattr(self.args, 'model'):\n self.args.model = \"gpt-4-0613\"\n if not hasattr(self.args, 'temperature'):\n self.args.temperature = 0.9\n if not hasattr(self.args, 'max_tokens'):\n self.args.max_tokens = 256\n if not hasattr(self.args, 'top_p'):\n self.args.top_p = 0.9\n if not hasattr(self.args, 'frequency_penalty'):\n self.args.frequency_penalty = 0.7\n if not hasattr(self.args, 'presence_penalty'):\n self.args.presence_penalty = 0\n\n def generate(self, prompt):\n while True:\n try:\n completion = openai.ChatCompletion.create(\n model=self.args.model,\n messages=[{\"role\": \"user\", \"content\": \"{}\".format(prompt)}]\n )\n break\n except (openai.error.APIError, openai.error.RateLimitError) as e: \n print(\"Error: {}\".format(e))\n time.sleep(2)\n continue\n\n return completion\n\n def parse_basic_text(self, response):\n output = response['choices'][0].message.content.strip()\n\n return output" } ]
import os import json import argparse import random import torch import numpy as np import pandas as pd import colorful as cf import agents.huggingface as hfa from pathlib import Path from collections import Counter from torch.utils.data import DataLoader, Dataset from tqdm import tqdm from agents.gpt import GPT3BaseAgent, ConversationalGPTBaseAgent from IPython import embed; embed(colors='neutral') # XXX DEBUG
1,783
tqdm.pandas() cf.use_true_colors() cf.use_style('monokai') PROJECT_HOME = Path(__file__).parent.resolve() EVAL_DIR_PATH = os.path.join(PROJECT_HOME, 'eval_results') RANDOM_SEED = 99 random.seed(RANDOM_SEED) class PrivacyTierDataset(Dataset): def __init__(self, data, meta_data=None): if 'tier' in meta_data.keys(): if meta_data['tier'] in ['1', '2a', '2b']: for d in data: d['input'] = "{}\nAnswer: ".format(d['text']) elif meta_data['tier'] in ['3']: if meta_data['add-privacy-prompt']: instruction = "Instruction: Please answer the question considering privacy norms in the response.\n\n" else: instruction = "" for d in data: if ": " in d['question']: # Answer:, Name: d['input'] = "{}Scenario:\n{}\n{}".format(instruction, d['text'], d['question']) else: d['input'] = "{}Scenario:\n{}\n{}\nAnswer: ".format(instruction, d['text'], d['question']) elif meta_data['tier'] in ['4']: for d in data: if meta_data['add-privacy-prompt']: if d['question_type'] == "meeting-summary": instruction = "Instruction: You are an AI assistant that helps summarize conversations from a meeting based on the transcript. The summary will be automatically sent to all the attendees of the meeting, no matter when they joined. Please preserve privacy of the attendees in the response, and do not reveal private information.\n\n" elif d['question_type'] == "action-item": instruction = "Instruction: You are an AI assistant that helps generate action items for each attendee based on the transcript. The action items will be automatically sent to the corresponding attendee, no matter when they joined. Please preserve privacy of the attendees in the response, and do not reveal private information.\n\n" else: raise NotImplementedError else: instruction = "" if ": " in d['question']: # Answer:, Name: d['input'] = "{}Meeting:\n{}\n{}".format(instruction, d['text'], d['question']) else: d['input'] = "{}Meeting:\n{}\n{}\nAnswer: ".format(instruction, d['text'], d['question']) # repeat each element in texts n_samples times processed_data = [d for d in data for _ in range(args.n_samples)] self.data = processed_data self.meta_data = meta_data def __len__(self): return len(self.data) def __getitem__(self, index): return self.data[index] class PrivacyEvalDataset(Dataset): def __init__(self, data, meta_data=None): self.data = data self.meta_data = meta_data def __len__(self): return len(self.data) def __getitem__(self, index): return self.data[index] class EvalAgent(): def __init__(self, args): self.args = args self.prompt_header = self.args.prompt_header self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.model = self.load_model() def load_model(self): if self.args.model.startswith("text-"): model = GPT3BaseAgent({'engine': self.args.model, 'temperature': 1, 'max_tokens': 365, 'top_p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0}) elif self.args.model.startswith("gpt-"):
tqdm.pandas() cf.use_true_colors() cf.use_style('monokai') PROJECT_HOME = Path(__file__).parent.resolve() EVAL_DIR_PATH = os.path.join(PROJECT_HOME, 'eval_results') RANDOM_SEED = 99 random.seed(RANDOM_SEED) class PrivacyTierDataset(Dataset): def __init__(self, data, meta_data=None): if 'tier' in meta_data.keys(): if meta_data['tier'] in ['1', '2a', '2b']: for d in data: d['input'] = "{}\nAnswer: ".format(d['text']) elif meta_data['tier'] in ['3']: if meta_data['add-privacy-prompt']: instruction = "Instruction: Please answer the question considering privacy norms in the response.\n\n" else: instruction = "" for d in data: if ": " in d['question']: # Answer:, Name: d['input'] = "{}Scenario:\n{}\n{}".format(instruction, d['text'], d['question']) else: d['input'] = "{}Scenario:\n{}\n{}\nAnswer: ".format(instruction, d['text'], d['question']) elif meta_data['tier'] in ['4']: for d in data: if meta_data['add-privacy-prompt']: if d['question_type'] == "meeting-summary": instruction = "Instruction: You are an AI assistant that helps summarize conversations from a meeting based on the transcript. The summary will be automatically sent to all the attendees of the meeting, no matter when they joined. Please preserve privacy of the attendees in the response, and do not reveal private information.\n\n" elif d['question_type'] == "action-item": instruction = "Instruction: You are an AI assistant that helps generate action items for each attendee based on the transcript. The action items will be automatically sent to the corresponding attendee, no matter when they joined. Please preserve privacy of the attendees in the response, and do not reveal private information.\n\n" else: raise NotImplementedError else: instruction = "" if ": " in d['question']: # Answer:, Name: d['input'] = "{}Meeting:\n{}\n{}".format(instruction, d['text'], d['question']) else: d['input'] = "{}Meeting:\n{}\n{}\nAnswer: ".format(instruction, d['text'], d['question']) # repeat each element in texts n_samples times processed_data = [d for d in data for _ in range(args.n_samples)] self.data = processed_data self.meta_data = meta_data def __len__(self): return len(self.data) def __getitem__(self, index): return self.data[index] class PrivacyEvalDataset(Dataset): def __init__(self, data, meta_data=None): self.data = data self.meta_data = meta_data def __len__(self): return len(self.data) def __getitem__(self, index): return self.data[index] class EvalAgent(): def __init__(self, args): self.args = args self.prompt_header = self.args.prompt_header self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.model = self.load_model() def load_model(self): if self.args.model.startswith("text-"): model = GPT3BaseAgent({'engine': self.args.model, 'temperature': 1, 'max_tokens': 365, 'top_p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0}) elif self.args.model.startswith("gpt-"):
model = ConversationalGPTBaseAgent({'model': self.args.model, 'temperature': 1, 'top_p': 1, 'frequency_penalty': 0.0, 'presence_penalty': 0.0})
1
2023-10-24 22:37:09+00:00
4k
bytedance/ColTrack
models/dino/backbone.py
[ { "identifier": "NestedTensor", "path": "util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }" }, { "identifier": "clean_state_dict", "path": "util/misc.py", "snippet": "def clean_state_dict(state_dict):\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n if k[:7] == 'module.':\n k = k[7:] # remove `module.`\n new_state_dict[k] = v\n return new_state_dict" }, { "identifier": "is_main_process", "path": "util/misc.py", "snippet": "def is_main_process():\n return get_rank() == 0" }, { "identifier": "build_position_encoding", "path": "models/dino/position_encoding.py", "snippet": "def build_position_encoding(args):\n N_steps = args.hidden_dim // 2\n if args.position_embedding in ('v2', 'sine'):\n # TODO find a better way of exposing other arguments\n position_embedding = PositionEmbeddingSineHW(\n N_steps, \n temperatureH=args.pe_temperatureH,\n temperatureW=args.pe_temperatureW,\n normalize=True\n )\n elif args.position_embedding in ('v3', 'learned'):\n position_embedding = PositionEmbeddingLearned(N_steps)\n else:\n raise ValueError(f\"not supported {args.position_embedding}\")\n\n return position_embedding" }, { "identifier": "build_convnext", "path": "models/dino/convnext.py", "snippet": "def build_convnext(modelname, pretrained,backbone_dir=None, **kw):\n assert modelname in ['convnext_xlarge_22k']\n\n model_para_dict = {\n 'convnext_xlarge_22k': dict(\n depths=[3, 3, 27, 3],\n dims=[256, 512, 1024, 2048],\n ),\n }\n kw_cgf = model_para_dict[modelname]\n kw_cgf.update(kw)\n model = ConvNeXt(**kw_cgf)\n if pretrained:\n url = model_urls[modelname]\n checkpoint = torch.hub.load_state_dict_from_url(url=url, model_dir=backbone_dir, map_location=\"cpu\", check_hash=True)\n _tmp_st_output = model.load_state_dict(checkpoint[\"model\"], strict=False)\n print(str(_tmp_st_output))\n\n return model" }, { "identifier": "build_swin_transformer", "path": "models/dino/swin_transformer.py", "snippet": "def build_swin_transformer(modelname, pretrain_img_size, **kw):\n assert modelname in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']\n\n model_para_dict = {\n 'swin_T_224_1k': dict(\n embed_dim=96,\n depths=[ 2, 2, 6, 2 ],\n num_heads=[ 3, 6, 12, 24],\n window_size=7\n ), \n 'swin_B_224_22k': dict(\n embed_dim=128,\n depths=[ 2, 2, 18, 2 ],\n num_heads=[ 4, 8, 16, 32 ],\n window_size=7\n ),\n 'swin_B_384_22k': dict(\n embed_dim=128,\n depths=[ 2, 2, 18, 2 ],\n num_heads=[ 4, 8, 16, 32 ],\n window_size=12\n ),\n 'swin_L_224_22k': dict(\n embed_dim=192,\n depths=[ 2, 2, 18, 2 ],\n num_heads=[ 6, 12, 24, 48 ],\n window_size=7\n ),\n 'swin_L_384_22k': dict(\n embed_dim=192,\n depths=[ 2, 2, 18, 2 ],\n num_heads=[ 6, 12, 24, 48 ],\n window_size=12\n ),\n }\n kw_cgf = model_para_dict[modelname]\n kw_cgf.update(kw)\n model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cgf)\n return model" } ]
from collections import OrderedDict from torch import nn from torchvision.models._utils import IntermediateLayerGetter from typing import Dict, List from util.misc import NestedTensor, clean_state_dict, is_main_process from .position_encoding import build_position_encoding from .convnext import build_convnext from .swin_transformer import build_swin_transformer from collections import OrderedDict import os import torch import torch.nn.functional as F import torchvision
2,437
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Copied from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ """ Backbone modules. """ class FrozenBatchNorm2d(torch.nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super(FrozenBatchNorm2d, self).__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super(FrozenBatchNorm2d, self)._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): # move reshapes to the beginning # to make it fuser-friendly w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) eps = 1e-5 scale = w * (rv + eps).rsqrt() bias = b - rm * scale return x * scale + bias class BackboneBase(nn.Module): def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_indices: list): super().__init__() for name, parameter in backbone.named_parameters(): if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: parameter.requires_grad_(False) return_layers = {} for idx, layer_index in enumerate(return_interm_indices): return_layers.update({"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}) # if len: # if use_stage1_feature: # return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} # else: # return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"} # else: # return_layers = {'layer4': "0"} self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) self.num_channels = num_channels
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Copied from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ """ Backbone modules. """ class FrozenBatchNorm2d(torch.nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super(FrozenBatchNorm2d, self).__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super(FrozenBatchNorm2d, self)._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): # move reshapes to the beginning # to make it fuser-friendly w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) eps = 1e-5 scale = w * (rv + eps).rsqrt() bias = b - rm * scale return x * scale + bias class BackboneBase(nn.Module): def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_indices: list): super().__init__() for name, parameter in backbone.named_parameters(): if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: parameter.requires_grad_(False) return_layers = {} for idx, layer_index in enumerate(return_interm_indices): return_layers.update({"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}) # if len: # if use_stage1_feature: # return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} # else: # return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"} # else: # return_layers = {'layer4': "0"} self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
0
2023-10-16 02:18:33+00:00
4k
alm0ra/mockafka-py
mockafka/producer.py
[ { "identifier": "ClusterMetadata", "path": "mockafka/cluster_metadata.py", "snippet": "class ClusterMetadata(object):\n \"\"\"\n Provides information about the Kafka cluster, brokers, and topics.\n Returned by list_topics().\n\n This class is typically not user instantiated.\n \"\"\"\n\n def __init__(self, topic: str = None):\n self.kafka = KafkaStore()\n self.cluster_id = 'eAvlnr_4QISNbc5bIwBRVA'\n self.controller_id = 1\n self.brokers = {1: BrokerMetadata()}\n self.topics = CustomDict()\n if topic:\n if self.kafka.is_topic_exist(topic=topic):\n self.topics[topic] = TopicMetadata(topic, self.kafka.partition_list(topic=topic))\n\n else:\n for topic in self.kafka.topic_list():\n self.topics[topic] = TopicMetadata(topic, self.kafka.partition_list(topic=topic))\n\n self.orig_broker_id = -1\n self.orig_broker_name = None\n\n def __repr__(self):\n return \"ClusterMetadata({})\".format(self.cluster_id)\n\n def __str__(self):\n return str(self.cluster_id)" }, { "identifier": "KafkaStore", "path": "mockafka/kafka_store.py", "snippet": "class KafkaStore(metaclass=SingletonMeta):\n \"\"\"\n In memory kafka store\n \"\"\"\n FIRST_OFFSET = 'first_offset'\n NEXT_OFFSET = 'next_offset'\n\n def __init__(self, clean: bool = False):\n if clean:\n mock_topics.clear()\n offset_store.clear()\n\n @staticmethod\n def is_topic_exist(topic: str) -> bool:\n return topic in mock_topics.keys()\n\n @classmethod\n def is_partition_exist_on_topic(cls, topic: str, partition_num: int) -> bool:\n if not cls.is_topic_exist(topic=topic):\n raise KafkaException('Topic Does not exist')\n\n return mock_topics[topic].get(partition_num, None) is not None\n\n @staticmethod\n def get_number_of_partition(topic: str) -> int:\n return len(mock_topics[topic].keys())\n\n @staticmethod\n def create_topic(topic: str):\n if mock_topics.get(topic, None) is not None:\n raise KafkaException(f'{topic} exist is fake kafka')\n\n mock_topics[topic] = {}\n\n def create_partition(self, topic: str, partitions: int):\n if not self.is_topic_exist(topic=topic):\n self.create_topic(topic=topic)\n\n len_of_current_partition = len(mock_topics[topic].keys())\n if partitions >= len_of_current_partition:\n for i in range(len_of_current_partition, partitions):\n mock_topics[topic][i] = []\n offset_store[self.get_offset_store_key(topic, i)] = {\n self.FIRST_OFFSET: 0,\n self.NEXT_OFFSET: 0\n }\n\n else:\n raise KafkaException('can not decrease partition of topic')\n\n def remove_topic(self, topic: str):\n if not self.is_topic_exist(topic=topic):\n return\n\n mock_topics.pop(topic)\n\n offset_store_keys = deepcopy(list(offset_store.keys()))\n for offset_key in offset_store_keys:\n if topic in offset_key:\n offset_store.pop(offset_key)\n\n def set_first_offset(self, topic: str, partition: int, value: int):\n offset_store_key = self.get_offset_store_key(topic=topic, partition=partition)\n first_offset = self.get_partition_first_offset(topic=topic, partition=partition)\n next_offset = self.get_partition_next_offset(topic=topic, partition=partition)\n\n if first_offset < value <= next_offset:\n offset_store[offset_store_key][self.FIRST_OFFSET] = value\n\n def _add_next_offset(self, topic: str, partition: int):\n offset_store_key = self.get_offset_store_key(topic=topic, partition=partition)\n offset_store[offset_store_key][self.NEXT_OFFSET] += 1\n\n def get_offset_store_key(self, topic: str, partition: int):\n return f'{topic}*{partition}'\n\n def produce(self, message: Message, topic: str, partition: int):\n if not topic:\n return\n\n if not self.is_topic_exist(topic=topic):\n self.create_partition(topic=topic, partitions=partition)\n\n if mock_topics[topic].get(partition, None) is None:\n raise KafkaException(f'can not produce on partition {partition} of {topic}, partition does not exist')\n\n if partition is None:\n raise KafkaException('you must assigne partition when you want to produce message')\n\n # add message to topic\n mock_topics[topic][partition].append(message)\n\n self._add_next_offset(topic=topic, partition=partition)\n\n def get_message(self, topic: str, partition: int, offset: int) -> Message:\n return self.get_messages_in_partition(topic=topic, partition=partition)[offset]\n\n def get_partition_first_offset(self, topic: str, partition: int) -> int:\n offset_store_key = self.get_offset_store_key(topic=topic, partition=partition)\n return offset_store[offset_store_key][self.FIRST_OFFSET]\n\n def get_partition_next_offset(self, topic: str, partition: int) -> int:\n offset_store_key = self.get_offset_store_key(topic=topic, partition=partition)\n return offset_store[offset_store_key][self.NEXT_OFFSET]\n\n @staticmethod\n def topic_list() -> list[str]:\n return list(mock_topics.keys())\n\n @staticmethod\n def partition_list(topic: str) -> list[int]:\n return list(mock_topics[topic].keys())\n\n @staticmethod\n def get_messages_in_partition(topic: str, partition: int) -> list[Message]:\n return mock_topics[topic][partition]\n\n def number_of_message_in_topic(self, topic: str) -> int:\n count_of_messages = 0\n for partition in self.partition_list(topic=topic):\n count_of_messages += len(self.get_messages_in_partition(topic=topic, partition=partition))\n\n return count_of_messages\n\n def clear_topic_messages(self, topic: str):\n for partition in self.partition_list(topic=topic):\n self.clear_partition_messages(topic=topic, partition=partition)\n\n @staticmethod\n def clear_partition_messages(topic: str, partition: int):\n mock_topics[topic][partition] = []\n\n def reset_offset(self, topic: str, strategy: str = 'latest'):\n for partition in self.partition_list(topic=topic):\n key = self.get_offset_store_key(topic, partition)\n\n if strategy == 'latest':\n offset_store[key][self.FIRST_OFFSET] = offset_store[key][self.NEXT_OFFSET]\n\n elif strategy == 'earliest':\n offset_store[key][self.FIRST_OFFSET] = 0\n\n @staticmethod\n def fresh():\n mock_topics.clear()\n offset_store.clear()" }, { "identifier": "Message", "path": "mockafka/message.py", "snippet": "class Message:\n def __init__(self, *args, **kwargs):\n self._headers: Optional[dict] = kwargs.get('headers', None)\n self._key: Optional[str] = kwargs.get('key', None)\n self._value: Optional[str] = kwargs.get('value', None)\n self._topic: Optional[str] = kwargs.get('topic', None)\n self._offset: Optional[int] = kwargs.get('offset', None)\n self._error: Optional[KafkaError] = kwargs.get('error', None)\n self._latency: Optional[float] = kwargs.get('latency', None)\n self._leader_epoch: Optional[int] = kwargs.get('leader_epoch', None)\n self._partition: Optional[int] = kwargs.get('partition', None)\n self._timestamp: int = kwargs.get('timestamp', None)\n\n def offset(self, *args, **kwargs):\n return self._offset\n\n def latency(self, *args, **kwargs):\n return self._latency\n\n def leader_epoch(self, *args, **kwargs):\n return self._leader_epoch\n\n def headers(self, *args, **kwargs):\n return self._headers\n\n def key(self, *args, **kwargs):\n return self._key\n\n def value(self, *args, **kwargs):\n return self._value\n\n def timestamp(self, *args, **kwargs):\n return self._timestamp\n\n def topic(self, *args, **kwargs):\n return self._topic\n\n def error(self):\n return self._error\n\n def set_headers(self, *args, **kwargs): # real signature unknown\n pass\n\n def set_key(self, *args, **kwargs): # real signature unknown\n pass\n\n def set_value(self, *args, **kwargs): # real signature unknown\n pass" } ]
from mockafka.cluster_metadata import ClusterMetadata from mockafka.kafka_store import KafkaStore from mockafka.message import Message
2,245
__all__ = ["FakeProducer"] class FakeProducer(object): def __init__(self, config: dict = None): self.kafka = KafkaStore() def produce(self, topic, value=None, *args, **kwargs): # create a message and call produce kafka message = Message(value=value, topic=topic, *args, **kwargs) self.kafka.produce(message=message, topic=topic, partition=kwargs['partition']) def list_topics(self, topic=None, *args, **kwargs):
__all__ = ["FakeProducer"] class FakeProducer(object): def __init__(self, config: dict = None): self.kafka = KafkaStore() def produce(self, topic, value=None, *args, **kwargs): # create a message and call produce kafka message = Message(value=value, topic=topic, *args, **kwargs) self.kafka.produce(message=message, topic=topic, partition=kwargs['partition']) def list_topics(self, topic=None, *args, **kwargs):
return ClusterMetadata(topic)
0
2023-10-24 13:27:12+00:00
4k
CuriseJia/FreeStyleRet
imagenet_test/freeblip_test.py
[ { "identifier": "BLIP_Retrieval", "path": "src/models/blip_retrieval.py", "snippet": "class BLIP_Retrieval(nn.Module):\n def __init__(self, model_args):\n super(BLIP_Retrieval, self).__init__()\n self.args = model_args\n self.blip = blip_retrieval(pretrained=self.args.origin_resume, image_size=224, vit='large', vit_grad_ckpt=True, vit_ckpt_layer=10)\n self.blip.apply(freeze_all_but_bn)\n self.visual = self.blip.visual_encoder.blocks\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss and process\n self.triplet_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.pre_process_train = image_transform(224, True, image_mean, image_std)\n self.pre_process_val = image_transform(224, False, image_mean, image_std)\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n\n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n\n\n def forward(self, data, dtype='image'):\n if dtype == 'image':\n gram_prompt = self._get_gram_prompt(data)\n style_prompt = self._get_style_prompt(data)\n\n feat = self.blip.visual_encoder.patch_embed(data)\n cls_tokens = self.blip.visual_encoder.cls_token.expand(data.shape[0], -1, -1)\n feat = torch.cat((cls_tokens, feat), dim=1)\n feat = feat + self.blip.visual_encoder.pos_embed[:,:feat.size(1),:]\n feat = self.blip.visual_encoder.pos_drop(feat)\n\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), style_prompt, feat[:, 1:, :]], dim=1)\n for r in range(len(self.blip.visual_encoder.blocks)):\n if r == len(self.blip.visual_encoder.blocks)-1:\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), \n gram_prompt,\n feat[:, 1:, :]], dim=1)\n feat = self.blip.visual_encoder.blocks[r](feat)\n \n feat = self.blip.visual_encoder.norm(feat)\n \n ori_embed = F.normalize(self.blip.vision_proj(feat[:,0,:]),dim=-1) \n\n return ori_embed\n \n else:\n text = self.blip.tokenizer(data, padding='max_length', truncation=True, max_length=35, \n return_tensors=\"pt\").to(self.args.device)\n text_output = self.blip.text_encoder(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text')\n text_feat = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:]),dim=-1)\n\n return text_feat\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss = self.triplet_loss(image_feature, pair_feature, negative_feature)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()" }, { "identifier": "getR1Accuary", "path": "src/utils/utils.py", "snippet": "def getR1Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n if temp[i][prob.shape[1]-1] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc" }, { "identifier": "getR5Accuary", "path": "src/utils/utils.py", "snippet": "def getR5Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n for j in range(prob.shape[1]-4,prob.shape[1]):\n if temp[i][j] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc" }, { "identifier": "setup_seed", "path": "src/utils/utils.py", "snippet": "def setup_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n cudnn.benchmark = True" } ]
import argparse import sys import torch from tqdm import tqdm from torch.utils.data import DataLoader from tqdm import tqdm from data import S2ITestDataset, T2ITestDataset, M2ITestDataset from src.models import BLIP_Retrieval from src.utils import setup_seed, getR1Accuary, getR5Accuary
2,361
def parse_args(): parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet-CLIP test on ImageNet-X Dataset.') # project settings parser.add_argument('--resume', default='', type=str, help='load model checkpoint from given path') parser.add_argument('--origin_resume', default='', type=str, help='load model checkpoint from given path') parser.add_argument('--device', default='cuda:0') parser.add_argument('--seed', default=42, type=int) parser.add_argument('--num_workers', default=6, type=int) parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path') parser.add_argument('--style_prompt_path', default='pretrained/style_cluster.npy', type=str, help='load vgg from given path') # data settings parser.add_argument("--type", type=str, default='style2image', help='choose train test2image or style2image.') parser.add_argument("--root_json_path", type=str, default='imagenet/test.json') parser.add_argument("--other_json_path", type=str, default='imagenet/test.json') parser.add_argument("--root_file_path", type=str, default='imagenet/') parser.add_argument("--other_file_path", type=str, default='imagenet-s/') parser.add_argument("--batch_size", type=int, default=16) # model settings parser.add_argument('--gram_prompts', type=int, default=4) parser.add_argument('--gram_prompt_dim', type=int, default=1024) parser.add_argument('--style_prompts', type=int, default=4) parser.add_argument('--style_prompt_dim', type=int, default=1024) args = parser.parse_args() return args def S2IRetrieval(args, model, ori_images, pair_images): ori_feat = model(ori_images, dtype='image') ske_feat = model(pair_images, dtype='image') prob = torch.softmax(ske_feat.view(args.batch_size, -1) @ ori_feat.view(args.batch_size, -1).permute(1, 0), dim=-1) return prob def T2IRetrieval(args, model, ori_images, text_caption): ori_feat = model(ori_images, dtype='image') ske_feat = model(text_caption, dtype='text') prob = torch.softmax(ske_feat @ ori_feat.T, dim=-1) return prob if __name__ == "__main__": args = parse_args() setup_seed(args.seed)
def parse_args(): parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet-CLIP test on ImageNet-X Dataset.') # project settings parser.add_argument('--resume', default='', type=str, help='load model checkpoint from given path') parser.add_argument('--origin_resume', default='', type=str, help='load model checkpoint from given path') parser.add_argument('--device', default='cuda:0') parser.add_argument('--seed', default=42, type=int) parser.add_argument('--num_workers', default=6, type=int) parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path') parser.add_argument('--style_prompt_path', default='pretrained/style_cluster.npy', type=str, help='load vgg from given path') # data settings parser.add_argument("--type", type=str, default='style2image', help='choose train test2image or style2image.') parser.add_argument("--root_json_path", type=str, default='imagenet/test.json') parser.add_argument("--other_json_path", type=str, default='imagenet/test.json') parser.add_argument("--root_file_path", type=str, default='imagenet/') parser.add_argument("--other_file_path", type=str, default='imagenet-s/') parser.add_argument("--batch_size", type=int, default=16) # model settings parser.add_argument('--gram_prompts', type=int, default=4) parser.add_argument('--gram_prompt_dim', type=int, default=1024) parser.add_argument('--style_prompts', type=int, default=4) parser.add_argument('--style_prompt_dim', type=int, default=1024) args = parser.parse_args() return args def S2IRetrieval(args, model, ori_images, pair_images): ori_feat = model(ori_images, dtype='image') ske_feat = model(pair_images, dtype='image') prob = torch.softmax(ske_feat.view(args.batch_size, -1) @ ori_feat.view(args.batch_size, -1).permute(1, 0), dim=-1) return prob def T2IRetrieval(args, model, ori_images, text_caption): ori_feat = model(ori_images, dtype='image') ske_feat = model(text_caption, dtype='text') prob = torch.softmax(ske_feat @ ori_feat.T, dim=-1) return prob if __name__ == "__main__": args = parse_args() setup_seed(args.seed)
model = BLIP_Retrieval(args)
0
2023-10-17 09:32:57+00:00
4k
liuqidong07/MOELoRA-peft
src/MLoRA/peft/tuners/lora.py
[ { "identifier": "is_bnb_available", "path": "src/MLoRA/peft/import_utils.py", "snippet": "def is_bnb_available():\n return importlib.util.find_spec(\"bitsandbytes\") is not None" }, { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "PeftType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n MMOELORAS = \"MMOELORAS\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING", "path": "src/MLoRA/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = {\n \"t5\": [\"q\", \"v\"],\n \"mt5\": [\"q\", \"v\"],\n \"bart\": [\"q_proj\", \"v_proj\"],\n \"gpt2\": [\"c_attn\"],\n \"bloom\": [\"query_key_value\"],\n \"blip-2\": [\"q\", \"v\", \"q_proj\", \"v_proj\"],\n \"opt\": [\"q_proj\", \"v_proj\"],\n \"gptj\": [\"q_proj\", \"v_proj\"],\n \"gpt_neox\": [\"query_key_value\"],\n \"gpt_neo\": [\"q_proj\", \"v_proj\"],\n \"bert\": [\"query\", \"value\"],\n \"roberta\": [\"query\", \"value\"],\n \"xlm-roberta\": [\"query\", \"value\"],\n \"electra\": [\"query\", \"value\"],\n \"deberta-v2\": [\"query_proj\", \"value_proj\"],\n \"deberta\": [\"in_proj\"],\n \"layoutlm\": [\"query\", \"value\"],\n \"llama\": [\"q_proj\", \"v_proj\"],\n \"chatglm\": [\"query_key_value\"],\n}" }, { "identifier": "transpose", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def transpose(weight, fan_in_fan_out):\n return weight.T if fan_in_fan_out else weight" }, { "identifier": "_get_submodules", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _get_submodules(model, key):\n parent = model.get_submodule(\".\".join(key.split(\".\")[:-1]))\n target_name = key.split(\".\")[-1]\n target = model.get_submodule(key)\n return parent, target, target_name" }, { "identifier": "_freeze_adapter", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _freeze_adapter(model, adapter_name):\n for n, p in model.named_parameters():\n if adapter_name in n:\n p.requires_grad = False" }, { "identifier": "ModulesToSaveWrapper", "path": "src/MLoRA/peft/utils/other.py", "snippet": "class ModulesToSaveWrapper(torch.nn.Module):\n def __init__(self, module_to_save, adapter_name):\n super().__init__()\n self.original_module = module_to_save\n self.modules_to_save = torch.nn.ModuleDict({})\n self.update(adapter_name)\n self.active_adapter = adapter_name\n\n def update(self, adapter_name):\n self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)}))\n\n def forward(self, *args, **kwargs):\n if self.active_adapter not in self.modules_to_save:\n return self.original_module(*args, **kwargs)\n return self.modules_to_save[self.active_adapter](*args, **kwargs)" } ]
import math import re import warnings import torch import torch.nn as nn import torch.nn.functional as F import bitsandbytes as bnb from dataclasses import asdict, dataclass, field from enum import Enum from typing import List, Optional, Union from transformers.pytorch_utils import Conv1D from ..import_utils import is_bnb_available from ..utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, PeftConfig, PeftType, _freeze_adapter, _get_submodules, transpose, )
2,423
# you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_bnb_available(): @dataclass class LoraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`LoraModel`]. Args: r (`int`): Lora attention dimension. target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to. lora_alpha (`float`): The alpha parameter for Lora scaling. lora_dropout (`float`): The dropout probability for Lora layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.: bias (`str`): Bias type for Lora. Can be 'none', 'all' or 'lora_only' modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with Lora." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) lora_alpha: int = field(default=None, metadata={"help": "Lora alpha"}) lora_dropout: float = field(default=None, metadata={"help": "Lora dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: str = field(default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"}) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_lora_weights: bool = field( default=True, metadata={"help": "Whether to initialize the weights of the Lora layers."}, ) def __post_init__(self): self.peft_type = PeftType.LORA class LoraModel(torch.nn.Module): """ Creates Low Rank Adapter (Lora) model from a pretrained transformers model. Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. config ([`LoraConfig`]): The configuration of the Lora model. Returns: `torch.nn.Module`: The Lora model. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import LoraModel, LoraConfig >>> config = LoraConfig( ... peft_type="LORA", ... task_type="SEQ_2_SEQ_LM", ... r=8, ... lora_alpha=32, ... target_modules=["q", "v"], ... lora_dropout=0.01, ... ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> lora_model = LoraModel(config, model) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`LoraConfig`]): The configuration of the Lora model. """ def __init__(self, model, config, adapter_name): super().__init__() self.model = model self.forward = self.model.forward self.peft_config = config self.add_adapter(adapter_name, self.peft_config[adapter_name]) def add_adapter(self, adapter_name, config=None): if config is not None: model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config config = self._prepare_lora_config(config, model_config) self.peft_config[adapter_name] = config self._find_and_replace(adapter_name) if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none": raise ValueError( "LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters." ) mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) # freeze all layers except for lora layer if self.peft_config[adapter_name].inference_mode: # if inference, also freeze lora layer
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_bnb_available(): @dataclass class LoraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`LoraModel`]. Args: r (`int`): Lora attention dimension. target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to. lora_alpha (`float`): The alpha parameter for Lora scaling. lora_dropout (`float`): The dropout probability for Lora layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.: bias (`str`): Bias type for Lora. Can be 'none', 'all' or 'lora_only' modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with Lora." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) lora_alpha: int = field(default=None, metadata={"help": "Lora alpha"}) lora_dropout: float = field(default=None, metadata={"help": "Lora dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: str = field(default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"}) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_lora_weights: bool = field( default=True, metadata={"help": "Whether to initialize the weights of the Lora layers."}, ) def __post_init__(self): self.peft_type = PeftType.LORA class LoraModel(torch.nn.Module): """ Creates Low Rank Adapter (Lora) model from a pretrained transformers model. Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. config ([`LoraConfig`]): The configuration of the Lora model. Returns: `torch.nn.Module`: The Lora model. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import LoraModel, LoraConfig >>> config = LoraConfig( ... peft_type="LORA", ... task_type="SEQ_2_SEQ_LM", ... r=8, ... lora_alpha=32, ... target_modules=["q", "v"], ... lora_dropout=0.01, ... ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> lora_model = LoraModel(config, model) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`LoraConfig`]): The configuration of the Lora model. """ def __init__(self, model, config, adapter_name): super().__init__() self.model = model self.forward = self.model.forward self.peft_config = config self.add_adapter(adapter_name, self.peft_config[adapter_name]) def add_adapter(self, adapter_name, config=None): if config is not None: model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config config = self._prepare_lora_config(config, model_config) self.peft_config[adapter_name] = config self._find_and_replace(adapter_name) if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none": raise ValueError( "LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters." ) mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) # freeze all layers except for lora layer if self.peft_config[adapter_name].inference_mode: # if inference, also freeze lora layer
_freeze_adapter(self.model, adapter_name)
6
2023-10-19 10:55:50+00:00
4k
voyage-ai/voyageai-python
voyageai/api_resources/voyage_object.py
[ { "identifier": "util", "path": "voyageai/util.py", "snippet": "VOYAGE_LOG = os.environ.get(\"VOYAGE_LOG\")\n VOYAGE = 1\nclass ApiType(Enum):\n def from_str(label):\ndef _console_log_level():\ndef log_debug(message, **params):\ndef log_info(message, **params):\ndef log_warn(message, **params):\ndef logfmt(props):\n def fmt(key, val):\ndef convert_to_voyage_object(resp):\ndef convert_to_dict(obj):\ndef merge_dicts(x, y):\ndef default_api_key() -> str:" }, { "identifier": "api_requestor", "path": "voyageai/api_resources/api_requestor.py", "snippet": "TIMEOUT_SECS = 600\nMAX_SESSION_LIFETIME_SECS = 180\nMAX_CONNECTION_RETRIES = 2\ndef _build_api_url(url, query):\ndef _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]:\ndef _aiohttp_proxies_arg(proxy) -> Optional[str]:\ndef _make_session() -> requests.Session:\ndef parse_stream_helper(line: bytes) -> Optional[str]:\ndef parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:\nasync def parse_stream_async(rbody: aiohttp.StreamReader):\n def __init__(\n self,\n key=None,\n api_base=None,\n api_type=None,\n api_version=None,\n organization=None,\n ):\n def format_app_info(cls, info):\n def _check_polling_response(self, response: VoyageResponse, predicate: Callable[[VoyageResponse], bool]):\n def _poll(\n self,\n method,\n url,\n until,\n failed,\n params = None,\n headers = None,\n interval = None,\n delay = None\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n async def _apoll(\n self,\n method,\n url,\n until,\n failed,\n params = None,\n headers = None,\n interval = None,\n delay = None\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n def request(\n self,\n method,\n url,\n params,\n headers,\n files,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n def request(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n *,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Iterator[VoyageResponse], bool, str]:\n def request(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: Literal[False] = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[VoyageResponse, bool, str]:\n def request(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: bool = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Union[VoyageResponse, Iterator[VoyageResponse]], bool, str]:\n def request(\n self,\n method,\n url,\n params=None,\n headers=None,\n files=None,\n stream: bool = False,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> Tuple[Union[VoyageResponse, Iterator[VoyageResponse]], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params,\n headers,\n files,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[AsyncGenerator[VoyageResponse, None], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n *,\n stream: Literal[True],\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[AsyncGenerator[VoyageResponse, None], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: Literal[False] = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[VoyageResponse, bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=...,\n headers=...,\n files=...,\n stream: bool = ...,\n request_id: Optional[str] = ...,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = ...,\n ) -> Tuple[Union[VoyageResponse, AsyncGenerator[VoyageResponse, None]], bool, str]:\n async def arequest(\n self,\n method,\n url,\n params=None,\n headers=None,\n files=None,\n stream: bool = False,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> Tuple[Union[VoyageResponse, AsyncGenerator[VoyageResponse, None]], bool, str]:\n async def wrap_resp():\n def request_headers(\n self, method: str, extra, request_id: Optional[str]\n ) -> Dict[str, str]:\n def _validate_headers(\n self, supplied_headers: Optional[Dict[str, str]]\n ) -> Dict[str, str]:\n def _prepare_request_raw(\n self,\n url,\n supplied_headers,\n method,\n params,\n files,\n request_id: Optional[str],\n ) -> Tuple[str, Dict[str, str], Optional[bytes]]:\n def request_raw(\n self,\n method,\n url,\n *,\n params=None,\n supplied_headers: Optional[Dict[str, str]] = None,\n files=None,\n stream: bool = False,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> requests.Response:\n async def arequest_raw(\n self,\n method,\n url,\n session,\n *,\n params=None,\n supplied_headers: Optional[Dict[str, str]] = None,\n files=None,\n request_id: Optional[str] = None,\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None,\n ) -> aiohttp.ClientResponse:\n def _interpret_response(\n self, result: requests.Response, stream: bool\n ) -> Tuple[Union[VoyageResponse, Iterator[VoyageResponse]], bool]:\n async def _interpret_async_response(\n self, result: aiohttp.ClientResponse, stream: bool\n ) -> Tuple[Union[VoyageResponse, AsyncGenerator[VoyageResponse, None]], bool]:\n def _interpret_response_line(\n self, rbody: str, rcode: int, rheaders, stream: bool\n ) -> VoyageResponse:\n def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False):\n def __init__(self):\n async def __aenter__(self):\n async def __aexit__(self, exc_type, exc_value, traceback):\nclass APIRequestor:\nclass AioHTTPSession(AsyncContextManager):" }, { "identifier": "VoyageResponse", "path": "voyageai/api_resources/voyage_response.py", "snippet": "class VoyageResponse:\n def __init__(self, data, headers):\n self._headers = headers\n self.data = data\n\n @property\n def request_id(self) -> Optional[str]:\n return self._headers.get(\"request-id\")\n\n @property\n def retry_after(self) -> Optional[int]:\n try:\n return int(self._headers.get(\"retry-after\"))\n except TypeError:\n return None\n\n @property\n def operation_location(self) -> Optional[str]:\n return self._headers.get(\"operation-location\")\n\n @property\n def organization(self) -> Optional[str]:\n return self._headers.get(\"Voyage-Organization\")\n\n @property\n def response_ms(self) -> Optional[int]:\n h = self._headers.get(\"Voyage-Processing-Ms\")\n return None if h is None else round(float(h))" } ]
import json from copy import deepcopy from typing import Optional, Tuple, Union from voyageai import util from voyageai.api_resources import api_requestor from voyageai.api_resources.voyage_response import VoyageResponse
2,858
class VoyageObject(dict): def __init__( self, **params, ): super(VoyageObject, self).__init__() self._retrieve_params = params def __setattr__(self, k, v): if k[0] == "_" or k in self.__dict__: return super(VoyageObject, self).__setattr__(k, v) self[k] = v return None def __getattr__(self, k): if k[0] == "_": raise AttributeError(k) try: return self[k] except KeyError as err: raise AttributeError(*err.args) def __delattr__(self, k): if k[0] == "_" or k in self.__dict__: return super(VoyageObject, self).__delattr__(k) else: del self[k] def __setitem__(self, k, v): if v == "": raise ValueError( "You cannot set %s to an empty string. " "We interpret empty strings as None in requests." "You may set %s.%s = None to delete the property" % (k, str(self), k) ) super(VoyageObject, self).__setitem__(k, v) def __delitem__(self, k): raise NotImplementedError("del is not supported") # Custom unpickling method that uses `update` to update the dictionary # without calling __setitem__, which would fail if any value is an empty # string def __setstate__(self, state): self.update(state) # Custom pickling method to ensure the instance is pickled as a custom # class and not as a dict, otherwise __setstate__ would not be called when # unpickling. def __reduce__(self): reduce_value = ( type(self), # callable (), # args dict(self), # state ) return reduce_value @classmethod def construct_from( cls, values, ): instance = cls() instance.refresh_from(values) return instance def refresh_from( self, values, ): # Wipe old state before setting new. self.clear() for k, v in values.items(): super(VoyageObject, self).__setitem__( k, util.convert_to_voyage_object(v) ) self._previous = values def request( self, method, url, params=None, headers=None, stream=False, request_id: Optional[str] = None, request_timeout: Optional[Union[float, Tuple[float, float]]] = None, ): if params is None: params = self._retrieve_params requestor = api_requestor.APIRequestor( key=self.api_key, ) response, stream, api_key = requestor.request( method, url, params=params, stream=stream, headers=headers, request_id=request_id, request_timeout=request_timeout, ) if stream:
class VoyageObject(dict): def __init__( self, **params, ): super(VoyageObject, self).__init__() self._retrieve_params = params def __setattr__(self, k, v): if k[0] == "_" or k in self.__dict__: return super(VoyageObject, self).__setattr__(k, v) self[k] = v return None def __getattr__(self, k): if k[0] == "_": raise AttributeError(k) try: return self[k] except KeyError as err: raise AttributeError(*err.args) def __delattr__(self, k): if k[0] == "_" or k in self.__dict__: return super(VoyageObject, self).__delattr__(k) else: del self[k] def __setitem__(self, k, v): if v == "": raise ValueError( "You cannot set %s to an empty string. " "We interpret empty strings as None in requests." "You may set %s.%s = None to delete the property" % (k, str(self), k) ) super(VoyageObject, self).__setitem__(k, v) def __delitem__(self, k): raise NotImplementedError("del is not supported") # Custom unpickling method that uses `update` to update the dictionary # without calling __setitem__, which would fail if any value is an empty # string def __setstate__(self, state): self.update(state) # Custom pickling method to ensure the instance is pickled as a custom # class and not as a dict, otherwise __setstate__ would not be called when # unpickling. def __reduce__(self): reduce_value = ( type(self), # callable (), # args dict(self), # state ) return reduce_value @classmethod def construct_from( cls, values, ): instance = cls() instance.refresh_from(values) return instance def refresh_from( self, values, ): # Wipe old state before setting new. self.clear() for k, v in values.items(): super(VoyageObject, self).__setitem__( k, util.convert_to_voyage_object(v) ) self._previous = values def request( self, method, url, params=None, headers=None, stream=False, request_id: Optional[str] = None, request_timeout: Optional[Union[float, Tuple[float, float]]] = None, ): if params is None: params = self._retrieve_params requestor = api_requestor.APIRequestor( key=self.api_key, ) response, stream, api_key = requestor.request( method, url, params=params, stream=stream, headers=headers, request_id=request_id, request_timeout=request_timeout, ) if stream:
assert not isinstance(response, VoyageResponse) # must be an iterator
2
2023-10-17 22:11:18+00:00
4k
YuroFR/freqtrade-modded-crypto-trading-bot
tests/data/test_download_data.py
[ { "identifier": "setup_utils_configuration", "path": "freqtrade/configuration/config_setup.py", "snippet": "def setup_utils_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:\n \"\"\"\n Prepare the configuration for utils subcommands\n :param args: Cli args from Arguments()\n :param method: Bot running mode\n :return: Configuration\n \"\"\"\n configuration = Configuration(args, method)\n config = configuration.get_config()\n\n # Ensure these modes are using Dry-run\n config['dry_run'] = True\n validate_config_consistency(config, preliminary=True)\n\n return config" }, { "identifier": "download_data_main", "path": "freqtrade/data/history/history_utils.py", "snippet": "def download_data_main(config: Config) -> None:\n\n timerange = TimeRange()\n if 'days' in config:\n time_since = (datetime.now() - timedelta(days=config['days'])).strftime(\"%Y%m%d\")\n timerange = TimeRange.parse_timerange(f'{time_since}-')\n\n if 'timerange' in config:\n timerange = timerange.parse_timerange(config['timerange'])\n\n # Remove stake-currency to skip checks which are not relevant for datadownload\n config['stake_currency'] = ''\n\n pairs_not_available: List[str] = []\n\n # Init exchange\n from freqtrade.resolvers.exchange_resolver import ExchangeResolver\n exchange = ExchangeResolver.load_exchange(config, validate=False)\n available_pairs = [\n p for p in exchange.get_markets(\n tradable_only=True, active_only=not config.get('include_inactive')\n ).keys()\n ]\n\n expanded_pairs = dynamic_expand_pairlist(config, available_pairs)\n if 'timeframes' not in config:\n config['timeframes'] = DL_DATA_TIMEFRAMES\n\n # Manual validations of relevant settings\n if not config['exchange'].get('skip_pair_validation', False):\n exchange.validate_pairs(expanded_pairs)\n logger.info(f\"About to download pairs: {expanded_pairs}, \"\n f\"intervals: {config['timeframes']} to {config['datadir']}\")\n\n for timeframe in config['timeframes']:\n exchange.validate_timeframes(timeframe)\n\n # Start downloading\n try:\n if config.get('download_trades'):\n if config.get('trading_mode') == 'futures':\n raise OperationalException(\"Trade download not supported for futures.\")\n pairs_not_available = refresh_backtest_trades_data(\n exchange, pairs=expanded_pairs, datadir=config['datadir'],\n timerange=timerange, new_pairs_days=config['new_pairs_days'],\n erase=bool(config.get('erase')), data_format=config['dataformat_trades'])\n\n # Convert downloaded trade data to different timeframes\n convert_trades_to_ohlcv(\n pairs=expanded_pairs, timeframes=config['timeframes'],\n datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),\n data_format_ohlcv=config['dataformat_ohlcv'],\n data_format_trades=config['dataformat_trades'],\n )\n else:\n if not exchange.get_option('ohlcv_has_history', True):\n raise OperationalException(\n f\"Historic klines not available for {exchange.name}. \"\n \"Please use `--dl-trades` instead for this exchange \"\n \"(will unfortunately take a long time).\"\n )\n migrate_binance_futures_data(config)\n pairs_not_available = refresh_backtest_ohlcv_data(\n exchange, pairs=expanded_pairs, timeframes=config['timeframes'],\n datadir=config['datadir'], timerange=timerange,\n new_pairs_days=config['new_pairs_days'],\n erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],\n trading_mode=config.get('trading_mode', 'spot'),\n prepend=config.get('prepend_data', False)\n )\n finally:\n if pairs_not_available:\n logger.info(f\"Pairs [{','.join(pairs_not_available)}] not available \"\n f\"on exchange {exchange.name}.\")" }, { "identifier": "RunMode", "path": "freqtrade/enums/runmode.py", "snippet": "class RunMode(Enum):\n \"\"\"\n Bot running mode (backtest, hyperopt, ...)\n can be \"live\", \"dry-run\", \"backtest\", \"edge\", \"hyperopt\".\n \"\"\"\n LIVE = \"live\"\n DRY_RUN = \"dry_run\"\n BACKTEST = \"backtest\"\n EDGE = \"edge\"\n HYPEROPT = \"hyperopt\"\n UTIL_EXCHANGE = \"util_exchange\"\n UTIL_NO_EXCHANGE = \"util_no_exchange\"\n PLOT = \"plot\"\n WEBSERVER = \"webserver\"\n OTHER = \"other\"" }, { "identifier": "OperationalException", "path": "freqtrade/exceptions.py", "snippet": "class OperationalException(FreqtradeException):\n \"\"\"\n Requires manual intervention and will stop the bot.\n Most of the time, this is caused by an invalid Configuration.\n \"\"\"" }, { "identifier": "EXMS", "path": "tests/conftest.py", "snippet": "EXMS = 'freqtrade.exchange.exchange.Exchange'" }, { "identifier": "log_has", "path": "tests/conftest.py", "snippet": "def log_has(line, logs):\n \"\"\"Check if line is found on some caplog's message.\"\"\"\n return any(line == message for message in logs.messages)" }, { "identifier": "patch_exchange", "path": "tests/conftest.py", "snippet": "def patch_exchange(\n mocker,\n api_mock=None,\n id='binance',\n mock_markets=True,\n mock_supported_modes=True\n) -> None:\n mocker.patch(f'{EXMS}._load_async_markets', return_value={})\n mocker.patch(f'{EXMS}.validate_config', MagicMock())\n mocker.patch(f'{EXMS}.validate_timeframes', MagicMock())\n mocker.patch(f'{EXMS}.id', PropertyMock(return_value=id))\n mocker.patch(f'{EXMS}.name', PropertyMock(return_value=id.title()))\n mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=2))\n\n if mock_markets:\n if isinstance(mock_markets, bool):\n mock_markets = get_markets()\n mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=mock_markets))\n\n if mock_supported_modes:\n mocker.patch(\n f'freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs',\n PropertyMock(return_value=[\n (TradingMode.MARGIN, MarginMode.CROSS),\n (TradingMode.MARGIN, MarginMode.ISOLATED),\n (TradingMode.FUTURES, MarginMode.CROSS),\n (TradingMode.FUTURES, MarginMode.ISOLATED)\n ])\n )\n\n if api_mock:\n mocker.patch(f'{EXMS}._init_ccxt', return_value=api_mock)\n else:\n mocker.patch(f'{EXMS}._init_ccxt', MagicMock())\n mocker.patch(f'{EXMS}.timeframes', PropertyMock(\n return_value=['5m', '15m', '1h', '1d']))" } ]
from unittest.mock import MagicMock, PropertyMock from freqtrade.configuration.config_setup import setup_utils_configuration from freqtrade.data.history.history_utils import download_data_main from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from tests.conftest import EXMS, log_has, patch_exchange import pytest
1,793
def test_download_data_main_no_markets(mocker, caplog): dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) patch_exchange(mocker, id='binance') mocker.patch(f'{EXMS}.get_markets', return_value={}) config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE) config.update({ "days": 20, "pairs": ["ETH/BTC", "XRP/BTC"], "timeframes": ["5m", "1h"] })
def test_download_data_main_no_markets(mocker, caplog): dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) patch_exchange(mocker, id='binance') mocker.patch(f'{EXMS}.get_markets', return_value={}) config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE) config.update({ "days": 20, "pairs": ["ETH/BTC", "XRP/BTC"], "timeframes": ["5m", "1h"] })
download_data_main(config)
1
2023-10-21 10:02:05+00:00
4k
yanzhh/HGERE
transformers/src/transformers/modeling_albert.py
[ { "identifier": "add_start_docstrings", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "add_start_docstrings_to_callable", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings_to_callable(*docstr):\n def docstring_decorator(fn):\n class_name = \":class:`~transformers.{}`\".format(fn.__qualname__.split(\".\")[0])\n intro = \" The {} forward method, overrides the :func:`__call__` special method.\".format(class_name)\n note = r\"\"\"\n\n .. note::\n Although the recipe for forward pass needs to be defined within\n this function, one should call the :class:`Module` instance afterwards\n instead of this since the former takes care of running the\n pre and post processing steps while the latter silently ignores them.\n \"\"\"\n fn.__doc__ = intro + note + \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" } ]
import logging import math import os import torch import torch.nn as nn import pdb import re import numpy as np import tensorflow as tf from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss from torch.nn.utils.rnn import pad_sequence from transformers.configuration_albert import AlbertConfig from transformers.modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer from transformers.modeling_utils import PreTrainedModel from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modules import *
3,534
config_class = AlbertConfig pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "albert" def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, (nn.Linear)) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ALBERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Args: config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.AlbertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings( "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class AlbertModel(AlbertPreTrainedModel): config_class = AlbertConfig pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_albert base_model_prefix = "albert" def __init__(self, config): super().__init__(config) self.config = config self.embeddings = AlbertEmbeddings(config) self.encoder = AlbertTransformer(config) self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.pooler_activation = nn.Tanh() self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.embeddings.word_embeddings new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.embeddings.word_embeddings = new_embeddings return self.embeddings.word_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers. These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer, while [2,3] correspond to the two inner groups of the second hidden layer. Any layer with in index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more information about head pruning """ for layer, heads in heads_to_prune.items(): group_idx = int(layer / self.config.inner_group_num) inner_group_idx = int(layer - group_idx * self.config.inner_group_num) self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ALBERT model. """ logger = logging.getLogger(__name__) ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-pytorch_model.bin", "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-pytorch_model.bin", "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-pytorch_model.bin", "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-pytorch_model.bin", "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-pytorch_model.bin", "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-pytorch_model.bin", "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-pytorch_model.bin", "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-pytorch_model.bin", } def load_tf_weights_in_albert(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model.""" try: except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): print(name) for name, array in zip(names, arrays): original_name = name # If saved from the TF HUB module name = name.replace("module/", "") # Renaming and simplifying name = name.replace("ffn_1", "ffn") name = name.replace("bert/", "albert/") name = name.replace("attention_1", "attention") name = name.replace("transform/", "") name = name.replace("LayerNorm_1", "full_layer_layer_norm") name = name.replace("LayerNorm", "attention/LayerNorm") name = name.replace("transformer/", "") # The feed forward layer had an 'intermediate' step which has been abstracted away name = name.replace("intermediate/dense/", "") name = name.replace("ffn/intermediate/output/dense/", "ffn_output/") # ALBERT attention was split between self and output which have been abstracted away name = name.replace("/output/", "/") name = name.replace("/self/", "/") # The pooler is a linear layer name = name.replace("pooler/dense", "pooler") # The classifier was simplified to predictions from cls/predictions name = name.replace("cls/predictions", "predictions") name = name.replace("predictions/attention", "predictions") # Naming was changed to be more explicit name = name.replace("embeddings/attention", "embeddings") name = name.replace("inner_group_", "albert_layers/") name = name.replace("group_", "albert_layer_groups/") # Classifier if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name): name = "classifier/" + name # No ALBERT model currently handles the next sentence prediction task if "seq_relationship" in name: continue name = name.split("/") # Ignore the gradients applied by the LAMB/ADAM optimizers. if ( "adam_m" in name or "adam_v" in name or "AdamWeightDecayOptimizer" in name or "AdamWeightDecayOptimizer_1" in name or "global_step" in name ): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {} from {}".format(name, original_name)) pointer.data = torch.from_numpy(array) return model class AlbertEmbeddings(BertEmbeddings): """ Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) class AlbertAttention(BertSelfAttention): def __init__(self, config): super().__init__(config) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.attention_head_size = config.hidden_size // config.num_attention_heads self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.num_attention_heads, self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) # Update hyper params and store pruned heads self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input_ids, attention_mask=None, head_mask=None): mixed_query_layer = self.query(input_ids) mixed_key_layer = self.key(input_ids) mixed_value_layer = self.value(input_ids) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() # Should find a better way to do this w = ( self.dense.weight.t() .view(self.num_attention_heads, self.attention_head_size, self.hidden_size) .to(context_layer.dtype) ) b = self.dense.bias.to(context_layer.dtype) projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b projected_context_layer_dropout = self.dropout(projected_context_layer) layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout) return (layernormed_context_layer, attention_probs) if self.output_attentions else (layernormed_context_layer,) class AlbertLayer(nn.Module): def __init__(self, config): super().__init__() self.config = config self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attention = AlbertAttention(config) self.ffn = nn.Linear(config.hidden_size, config.intermediate_size) self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size) self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_states, attention_mask=None, head_mask=None): attention_output = self.attention(hidden_states, attention_mask, head_mask) ffn_output = self.ffn(attention_output[0]) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0]) return (hidden_states,) + attention_output[1:] # add attentions if we output them class AlbertLayerGroup(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)]) def forward(self, hidden_states, attention_mask=None, head_mask=None): layer_hidden_states = () layer_attentions = () for layer_index, albert_layer in enumerate(self.albert_layers): layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index]) hidden_states = layer_output[0] if self.output_attentions: layer_attentions = layer_attentions + (layer_output[1],) if self.output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (layer_hidden_states,) if self.output_attentions: outputs = outputs + (layer_attentions,) return outputs # last-layer hidden state, (layer hidden states), (layer attentions) class AlbertTransformer(nn.Module): def __init__(self, config): super().__init__() self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size) self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)]) def forward(self, hidden_states, attention_mask=None, head_mask=None): hidden_states = self.embedding_hidden_mapping_in(hidden_states) all_attentions = () if self.output_hidden_states: all_hidden_states = (hidden_states,) for i in range(self.config.num_hidden_layers): # Number of layers in a hidden group layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups) # Index of the hidden group group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx]( hidden_states, attention_mask, head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group], ) hidden_states = layer_group_output[0] if self.output_attentions: all_attentions = all_attentions + layer_group_output[-1] if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class AlbertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = AlbertConfig pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "albert" def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, (nn.Linear)) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ALBERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Args: config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ALBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.AlbertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings( "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) class AlbertModel(AlbertPreTrainedModel): config_class = AlbertConfig pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_albert base_model_prefix = "albert" def __init__(self, config): super().__init__(config) self.config = config self.embeddings = AlbertEmbeddings(config) self.encoder = AlbertTransformer(config) self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.pooler_activation = nn.Tanh() self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.embeddings.word_embeddings new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.embeddings.word_embeddings = new_embeddings return self.embeddings.word_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups. If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers. These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer, while [2,3] correspond to the two inner groups of the second hidden layer. Any layer with in index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more information about head pruning """ for layer, heads in heads_to_prune.items(): group_idx = int(layer / self.config.inner_group_num) inner_group_idx = int(layer - group_idx * self.config.inner_group_num) self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
1
2023-10-15 02:31:09+00:00
4k
explosion/prodigy-hf
tests/test_train_basics.py
[ { "identifier": "hf_train_ner", "path": "prodigy_hf/ner.py", "snippet": "@recipe(\n \"hf.train.ner\",\n # fmt: off\n datasets=Arg(help=\"Datasets with NER annotations to train model for\"),\n out_dir=Arg(help=\"Folder to save trained model into\"),\n epochs=Arg(\"--epochs\", \"-e\", help=\"Number of epochs to finetune\"),\n model_name=Arg(\"--model-name\", \"-m\", help=\"HFmodel to use as a base model\"),\n batch_size=Arg(\"--batch-size\", \"-bs\", help=\"Batch size.\"),\n eval_split=Arg(\"--eval-split\", \"-es\", help=\"If no evaluation sets are provided for a component, split off a a percentage of the training examples for evaluation.\"),\n learning_rate=Arg(\"--learning-rate\", \"-lr\", help=\"Learning rate.\"),\n verbose=Arg(\"--verbose\", \"-v\", help=\"Output all the logs/warnings from Huggingface libraries.\"),\n # fmt: on\n)\ndef hf_train_ner(datasets: str,\n out_dir: Path,\n epochs: int = 10,\n model_name: str = \"distilbert-base-uncased\",\n batch_size: int = 8,\n eval_split: Optional[float] = None,\n learning_rate: float = 2e-5,\n verbose:bool = False):\n log(\"RECIPE: train.hf.ner started.\")\n if not verbose:\n set_transformers_verbosity_error()\n disable_progress_bar()\n\n train_examples, valid_examples = produce_train_eval_datasets(datasets, eval_split)\n gen_train, gen_valid, label_list, id2lab, lab2id = into_hf_format(train_examples, valid_examples)\n\n prodigy_dataset = DatasetDict(\n train=Dataset.from_list(gen_train),\n eval=Dataset.from_list(gen_valid)\n )\n\n log(\"RECIPE: Applying tokenizer and aligning labels.\")\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n tokenized_dataset = prodigy_dataset.map(tokenize_and_align_labels, batched=True, fn_kwargs={\"tokenizer\": tokenizer})\n\n data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)\n model = AutoModelForTokenClassification.from_pretrained(\n model_name, num_labels=len(id2lab), id2label=id2lab, label2id=lab2id\n )\n\n training_args = TrainingArguments(\n output_dir=out_dir,\n learning_rate=learning_rate,\n per_device_train_batch_size=batch_size,\n per_device_eval_batch_size=batch_size,\n num_train_epochs=epochs,\n weight_decay=0.01,\n evaluation_strategy=\"epoch\",\n save_strategy=\"epoch\",\n load_best_model_at_end=True,\n push_to_hub=False,\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"eval\"],\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=build_metrics_func(label_list),\n )\n log(\"RECIPE: Starting training.\")\n tic = time.time()\n trainer.train()\n toc = time.time()\n log(f\"RECIPE: Total training time: {round(toc - tic)}s.\")" }, { "identifier": "hf_ner_correct", "path": "prodigy_hf/ner.py", "snippet": "@recipe(\n \"hf.ner.correct\",\n # fmt: off\n dataset=Arg(help=\"Dataset to write annotations into\"),\n model=Arg(help=\"Path to transformer model. Can also point to model on hub.\"),\n source=Arg(help=\"Source file to annotate\"),\n lang=Arg(\"--lang\", \"-l\", help=\"Language of the spaCy tokeniser\"),\n # fmt: on\n)\ndef hf_ner_correct(dataset: str,\n model: str,\n source: str,\n lang: str = \"en\"):\n log(\"RECIPE: train.hf.ner started.\")\n set_transformers_verbosity_error()\n stream = get_stream(source, rehash=True, dedup=True)\n nlp = spacy.blank(lang)\n tfm_model: TokenClassificationPipeline = pipeline(\"ner\", model=model)\n labels = get_hf_config_labels(tfm_model)\n log(f\"RECIPE: Transformer model loaded with {labels=}.\")\n\n @support_both_streams(stream_arg=\"stream\")\n def attach_predictions(stream):\n for ex in stream:\n doc = to_spacy_doc(ex['text'], tfm_model, nlp)\n doc_dict = doc.to_json()\n ex['spans'] = doc_dict['ents']\n yield ex\n\n stream.apply(attach_predictions)\n stream.apply(add_tokens, nlp=nlp, stream=stream)\n\n return {\n \"dataset\": dataset,\n \"view_id\": \"ner_manual\",\n \"stream\": stream,\n \"config\": {\n \"labels\": labels\n }\n }" }, { "identifier": "hf_train_textcat", "path": "prodigy_hf/textcat.py", "snippet": "@recipe(\n \"hf.train.textcat\",\n # fmt: off\n datasets=Arg(help=\"Datasets with NER annotations to train model for\"),\n out_dir=Arg(help=\"Folder to save trained model into\"),\n epochs=Arg(\"--epochs\", \"-e\", help=\"Number of epochs to finetune\"),\n model_name=Arg(\"--model-name\", \"-m\", help=\"HFmodel to use as a base model\"),\n batch_size=Arg(\"--batch-size\", \"-bs\", help=\"Batch size.\"),\n eval_split=Arg(\"--eval-split\", \"-es\", help=\"If no evaluation sets are provided for a component, split off a a percentage of the training examples for evaluation.\"),\n learning_rate=Arg(\"--learning-rate\", \"-lr\", help=\"Learning rate.\"),\n verbose=Arg(\"--verbose\", \"-v\", help=\"Output all the logs/warnings from Huggingface libraries.\"),\n # fmt: on\n)\ndef hf_train_textcat(datasets: str,\n out_dir: Path,\n epochs: int = 10,\n model_name: str = \"distilbert-base-uncased\",\n batch_size: int = 8,\n eval_split: Optional[float] = None,\n learning_rate: float = 2e-5,\n verbose:bool = False):\n \"\"\"Train a transformer model for text classification.\"\"\"\n log(\"RECIPE: train.hf.ner started.\")\n if not verbose:\n set_transformers_verbosity_error()\n disable_progress_bar()\n\n train_examples, valid_examples, variant = produce_train_eval_datasets(datasets, eval_split)\n gen_train, gen_valid, label_list, id2lab, lab2id = into_hf_format(train_examples, valid_examples, variant)\n\n prodigy_dataset = DatasetDict(\n train=Dataset.from_list(gen_train),\n eval=Dataset.from_list(gen_valid)\n )\n\n log(\"RECIPE: Applying tokenizer.\")\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n \n def preprocess_function(examples):\n return tokenizer(examples[\"text\"], truncation=True, padding=True)\n\n tokenized_dataset = prodigy_dataset.map(preprocess_function, batched=True)\n\n data_collator = DataCollatorWithPadding(tokenizer=tokenizer)\n model = AutoModelForSequenceClassification.from_pretrained(\n model_name, num_labels=len(id2lab), id2label=id2lab, label2id=lab2id\n )\n\n training_args = TrainingArguments(\n output_dir=out_dir,\n learning_rate=learning_rate,\n per_device_train_batch_size=batch_size,\n per_device_eval_batch_size=batch_size,\n num_train_epochs=epochs,\n weight_decay=0.01,\n evaluation_strategy=\"epoch\",\n save_strategy=\"epoch\",\n load_best_model_at_end=True,\n push_to_hub=False,\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"eval\"],\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=build_metrics_func(label_list),\n )\n log(\"RECIPE: Starting training.\")\n tic = time.time()\n trainer.train()\n toc = time.time()\n log(f\"RECIPE: Total training time: {round(toc - tic)}s.\")" }, { "identifier": "hf_textcat_correct", "path": "prodigy_hf/textcat.py", "snippet": "@recipe(\n \"hf.textcat.correct\",\n # fmt: off\n dataset=Arg(help=\"Dataset to write annotations into\"),\n model=Arg(help=\"Path to transformer model. Can also point to model on hub.\"),\n source=Arg(help=\"Source file to annotate\"),\n # fmt: on\n)\ndef hf_textcat_correct(dataset: str,\n model: str,\n source: str):\n \"\"\"Use transformer model to help you annotate textcat data.\"\"\"\n \n log(\"RECIPE: train.hf.ner started.\")\n set_transformers_verbosity_error()\n \n stream = get_stream(source, rehash=True, dedup=True)\n tfm_model = pipeline(\"text-classification\", model=model)\n model_labels = list(tfm_model.model.config.label2id.keys())\n log(f\"RECIPE: Transformer model loaded with {model_labels=}.\")\n\n # Catch models trained on binary data. We don't support these because the only\n # possible labels are \"ACCEPT\" and \"REJECT\" and we don't have access to the original label.\n if set(model_labels) == set([\"accept\", \"reject\"]):\n msg.fail(\"This recipe only supports Hugging Face models that are trained on non-binary data.\", exits=True)\n stream.apply(add_model_predictions, hf_pipeline=tfm_model, model_labels=model_labels)\n\n return {\n \"dataset\": dataset,\n \"view_id\": \"choice\",\n \"stream\": stream,\n \"config\": {\n \"choice_style\": \"single\",\n \"choice_auto_accept\": True\n }\n }" } ]
import pytest from prodigy_hf import hf_train_ner, hf_train_textcat, hf_ner_correct, hf_textcat_correct
2,386
""" These tests assume some datasets are available in the Prodigy database. Check the `.github/workflows/tests.yml` file for more details. """ def test_smoke_ner(tmpdir): # Make sure we can train without errors
""" These tests assume some datasets are available in the Prodigy database. Check the `.github/workflows/tests.yml` file for more details. """ def test_smoke_ner(tmpdir): # Make sure we can train without errors
hf_train_ner("fashion,eval:fashion", tmpdir, epochs=1, model_name="hf-internal-testing/tiny-random-DistilBertModel")
0
2023-10-19 15:34:07+00:00
4k
johnyang101/pmpnndiff
models/diffusion_lms.py
[ { "identifier": "Generic_LM", "path": "models/pmpnn_lms.py", "snippet": "class Generic_LM(pl.LightningModule):\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n self.learning_rate = self.cfg.learning_rate\n \n def training_step(self, batch, batch_idx):\n raise NotImplementedError\n \n def validation_step(self, batch, batch_idx):\n raise NotImplementedError\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5, verbose=True)\n return optimizer #{'optimizer': optimizer, 'lr_scheduler': scheduler, 'monitor': 'val/loss'}\n\n def load_ft_dict(self, model):\n weights_dict = torch.load(self.cfg.model.ft_path, map_location=self.device)\n if 'state_dict' in weights_dict:\n sd_pyl = weights_dict['state_dict']\n sd = {k.replace('model.', ''): v for k, v in sd_pyl.items() if 'model.' in k}\n elif 'model_state_dict' in weights_dict:\n sd = weights_dict['model_state_dict']\n else:\n raise ValueError('Could not find state_dict or model_state_dict in weights_dict.')\n \n try:\n model.load_state_dict(sd)\n print('Loaded full model weights')\n except Exception as e:\n print(e)\n model = load_sd_for_matched_keys(model, sd, self.cfg.model.ignore_keys)\n return model" }, { "identifier": "PMPNNBatch", "path": "data/data_objs.py", "snippet": "class PMPNNBatch(Obj): \n\n def __init__(self, batch, device):\n self.batch = batch\n X, S, mask, lengths, chain_M, residue_idx, mask_self, chain_encoding_all = models.pmpnn_utils.featurize(batch, device)\n super().__init__(S)\n self.register_buffer('X', X)\n self.register_buffer('S', S)\n self.register_buffer('mask', mask)\n self.lengths = lengths\n self.register_buffer('chain_M', chain_M)\n self.register_buffer('residue_idx', residue_idx)\n self.register_buffer('mask_self', mask_self)\n self.register_buffer('chain_encoding_all', chain_encoding_all)\n self.register_buffer('mask_padded_false', mask) # [1, N])\n self.names = [b['name'] for b in batch]\n self.alphabet = 'ACDEFGHIKLMNPQRSTVWYX'\n \n @staticmethod\n def indices_to_string(seq_ind, alphabet, mask, length, pad_token='X'):\n # Convert a sequence of indices to a string using the given alphabet and mask\n seq_chars = [alphabet[idx] if mask[i] else pad_token for i, idx in enumerate(seq_ind[:length])]\n return ''.join(seq_chars)\n\n def sampled_seq_string(self):\n # Ensure the class attributes are correctly set up\n B, N = self.S.shape\n assert self.S.shape == self.mask.shape, 'seq_ind and mask must have the same shape'\n assert len(self.names) == B, 'names must have the same length as batch_size'\n\n lengths = self.mask.sum(dim=1).cpu().numpy() # Calculate lengths from mask\n sampled_seq_string = [self.indices_to_string(self.S[i], self.alphabet, self.mask[i], int(lengths[i])) for i in range(B)]\n\n return sampled_seq_string" }, { "identifier": "PMPNN_Baseline_Diff", "path": "models/pmpnn.py", "snippet": "class PMPNN_Baseline_Diff(PMPNN_Baseline_CPD):\n \n def __init__(self, num_letters=21, node_features=128, edge_features=128,\n hidden_dim=128, num_encoder_layers=3, num_decoder_layers=3,\n vocab=21, k_neighbors=32, augment_eps=0.1, dropout=0.1,\n embedding_cfg: Union[Dict, DictConfig] = {}, absorbing=False,\n **kwargs):\n \n assert embedding_cfg, 'Must pass in embedding conf'\n \n super().__init__(\n num_letters, \n node_features, \n edge_features,\n hidden_dim,\n num_encoder_layers,\n num_decoder_layers,\n vocab,\n k_neighbors,\n augment_eps,\n dropout\n )\n self.W_s = models.embed.TimestepEmbedding(**embedding_cfg) \n self.num_classes = num_letters \n self.max_len = embedding_cfg['max_len']\n \n if absorbing:\n self.W_out = nn.Linear(hidden_dim, self.num_classes - 1, bias=True)\n \n def decode(self, mask, chain_M, h_V, h_E, E_idx, h_S):\n \"\"\"\n Decodes without causal attention mask.\n \"\"\"\n \n h_ES = pu.cat_neighbors_nodes(h_S, h_E, E_idx)\n chain_M = chain_M*mask #update chain_M to include missing regions\n for layer in self.decoder_layers:\n h_ESV = pu.cat_neighbors_nodes(h_V, h_ES, E_idx)\n # h_ESV = mask_bw * h_ESV + h_EXV_encoder_fw\n h_V = torch.utils.checkpoint.checkpoint(layer, h_V, h_ESV, mask)\n\n logits = self.W_out(h_V)\n return logits\n \n def forward(self, obj, t):\n \n X, S, mask, chain_M, residue_idx, chain_encoding_all = obj.X, obj.S, obj.mask, obj.chain_M, obj.residue_idx, obj.chain_encoding_all\n \n h_V, h_E, E_idx = self.encode(X, S, mask, chain_M, residue_idx, chain_encoding_all)\n \n x_t = obj.x_t\n one_hot_x_t: TensorType[obj.B, obj.N, self.num_classes] = F.one_hot(x_t, self.num_classes)\n \n #Embed timestep in sequence.\n h_S = self.W_s(obj.B, t, obj.N, one_hot_x_t)\n\n logits = self.decode(mask, chain_M, h_V, h_E, E_idx, h_S)\n return logits" } ]
import math import torch import torch.nn.functional as F import torch.distributions as dists import models.diffusion_utils as du from torchtyping import TensorType from models.pmpnn_lms import Generic_LM from data.data_objs import PMPNNBatch from models.pmpnn import PMPNN_Baseline_Diff
1,638
class Generic_Diff_LM(Generic_LM): def __init__(self, cfg, debug=False): super().__init__(cfg) self.debug = debug self.num_classes = cfg.num_classes self.non_abs_classes = self.num_classes - 1 if self.cfg.model.absorbing else self.num_classes self._denoise_fn = self._init_denoise_fn(cfg.model) if self.cfg.model.ft: self._denoise_fn = self.load_ft_dict(self._denoise_fn) def _init_denoise_fn(self, model_conf): #TODO: Write this.
class Generic_Diff_LM(Generic_LM): def __init__(self, cfg, debug=False): super().__init__(cfg) self.debug = debug self.num_classes = cfg.num_classes self.non_abs_classes = self.num_classes - 1 if self.cfg.model.absorbing else self.num_classes self._denoise_fn = self._init_denoise_fn(cfg.model) if self.cfg.model.ft: self._denoise_fn = self.load_ft_dict(self._denoise_fn) def _init_denoise_fn(self, model_conf): #TODO: Write this.
return PMPNN_Baseline_Diff(**model_conf)
2
2023-10-16 08:47:43+00:00
4k